├── README.md ├── torchneuromorphic ├── __init__.py ├── shd │ ├── __init__.py │ └── shd_dataloaders.py ├── dvssign │ ├── __init__.py │ ├── create_dvssign.py │ ├── test_dvssign.py │ ├── create_hdf5_sign.py │ └── dvssign_dataloaders.py ├── nmnist │ ├── __init__.py │ ├── create_nmnist_small_noxtgt.py │ ├── create_nmnist_small.py │ ├── create_hdf5.py │ └── nmnist_dataloaders.py ├── rosbags │ ├── __init__.py │ ├── create_hdf5.py │ └── rosbags_dataloaders.py ├── doublenmnist │ ├── __init__.py │ └── doublenmnist_dataloaders.py ├── dvs_gestures │ ├── __init__.py │ ├── create_hdf5.py │ └── dvsgestures_dataloaders.py ├── nomniglot │ ├── __init__.py │ ├── create_nomniglot.py │ ├── test_nomniglot.py │ ├── create_hdf5_omniglot.py │ └── nomniglot_dataloaders.py ├── ntidigits │ ├── __init__.py │ └── ntidigits_dataloaders.py ├── double_dvssign │ ├── __init__.py │ ├── test_doubledvssign.py │ └── doubledvssign_dataloaders.py ├── doublenmnist_torchmeta │ ├── __init__.py │ └── doublenmnist_dataloaders.py ├── .DS_Store ├── events_timeslices.py ├── neuromorphic_dataset.py ├── utils.py └── transforms.py ├── .DS_Store ├── tests ├── create_nmnist.py ├── create_dvsgestures.py ├── test_nmnist.py ├── test_shd.py ├── test_tidigits.py ├── test_dvsgestures.py ├── test_aedat_legacy.py ├── test_aedat_legacy_timesurface.py ├── create_metadoublenmnist.py └── create_dvsgestures_attn.py ├── setup.py └── .gitignore /README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/shd/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/torchneuromorphic/dvssign/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/nmnist/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/rosbags/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/doublenmnist/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/dvs_gestures/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/nomniglot/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/ntidigits/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/double_dvssign/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /torchneuromorphic/doublenmnist_torchmeta/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.DS_Store: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/nmi-lab/torchneuromorphic/HEAD/.DS_Store -------------------------------------------------------------------------------- /torchneuromorphic/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nmi-lab/torchneuromorphic/HEAD/torchneuromorphic/.DS_Store -------------------------------------------------------------------------------- /tests/create_nmnist.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : create_dvsgestures.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Tue Nov 5 13:20:05 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.nmnist.nmnist_dataloaders import * 13 | 14 | if __name__ == "__main__": 15 | out = create_events_hdf5('data/nmnist/', 'data/nmnist/n_mnist.hdf5') 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /tests/create_dvsgestures.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : create_dvsgestures.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Tue Nov 5 13:20:05 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.dvs_gestures.create_hdf5 import * 13 | 14 | if __name__ == "__main__": 15 | out = create_events_hdf5('data/dvsgesture/raw/', 'data/dvsgesture/dvs_gestures_build19.hdf5') 16 | 17 | 18 | 19 | 20 | 
-------------------------------------------------------------------------------- /tests/test_nmnist.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_nmnist.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Thu Nov 7 20:30:14 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.nmnist.nmnist_dataloaders import * 13 | from torchneuromorphic.utils import plot_frames_imshow 14 | 15 | if __name__ == "__main__": 16 | train_dl, test_dl = create_dataloader( 17 | root='data/nmnist/n_mnist.hdf5', 18 | batch_size=32, 19 | ds=1, 20 | num_workers=0) 21 | ho = iter(train_dl) 22 | frames, labels = next(ho) 23 | -------------------------------------------------------------------------------- /tests/test_shd.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_tidigits.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Fri 13 Sep 2019 06:44:14 AM PDT 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.shd.shd_dataloaders import * 13 | 14 | if __name__ == '__main__': 15 | #create_events_hdf5(directory = 'data/tidigits/', hdf5_filename = 'ntidigits_isolated.hdf5') 16 | train_dl, test_dl = create_dataloader(root = 'data/shd/shd.hdf5', chunk_size_train=1000, chunk_size_test=1000, batch_size=50, dt = 1000, ds = 1, num_workers=0) 17 | data_batch, target_batch = next(iter(test_dl)) 18 | 19 | 
-------------------------------------------------------------------------------- /tests/test_tidigits.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_tidigits.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Fri 13 Sep 2019 06:44:14 AM PDT 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.ntidigits.ntidigits_dataloaders import * 13 | 14 | if __name__ == '__main__': 15 | #create_events_hdf5(directory = 'data/tidigits/', hdf5_filename = 'ntidigits_isolated.hdf5') 16 | train_dl, test_dl = create_dataloader(root = 'data/tidigits/ntidigits_isolated.hdf5', chunk_size_train=1000, chunk_size_test=1000, batch_size=50, dt = 1000, ds = [1], num_workers=0) 17 | #data_batch, target_batch = next(iter(train_dl)) 18 | 19 | -------------------------------------------------------------------------------- /tests/test_dvsgestures.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_dvsgestures.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Fri 19 Sep 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv3 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.dvs_gestures.dvsgestures_dataloaders import * 13 | from torchneuromorphic.utils import plot_frames_imshow 14 | import torchneuromorphic.transforms as transforms 15 | 16 | if __name__ == "__main__": 17 | train_dl, test_dl = create_dataloader( 18 | root='data/dvsgesture/dvs_gestures_build19.hdf5', 19 | batch_size=64, 20 | ds=4, 21 | 
num_workers=0) 22 | ho = iter(train_dl) 23 | frames, labels = next(ho) 24 | -------------------------------------------------------------------------------- /tests/test_aedat_legacy.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_aedat2.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Tue 16 Mar 2021 07:44:09 AM PDT 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv3 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.utils import plot_frames_imshow, legacy_aedat_to_events 13 | import torchneuromorphic.transforms as transforms 14 | import sys 15 | 16 | events = legacy_aedat_to_events(sys.argv[1]) 17 | dt = 1000 18 | size = [2, 240, 240] 19 | process_to_countframe = transforms.Compose([ 20 | transforms.Downsample(factor=[dt,1,1,1]), 21 | transforms.ToCountFrame(T = 500, size = size), 22 | #transforms.ToTensor() 23 | ]) 24 | frames = process_to_countframe(events) 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | Torch Neuromorphic 3 | """ 4 | 5 | # Always prefer setuptools over distutils 6 | from setuptools import setup, find_packages 7 | # To use a consistent encoding 8 | from codecs import open 9 | from os import path 10 | 11 | here = path.abspath(path.dirname(__file__)) 12 | 13 | # Get the long description from the README file 14 | with open(path.join(here, 'README.md'), encoding='utf-8') as f: 15 | long_description = f.read() 16 | 17 | setup( 18 | name = "torchneuromorphic", 19 | version = "0.3.7", 20 | author = "Emre Neftci", 21 | author_email = "eneftci@uci.edu", 22 | description = ("Dataset loaders for pytorch"), 23 | 
packages=find_packages(exclude=['contrib', 'docs', 'tests']), 24 | download_url='https://github.com/nmi-lab/torchneuromorphic/archive/0.3.1.tar.gz', 25 | long_description=long_description, 26 | license='Apache License 2.0', 27 | install_requires=[ 28 | "torchvision>=0.4.1", 29 | "torch>=1.1.0", 30 | "scipy>=1.0", 31 | "h5py", 32 | "pandas", 33 | "dv", 34 | ] 35 | ) 36 | -------------------------------------------------------------------------------- /tests/test_aedat_legacy_timesurface.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_aedat_legacy_timesurface.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Tue 16 Mar 2021 01:28:22 PM PDT 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv3 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.utils import plot_frames_imshow, legacy_aedat_to_events 13 | import torchneuromorphic.transforms as transforms 14 | import pylab as plt 15 | import numpy as np 16 | import sys 17 | 18 | device = 'cuda' 19 | events = legacy_aedat_to_events(sys.argv[1]) 20 | dt = 1000 21 | size = [2, 346, 260] 22 | process_events = transforms.Compose([ 23 | transforms.Downsample(factor=[dt,1,1,1]), 24 | transforms.ToCountFrame(T = 1000, size = size), 25 | transforms.ToTensor(), 26 | transforms.ExpFilterEvents(tau=100, length=500, device=device) 27 | ]) 28 | frames = process_events(events) 29 | 30 | 31 | plot_frames_imshow(np.array([frames.detach().cpu().numpy()]), nim=1) 32 | plt.show() 33 | -------------------------------------------------------------------------------- /torchneuromorphic/dvssign/create_dvssign.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | 
#----------------------------------------------------------------------------- 3 | # File Name : create_dvsgestures.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Tue Nov 5 13:20:05 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.dvssign.dvssign_dataloaders import * 13 | 14 | if __name__ == "__main__": 15 | 16 | #out = create_events_hdf5('/home/kennetms/Documents/torchneuromorphic/torchneuromorphic/dvssign/data/ASL-DVS', '/home/kennetms/Documents/torchneuromorphic/torchneuromorphic/dvssign/data/ASL-DVS/dvssign.hdf5') 17 | 18 | DVSSignDataset = DVSSignDataset(root='data/dvssign/data/ASL-DVS/dvssign.hdf5') 19 | 20 | 21 | f = h5py.File('data/ASL-DVS/dvssign.hdf5', 'r') 22 | 23 | print(list(f.keys())) 24 | 25 | data = f['data'] 26 | 27 | print(data['0'].keys()) 28 | 29 | print("addrs",data['0']['addrs'][0]) 30 | 31 | print("label",data['0']['labels']) 32 | 33 | print("time",data['0']['times'][0]) 34 | 35 | 36 | -------------------------------------------------------------------------------- /torchneuromorphic/double_dvssign/test_doubledvssign.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_nmnist.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Thu Nov 7 20:30:14 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.doubledvssign.dvssign_dataloaders import * 13 | from torchneuromorphic.utils import plot_frames_imshow 14 | import matplotlib.pyplot as plt 15 | 16 | if __name__ == "__main__": 17 | train_dl, test_dl = create_dataloader( 18 | root='data/ASL-DVS/dvssign.hdf5', 19 
| batch_size=8, 20 | ds=1, 21 | chunk_size_train = 100, 22 | chunk_size_test = 100, 23 | num_workers=0) 24 | 25 | iter_meta_train = iter(train_dl) 26 | iter_meta_test = iter(test_dl) 27 | frames_train, labels_train = next(iter_meta_train) 28 | frames_test , labels_test = next(iter_meta_test) 29 | 30 | 31 | print(frames_train.shape) 32 | print(labels_train.shape) 33 | plot_frames_imshow(frames_train, labels_train, do1h=False, nim=4, avg=25) 34 | plt.savefig('dvssigns4.png') 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | scripts/logs 3 | scripts/runs_arg* 4 | __pycache__/ 5 | *.py[cod] 6 | .*.sw* 7 | *.bak 8 | *runs_args* 9 | *logs 10 | *.npy 11 | 12 | # C extensions 13 | *.so 14 | script[0-9]*.sh 15 | 16 | # Distribution / packaging 17 | obj/ 18 | .Python 19 | c_nsat/data/ 20 | c_nsat/obj 21 | *.hex 22 | *.svg 23 | bin/ 24 | evs_*_* 25 | env/ 26 | build/ 27 | develop-eggs/ 28 | dist/ 29 | downloads/ 30 | eggs/ 31 | .eggs/ 32 | parts/ 33 | sdist/ 34 | var/ 35 | *.egg-info/ 36 | .installed.cfg 37 | *.egg 38 | Results* 39 | decolle/runs* 40 | samples/data* 41 | decolle/data 42 | __gen__* 43 | 44 | # PyInstaller 45 | # Usually these files are written by a python script from a template 46 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
47 | *.manifest 48 | *.spec 49 | 50 | # Installer logs 51 | pip-log.txt 52 | pip-delete-this-directory.txt 53 | 54 | # Unit test / coverage reports 55 | htmlcov/ 56 | .tox/ 57 | .coverage 58 | .coverage.* 59 | .cache 60 | nosetests.xml 61 | coverage.xml 62 | *,cover 63 | *.ipynb_check* 64 | 65 | # Translations 66 | *.mo 67 | *.pot 68 | 69 | # Django stuff: 70 | *.log 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | target/ 77 | dcll.egg-info/ 78 | *.pyc 79 | 80 | .project 81 | .pydevproject 82 | runs2/ 83 | -------------------------------------------------------------------------------- /torchneuromorphic/nomniglot/create_nomniglot.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : create_dvsgestures.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Tue Nov 5 13:20:05 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.nomniglot.nomniglot_dataloaders import * 13 | 14 | if __name__ == "__main__": 15 | 16 | #out = create_events_hdf5('/home/kennetms/Documents/torchneuromorphic/torchneuromorphic/dvssign/data/ASL-DVS', '/home/kennetms/Documents/torchneuromorphic/torchneuromorphic/dvssign/data/ASL-DVS/dvssign.hdf5') 17 | 18 | NOmniglotDataset = NOmniglotDataset(root='/home/kennetms/Documents/torchneuromorphic/torchneuromorphic/nomniglot/data/nomniglot/nomniglot.hdf5') 19 | 20 | 21 | f = h5py.File('/home/kennetms/Documents/torchneuromorphic/torchneuromorphic/nomniglot/data/nomniglot/nomniglot.hdf5', 'r') 22 | 23 | print(list(f.keys())) 24 | 25 | data = f['data'] 26 | 27 | print(data['0'].keys()) 28 | 29 | print("addrs",data['0']['addrs'][0]) 30 | 31 | print("label",data['0']['labels']) 32 | 33 | 
print("time",data['0']['times'][0]) 34 | 35 | 36 | -------------------------------------------------------------------------------- /torchneuromorphic/dvssign/test_dvssign.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_nmnist.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Thu Nov 7 20:30:14 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.dvssign.dvssign_dataloaders import * 13 | from torchneuromorphic.utils import plot_frames_imshow 14 | import matplotlib.pyplot as plt 15 | 16 | if __name__ == "__main__": 17 | train_dl, test_dl = create_dataloader( 18 | root='data/ASL-DVS/dvssign.hdf5', 19 | batch_size=8, 20 | ds=1, 21 | chunk_size_train = 100, 22 | chunk_size_test = 100, 23 | num_workers=0) 24 | 25 | iter_meta_train = iter(train_dl) 26 | iter_meta_test = iter(test_dl) 27 | frames_train, labels_train = next(iter_meta_train) 28 | frames_test , labels_test = next(iter_meta_test) 29 | 30 | with h5py.File('data/ASL-DVS/dvssign.hdf5', 'r', swmr=True, libver="latest") as f: 31 | if 1: 32 | key = f['extra']['train_keys'][2] 33 | print(key) 34 | else: 35 | key = f['extra']['test_keys'][0] 36 | 37 | print(frames_train.shape) 38 | print(labels_train.shape) 39 | plot_frames_imshow(frames_train, labels_train, do1h=False, nim=4, avg=25) 40 | plt.savefig('dvssigns4.png') 41 | -------------------------------------------------------------------------------- /torchneuromorphic/nomniglot/test_nomniglot.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_nmnist.py 4 | # Author: Emre Neftci 5 | # 6 
| # Creation Date : Thu Nov 7 20:30:14 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.nomniglot.nomniglot_dataloaders import * 13 | from torchneuromorphic.utils import plot_frames_imshow 14 | import matplotlib.pyplot as plt 15 | 16 | if __name__ == "__main__": 17 | root = 'nomniglot_all.hdf5' #'/home/kennetms/Documents/snn_maml/data/nomniglot/nomniglot.hdf5' 18 | train_dl, valid_dl, test_dl = create_dataloader( 19 | root=root, 20 | batch_size=8, 21 | ds=8, 22 | dt=30000, 23 | chunk_size_train = 100, 24 | chunk_size_test = 100, 25 | num_workers=0) 26 | 27 | iter_meta_train = iter(train_dl) 28 | iter_meta_valid = iter(valid_dl) 29 | iter_meta_test = iter(test_dl) 30 | 31 | # make sure can make it through all data 32 | for x, t in train_dl: 33 | print(t.shape) 34 | 35 | for x, t in valid_dl: 36 | print(t.shape) 37 | 38 | for x, t in test_dl: 39 | print(t.shape) 40 | 41 | frames_train, labels_train = next(iter_meta_train) 42 | frames_valid, labels_valid = next(iter_meta_valid) 43 | frames_test , labels_test = next(iter_meta_test) 44 | 45 | with h5py.File(root, 'r', swmr=True, libver="latest") as f: 46 | if 1: 47 | keys = f['extra']['train_keys'] 48 | print(keys) 49 | else: 50 | key = f['extra']['test_keys'][0] 51 | 52 | print(frames_train.shape) 53 | print(labels_train.shape) 54 | plot_frames_imshow(frames_test, labels_test, do1h=False, nim=1, avg=100) 55 | plt.savefig('nomniglot.png') 56 | -------------------------------------------------------------------------------- /tests/create_metadoublenmnist.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_nmnist.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Thu Nov 7 20:30:14 2019 7 | # Last 
Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.doublenmnist_torchmeta.doublenmnist_dataloaders import * 13 | from torchneuromorphic.utils import plot_frames_imshow 14 | from matplotlib import pyplot as plt 15 | from torchmeta.utils.data import CombinationMetaDataset 16 | from torchmeta.utils.data import BatchMetaDataLoader, MetaDataLoader 17 | from torchmeta.datasets.helpers import doublemnist 18 | from torchmeta.transforms import Categorical, ClassSplitter 19 | 20 | 21 | 22 | 23 | if __name__ == "__main__": 24 | root = 'data/nmnist/n_mnist.hdf5' 25 | batch_size = 72 26 | chunk_size = 300 27 | ds = 1 28 | dt = 1000 29 | transform = None 30 | target_transform = None 31 | nclasses = 5 32 | ntasks = 3 33 | samples_per_class = 2 34 | samples_per_test = 2 35 | classes_meta = np.arange(100, dtype='int') 36 | 37 | size = [2, 32//ds, 32//ds] 38 | 39 | 40 | if transform is None: 41 | transform = Compose([ 42 | CropDims(low_crop=[0,0], high_crop=[32,32], dims=[2,3]), 43 | Downsample(factor=[dt,1,ds,ds]), 44 | #ToEventSum(T = chunk_size, size = size), 45 | ToCountFrame(T = chunk_size, size = size), 46 | ToTensor()]) 47 | 48 | if target_transform is None: 49 | target_transform = Compose([Repeat(chunk_size), toOneHot(nclasses)]) 50 | 51 | cc = DoubleNMNIST(root = root, meta_test=True, transform = transform, target_transform = target_transform, chunk_size=chunk_size, num_classes_per_task=5) 52 | #cd = ClassNMNISTDataset(root,meta_train=True, transform = transform, target_transform = target_transform, chunk_size=chunk_size) 53 | 54 | dmnist_it = BatchMetaDataLoader(ClassSplitter(cc, shuffle=True, num_train_per_class=3, num_test_per_class=5), batch_size=16, num_workers=0) 55 | sample = next(iter(dmnist_it)) 56 | data,targets = sample['train'] 57 | 58 | ##Load torchmeta MNIST for comparison 59 | #from torchmeta.datasets.doublemnist 
import DoubleMNISTClassDataset 60 | #dataset = DoubleMNISTClassDataset("data/",meta_train=True) 61 | #dataset_h = BatchMetaDataLoader(doublemnist("data/",meta_train=True, ways=5, shots=10), batch_size=16, num_workers=0) 62 | 63 | -------------------------------------------------------------------------------- /tests/create_dvsgestures_attn.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_dvsgestures.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Fri 19 Sep 2019 7 | # Last Modified : Fri 28 Apr 2023 02:12:31 PM CEST 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv3 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.dvs_gestures.dvsgestures_dataloaders import * 13 | from torchneuromorphic.utils import plot_frames_imshow 14 | import torchneuromorphic.transforms as transforms 15 | 16 | def create_dataloader( 17 | root = 'data/dvsgesture/dvs_gestures_build19.hdf5', 18 | batch_size = 72 , 19 | chunk_size_train = 500, 20 | chunk_size_test = 1800, 21 | dt = 1000, 22 | transform_train = None, 23 | transform_test = None, 24 | target_transform_train = None, 25 | target_transform_test = None, 26 | n_events_attention = 5000, 27 | **dl_kwargs): 28 | 29 | 30 | default_transform = lambda chunk_size: transforms.Compose([ 31 | transforms.Downsample(factor=[dt,1,1,1]), 32 | transforms.Attention(n_events_attention, size=[2,64,64]), 33 | transforms.Downsample(factor=[1,1,2,2]), 34 | transforms.ToCountFrame(T = chunk_size, size = [2,32,32]), 35 | transforms.ToTensor() 36 | ]) 37 | 38 | if transform_train is None: 39 | transform_train = default_transform(chunk_size_train) 40 | if transform_test is None: 41 | transform_test = default_transform(chunk_size_test) 42 | 43 | if target_transform_train is None: 44 | target_transform_train = 
transforms.Compose([ 45 | transforms.Repeat(chunk_size_train), 46 | transforms.toOneHot(11)]) 47 | if target_transform_test is None: 48 | target_transform_test = transforms.Compose([ 49 | transforms.Repeat(chunk_size_test), 50 | transforms.toOneHot(11)]) 51 | 52 | train_d = DVSGestureDataset(root, 53 | train=True, 54 | transform = transform_train, 55 | target_transform = target_transform_train, 56 | chunk_size = chunk_size_train) 57 | 58 | train_dl = torch.utils.data.DataLoader(train_d, batch_size=batch_size, shuffle=True, **dl_kwargs) 59 | 60 | test_d = DVSGestureDataset(root, 61 | transform = transform_test, 62 | target_transform = target_transform_test, 63 | train=False, 64 | chunk_size = chunk_size_test) 65 | 66 | test_dl = torch.utils.data.DataLoader(test_d, batch_size=batch_size, **dl_kwargs) 67 | 68 | return train_dl, test_dl 69 | 70 | if __name__ == "__main__": 71 | train_dl, test_dl = create_dataloader( 72 | transform_train = lambda x:x, 73 | batch_size=32, 74 | num_workers=0) 75 | ho = iter(test_dl) 76 | frames, labels = next(ho) 77 | #plot_frames_imshow(frames, labels) 78 | 79 | -------------------------------------------------------------------------------- /torchneuromorphic/dvs_gestures/create_hdf5.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : create_hdf5.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Tue Nov 5 13:15:54 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | import numpy as np 13 | from tqdm import tqdm 14 | import scipy.misc 15 | import h5py 16 | import glob 17 | import torch.utils.data 18 | from ..events_timeslices import * 19 | from ..utils import * 20 | import os 21 | 22 | def create_events_hdf5(directory, hdf5_filename): 23 | 
fns_train = gather_aedat(directory,1,24) 24 | fns_test = gather_aedat (directory,24,30) 25 | test_keys = [] 26 | train_keys = [] 27 | 28 | assert len(fns_train)==98 29 | 30 | with h5py.File(hdf5_filename, 'w') as f: 31 | f.clear() 32 | 33 | key = 0 34 | metas = [] 35 | data_grp = f.create_group('data') 36 | extra_grp = f.create_group('extra') 37 | for file_d in tqdm(fns_train+fns_test): 38 | istrain = file_d in fns_train 39 | data, labels_starttime = aedat_to_events(file_d) 40 | tms = data[:,0] 41 | ads = data[:,1:] 42 | lbls = labels_starttime[:,0] 43 | start_tms = labels_starttime[:,1] 44 | end_tms = labels_starttime[:,2] 45 | out = [] 46 | 47 | for i, v in enumerate(lbls): 48 | if istrain: 49 | train_keys.append(key) 50 | else: 51 | test_keys.append(key) 52 | s_ = get_slice(tms, ads, start_tms[i], end_tms[i]) 53 | times = s_[0] 54 | addrs = s_[1] 55 | subj, light = file_d.split('/')[-1].split('.')[0].split('_')[:2] 56 | metas.append({'key':str(key), 'subject':subj,'light condition':light, 'training sample':istrain}) 57 | subgrp = data_grp.create_group(str(key)) 58 | tm_dset = subgrp.create_dataset('times' , data=times, dtype=np.uint32) 59 | ad_dset = subgrp.create_dataset('addrs' , data=addrs, dtype=np.uint8) 60 | lbl_dset= subgrp.create_dataset('labels', data=lbls[i]-1, dtype=np.uint8) 61 | subgrp.attrs['meta_info']= str(metas[-1]) 62 | assert lbls[i]-1 in range(11) 63 | key += 1 64 | extra_grp.create_dataset('train_keys', data=train_keys) 65 | extra_grp.create_dataset('test_keys', data=test_keys) 66 | extra_grp.attrs['N'] = len(train_keys) + len(test_keys) 67 | extra_grp.attrs['Ntrain'] = len(train_keys) 68 | extra_grp.attrs['Ntest'] = len(test_keys) 69 | 70 | def gather_aedat(directory, start_id, end_id, filename_prefix = 'user'): 71 | if not os.path.isdir(directory): 72 | raise FileNotFoundError("DVS Gestures Dataset not found, looked at: {}".format(directory)) 73 | import glob 74 | fns = [] 75 | for i in range(start_id,end_id): 76 | search_mask = 
directory+'/'+filename_prefix+"{0:02d}".format(i)+'*.aedat' 77 | glob_out = glob.glob(search_mask) 78 | if len(glob_out)>0: 79 | fns+=glob_out 80 | return fns 81 | 82 | 83 | -------------------------------------------------------------------------------- /torchneuromorphic/nmnist/create_nmnist_small_noxtgt.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : test_dvsgestures.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Fri 19 Sep 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv3 11 | #----------------------------------------------------------------------------- 12 | from torchneuromorphic.nmnist.nmnist_dataloaders import * 13 | from torchneuromorphic.utils import plot_frames_imshow 14 | import torchneuromorphic.transforms as transforms 15 | from torch.utils.data import Subset, SubsetRandomSampler, RandomSampler 16 | 17 | def create_dataloader( 18 | root = 'data/nmnist/n_mnist.hdf5', 19 | batch_size = 72 , 20 | chunk_size_train = 300, 21 | chunk_size_test = 300, 22 | dt = 1000, 23 | transform_train = None, 24 | transform_test = None, 25 | target_transform_train = None, 26 | target_transform_test = None, 27 | **dl_kwargs): 28 | 29 | ds = [1,1] 30 | low_crop = [0,0] 31 | high_crop = [32,32] 32 | size = [2, np.ceil((high_crop[0]-low_crop[0])/ds[0]).astype('int'), np.ceil((high_crop[1]-low_crop[1])/ds[1]).astype('int')] 33 | 34 | print(size) 35 | default_transform = lambda chunk_size: transforms.Compose([ 36 | transforms.CropDims(low_crop,high_crop,[2,3]), 37 | transforms.Downsample(factor=[dt,1,ds[0],ds[1]]), 38 | transforms.ToCountFrame(T = chunk_size, size = size), 39 | transforms.ToTensor() 40 | ]) 41 | 42 | 43 | if transform_train is None: 44 | transform_train = default_transform(chunk_size_train) 45 | if transform_test is None: 46 | transform_test = 
def create_dataloader(
        root='data/nmnist/n_mnist.hdf5',
        batch_size=72,
        chunk_size_train=300,
        chunk_size_test=300,
        dt=1000,
        transform_train=None,
        transform_test=None,
        target_transform_train=None,
        target_transform_test=None,
        **dl_kwargs):
    """Build train/test DataLoaders over a small N-MNIST subset.

    Training draws 1000 random samples per epoch (with replacement);
    testing uses the first 100 samples of each digit. Targets are plain
    ``torch.long`` class indices (no one-hot, no time repetition).

    Parameters
    ----------
    root : str
        Path to the ``n_mnist.hdf5`` file (created on demand by the dataset).
    batch_size : int
        Batch size for both loaders.
    chunk_size_train, chunk_size_test : int
        Number of time bins per sample.
    dt : int
        Bin width passed to the Downsample transform.
    transform_train, transform_test, target_transform_train, target_transform_test :
        Optional overrides for the default transform pipelines.
    **dl_kwargs :
        Forwarded to ``torch.utils.data.DataLoader``.

    Returns
    -------
    (train_dl, test_dl) : tuple of DataLoader
    """
    ds = [1, 1]
    low_crop = [0, 0]
    high_crop = [32, 32]
    size = [2,
            np.ceil((high_crop[0] - low_crop[0]) / ds[0]).astype('int'),
            np.ceil((high_crop[1] - low_crop[1]) / ds[1]).astype('int')]
    # Fix: dropped a leftover debug `print(size)`.

    default_transform = lambda chunk_size: transforms.Compose([
        transforms.CropDims(low_crop, high_crop, [2, 3]),
        transforms.Downsample(factor=[dt, 1, ds[0], ds[1]]),
        transforms.ToCountFrame(T=chunk_size, size=size),
        transforms.ToTensor()
    ])

    if transform_train is None:
        transform_train = default_transform(chunk_size_train)
    if transform_test is None:
        transform_test = default_transform(chunk_size_test)

    if target_transform_train is None:
        target_transform_train = transforms.Compose([transforms.toDtype(torch.long)])
    if target_transform_test is None:
        target_transform_test = transforms.Compose([transforms.toDtype(torch.long)])

    train_d = NMNISTDataset(root,
                            train=True,
                            transform=transform_train,
                            target_transform=target_transform_train,
                            chunk_size=chunk_size_train)

    # 1000 random training samples per epoch, drawn with replacement.
    train_dl = torch.utils.data.DataLoader(
        train_d, batch_size=batch_size,
        sampler=RandomSampler(range(len(train_d)), num_samples=1000, replacement=True),
        **dl_kwargs)

    test_d = NMNISTDataset(root,
                           transform=transform_test,
                           target_transform=target_transform_test,
                           train=False,
                           chunk_size=chunk_size_test,
                           )

    # First 100 test keys of each digit class.
    test_subset_indices = test_d.keys_by_label[:, :100].reshape(-1)
    test_dl = torch.utils.data.DataLoader(
        test_d, batch_size=batch_size,
        sampler=SubsetRandomSampler(test_subset_indices),
        **dl_kwargs)
    return train_dl, test_dl
def gather_rosbags_class_folders(directory):
    """Find rosbag files grouped in per-class folders and split train/test.

    Expected folder structure::

        directory
        ├── test_list.csv      # one bag-path suffix per row; matching bags go to test
        ├── label1
        │   ├── bag1.bag
        │   └── bag2.bag
        ├── label2
        │   ├── bag1.bag
        │   └── bag2.bag

    Returns
    -------
    (train_bags, test_bags, all_classes) : tuple
        Lists of bag paths plus the sorted unique class (folder) names.
    """
    all_bags = glob.glob(os.path.join(directory, '**/*.bag'))
    test_list = np.loadtxt(os.path.join(directory, 'test_list.csv'), delimiter=',', dtype=str)

    test_bags = [bag for bag in all_bags
                 if np.any([bag.endswith(test) for test in test_list])]
    # Fix: membership test against a set is O(1); `bag not in test_bags`
    # rescanned the list for every bag.
    test_set = set(test_bags)
    train_bags = [bag for bag in all_bags if bag not in test_set]
    all_classes = np.unique([os.path.basename(os.path.dirname(b)) for b in all_bags])
    return train_bags, test_bags, all_classes


def create_events_hdf5(directory, hdf5_filename, gather_rosbags=gather_rosbags_class_folders):
    """Convert rosbag recordings into a single HDF5 events file.

    Layout written: ``data/<key>/{times,addrs,labels}`` per sample, plus
    ``extra/{train_keys,test_keys,label_order}`` and attrs N/Ntrain/Ntest.
    Labels are integer indices into the sorted class-folder names.
    """
    train_bags, test_bags, all_classes = gather_rosbags(directory)
    label_mapping = {label: i for i, label in enumerate(all_classes)}
    label_mapping_inv = {i: label for label, i in label_mapping.items()}
    label_order = [label_mapping_inv[i] for i in range(len(all_classes))]

    # Fix: hoisted a set so the per-file train/test check is O(1) instead
    # of scanning the train list for every bag.
    train_set = set(train_bags)
    with h5py.File(hdf5_filename, 'w') as f:
        f.clear()
        key = 0
        metas = []
        data_grp = f.create_group('data')
        extra_grp = f.create_group('extra')
        train_keys = []
        test_keys = []
        for file_d in train_bags + test_bags:
            filename = os.path.basename(file_d)
            class_name = os.path.basename(os.path.dirname(file_d))
            label = label_mapping[class_name]
            events = rosbag_to_events(file_d)
            # Address columns are [polarity, x, y].
            addrs = np.array([events['pol'], events['x'], events['y']]).T
            istrain = file_d in train_set
            if istrain:
                train_keys.append(key)
            else:
                test_keys.append(key)

            metas.append({'key': str(key), 'training sample': istrain})

            subgrp = data_grp.create_group(str(key))
            tm_dset = subgrp.create_dataset('times', data=events['ts'], dtype=np.uint32)
            # NOTE(review): uint8 truncates x/y addresses above 255 — confirm
            # the sensor resolution fits before reusing for larger sensors.
            ad_dset = subgrp.create_dataset('addrs', data=addrs, dtype=np.uint8)
            lbl_dset = subgrp.create_dataset('labels', data=label, dtype=np.uint8)
            subgrp.attrs['meta_info'] = str(metas[-1])
            key += 1
        extra_grp.create_dataset('train_keys', data=train_keys)
        extra_grp.create_dataset('test_keys', data=test_keys)
        extra_grp.create_dataset('label_order', data=np.array(label_order, dtype='S10'))
        extra_grp.attrs['N'] = len(train_keys) + len(test_keys)
        extra_grp.attrs['Ntrain'] = len(train_keys)
        extra_grp.attrs['Ntest'] = len(test_keys)
def create_dataloader(
        root='data/nmnist/n_mnist.hdf5',
        batch_size=72,
        chunk_size_train=300,
        chunk_size_test=300,
        dt=1000,
        transform_train=None,
        transform_test=None,
        target_transform_train=None,
        target_transform_test=None,
        **dl_kwargs):
    """Train/test DataLoaders over a 100-samples-per-digit N-MNIST subset.

    Both splits are restricted to the first 100 keys of each digit; targets
    are one-hot vectors repeated over the chunk's time steps.
    """
    downsample = [1, 1]
    crop_lo = [0, 0]
    crop_hi = [32, 32]
    frame_size = [2,
                  np.ceil((crop_hi[0] - crop_lo[0]) / downsample[0]).astype('int'),
                  np.ceil((crop_hi[1] - crop_lo[1]) / downsample[1]).astype('int')]

    def default_transform(chunk_size):
        # Crop to 32x32, bin into dt-wide count frames, convert to tensor.
        return transforms.Compose([
            transforms.CropDims(crop_lo, crop_hi, [2, 3]),
            transforms.Downsample(factor=[dt, 1, downsample[0], downsample[1]]),
            transforms.ToCountFrame(T=chunk_size, size=frame_size),
            transforms.ToTensor()
        ])

    if transform_train is None:
        transform_train = default_transform(chunk_size_train)
    if transform_test is None:
        transform_test = default_transform(chunk_size_test)

    if target_transform_train is None:
        target_transform_train = transforms.Compose([
            transforms.Repeat(chunk_size_train),
            transforms.toOneHot(10)])
    if target_transform_test is None:
        target_transform_test = transforms.Compose([
            transforms.Repeat(chunk_size_test),
            transforms.toOneHot(10)])

    loaders = []
    for is_train, xf, txf, chunk in (
            (True, transform_train, target_transform_train, chunk_size_train),
            (False, transform_test, target_transform_test, chunk_size_test)):
        dset = NMNISTDataset(root,
                             train=is_train,
                             transform=xf,
                             target_transform=txf,
                             chunk_size=chunk)
        # Keep only the first 100 keys of each digit class.
        subset_indices = dset.keys_by_label[:, :100].reshape(-1)
        loaders.append(torch.utils.data.DataLoader(
            dset, batch_size=batch_size,
            sampler=SubsetRandomSampler(subset_indices),
            **dl_kwargs))

    return loaders[0], loaders[1]
def nmnist_load_events_from_bin(file_path, max_duration=None):
    """Load one N-MNIST ATIS ``.bin`` recording as an event array.

    Returns an ``(N, 4)`` array with columns ``[timestamp, polarity, x, y]``.
    ``max_duration`` is accepted for interface compatibility but unused.
    """
    timestamps, xaddr, yaddr, pol = load_ATIS_bin(file_path)
    return np.column_stack([
        np.array(timestamps, dtype=np.uint32),
        np.array(pol, dtype=np.uint8),
        np.array(xaddr, dtype=np.uint16),
        np.array(yaddr, dtype=np.uint16)])


def nmnist_get_file_names(dataset_path):
    """Gather per-digit ``Train``/``Test`` ``.bin`` paths, balanced across digits.

    Each split is cropped to the minimum per-digit sample count so every
    digit contributes the same number of files.

    Returns
    -------
    (train_files, test_files) : tuple of list of list of str
        One sub-list of paths per digit 0-9.

    Raises
    ------
    FileNotFoundError
        If ``dataset_path`` does not exist.
    """
    if not os.path.isdir(dataset_path):
        raise FileNotFoundError("N-MNIST Dataset not found, looked at: {}".format(dataset_path))

    train_files = []
    test_files = []
    for digit in range(10):
        train_files.append(glob.glob(os.path.join(dataset_path, 'Train/{}/*.bin'.format(digit))))
        test_files.append(glob.glob(os.path.join(dataset_path, 'Test/{}/*.bin'.format(digit))))

    # We need the same number of train and test samples for each digit;
    # compute the minimum across digits.
    max_n_train = min(map(len, train_files))
    max_n_test = min(map(len, test_files))
    n_train = max_n_train
    n_test = max_n_test
    assert (n_train <= max_n_train) and (n_test <= max_n_test), 'Requested more samples than present in dataset'

    print("N-MNIST: {} train samples and {} test samples per digit (max: {} train and {} test)".format(n_train, n_test, max_n_train, max_n_test))
    # Crop extra samples of each digit.
    train_files = [files[:n_train] for files in train_files]
    test_files = [files[:n_test] for files in test_files]

    return train_files, test_files


def create_events_hdf5(directory, hdf5_filename):
    """Pack all N-MNIST ``.bin`` recordings into one HDF5 file.

    Layout written: ``data/<key>/{times,addrs,labels}`` per sample, plus
    ``extra/{train_keys,test_keys,train_keys_by_label,test_keys_by_label}``
    and attrs N/Ntrain/Ntest. Labels come from the parent folder name
    (``Train/<digit>/xxx.bin``).
    """
    fns_train, fns_test = nmnist_get_file_names(directory)
    fns_train = [val for sublist in fns_train for val in sublist]
    fns_test = [val for sublist in fns_test for val in sublist]
    test_keys = []
    train_keys = []
    train_label_list = [[] for i in range(10)]
    test_label_list = [[] for i in range(10)]
    # Fix: `file_d in fns_train` was an O(n) list scan per file (O(n^2)
    # overall); a set makes the membership check O(1).
    train_fns_set = set(fns_train)

    with h5py.File(hdf5_filename, 'w') as f:
        f.clear()
        key = 0
        metas = []
        data_grp = f.create_group('data')
        extra_grp = f.create_group('extra')
        for file_d in tqdm(fns_train + fns_test):
            istrain = file_d in train_fns_set
            data = nmnist_load_events_from_bin(file_d)
            times = data[:, 0]
            addrs = data[:, 1:]
            # The digit label is the parent directory name.
            label = int(file_d.split('/')[-2])

            if istrain:
                train_keys.append(key)
                train_label_list[label].append(key)
            else:
                test_keys.append(key)
                test_label_list[label].append(key)
            metas.append({'key': str(key), 'training sample': istrain})
            subgrp = data_grp.create_group(str(key))
            tm_dset = subgrp.create_dataset('times', data=times, dtype=np.uint32)
            ad_dset = subgrp.create_dataset('addrs', data=addrs, dtype=np.uint8)
            lbl_dset = subgrp.create_dataset('labels', data=label, dtype=np.uint8)
            subgrp.attrs['meta_info'] = str(metas[-1])
            assert label in range(10)
            key += 1
        extra_grp.create_dataset('train_keys', data=train_keys)
        extra_grp.create_dataset('train_keys_by_label', data=train_label_list)
        extra_grp.create_dataset('test_keys_by_label', data=test_label_list)
        extra_grp.create_dataset('test_keys', data=test_keys)
        extra_grp.attrs['N'] = len(train_keys) + len(test_keys)
        extra_grp.attrs['Ntrain'] = len(train_keys)
        extra_grp.attrs['Ntest'] = len(test_keys)
def expand_targets(targets, T=500, burnin=0):
    """Tile a target array along a new leading time axis of length T.

    The first ``burnin`` time steps are zeroed out.
    """
    y = np.tile(targets.copy(), [T, 1, 1])
    y[:burnin] = 0
    return y


def one_hot(mbt, num_classes):
    """One-hot encode a 1-D array of integer class labels.

    Returns a float array of shape ``(len(mbt), num_classes)``.
    """
    out = np.zeros([mbt.shape[0], num_classes])
    out[np.arange(mbt.shape[0], dtype='int'), mbt.astype('int')] = 1
    return out


def find_first(a, tgt):
    """Index of the first element of the sorted sequence `a` that is >= tgt."""
    return bisect.bisect_left(a, tgt)


def cast_evs(evs):
    """Convert event times (seconds -> microseconds) and addresses to uint64."""
    ts = (evs[:, 0] * 1e6).astype('uint64')
    ad = (evs[:, 1:]).astype('uint64')
    return ts, ad


def get_subsampled_coordinates(evs, ds_h, ds_w):
    """Integer-divide event x/y addresses by the downsampling factors.

    Assumes evs columns are ``[t, x, y, p]`` — TODO confirm against callers.
    Returns int-typed ``(x_coords, y_coords)``.
    """
    x_coords = evs[:, 1] // ds_w
    y_coords = evs[:, 2] // ds_h
    # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24 —
    # referencing it now raises AttributeError. Compare against the default
    # integer dtype and cast with the builtin `int` instead.
    if x_coords.dtype != np.int_:
        x_coords = x_coords.astype(int)
    if y_coords.dtype != np.int_:
        y_coords = y_coords.astype(int)
    return x_coords, y_coords


def get_binary_frame_np(arr, evs, ds_w=1, ds_h=1):
    """Write +/-1 polarity values of `evs` into the 2-D frame `arr` in place."""
    x_coords, y_coords = get_subsampled_coordinates(evs, ds_h, ds_w)
    arr[x_coords, y_coords] = 2 * evs[:, 3] - 1


def get_binary_frame(arr, evs, ds_w=1, ds_h=1):
    """Mark event locations of `evs` with 1 in the 2-D frame `arr` in place."""
    x_coords, y_coords = get_subsampled_coordinates(evs, ds_h, ds_w)
    arr[x_coords, y_coords] = 1


def get_slice(times, addrs, start_time, end_time):
    """Slice events with ``start_time <= t < end_time``; times re-referenced to 0.

    Raises IndexError("Empty batch found") when no event falls in the window.
    """
    try:
        idx_beg = find_first(times, start_time)
        idx_end = find_first(times[idx_beg:], end_time) + idx_beg
        return times[idx_beg:idx_end] - times[idx_beg], addrs[idx_beg:idx_end]
    except IndexError:
        raise IndexError("Empty batch found")


def get_event_slice(times, addrs, start_time, T, size=[128, 128], ds=1, dt=1000):
    """Slice ``T*dt`` time units from `start_time` and bin them into count frames."""
    try:
        idx_beg = find_first(times, start_time)
        idx_end = find_first(times[idx_beg:], start_time + T * dt) + idx_beg
        return chunk_evs_pol_dvs(times[idx_beg:idx_end], addrs[idx_beg:idx_end],
                                 deltat=dt, chunk_size=T, size=size, ds_w=ds, ds_h=ds)
    except IndexError:
        raise IndexError("Empty batch found")


def get_tmad_slice(times, addrs, start_time, T):
    """Return ``[t, addr...]`` rows for events in ``[start_time, start_time+T)``."""
    try:
        idx_beg = find_first(times, start_time)
        idx_end = find_first(times[idx_beg:], start_time + T) + idx_beg
        return np.column_stack([times[idx_beg:idx_end], addrs[idx_beg:idx_end]])
    except IndexError:
        raise IndexError("Empty batch found")


def get_time_surface(evs, invtau=1e-6, size=(346, 260, 2)):
    """Exponential time surface: per-pixel difference of the two polarities."""
    # int64 zeros minus inf upcasts to a float array filled with -inf, so
    # pixels that never receive an event contribute exp(-inf) == 0 below.
    tr = np.zeros(size, 'int64') - np.inf

    for ev in evs:
        tr[ev[2], ev[1], ev[3]] = ev[0]

    a = np.exp(tr[:, :, 0] * invtau) - np.exp(tr[:, :, 1] * invtau)

    return a


def chunk_evs_dvs(evs, deltat=1000, chunk_size=500, size=[304, 240], ds_w=1, ds_h=1):
    """Bin raw ``[t, x, y, p]`` events into `chunk_size` binary frames of `deltat` each."""
    t_start = evs[0, 0]
    # Fix: the bin edges previously started at `t_start + chunk_size`, which
    # disagreed with frame_evs/chunk_evs_pol_dvs (both start at t_start) and
    # shifted every bin by chunk_size time units.
    ts = range(t_start, t_start + chunk_size * deltat, deltat)
    chunks = np.zeros([len(ts)] + size, dtype='int8')
    idx_start = 0
    idx_end = 0
    for i, t in enumerate(ts):
        idx_end += find_first(evs[idx_end:, 0], t)
        if idx_end > idx_start:
            get_binary_frame_np(chunks[i, ...], evs[idx_start:idx_end], ds_h=ds_h, ds_w=ds_w)
        idx_start = idx_end
    return chunks


def frame_evs(times, addrs, deltat=1000, duration=500, size=[240], downsample=[1]):
    """Accumulate event counts into `duration` frames along arbitrary address dims."""
    t_start = times[0]
    ts = range(t_start, t_start + duration * deltat, deltat)
    chunks = np.zeros([len(ts)] + size, dtype='int8')
    idx_start = 0
    idx_end = 0
    for i, t in enumerate(ts):
        idx_end += find_first(times[idx_end:], t)
        if idx_end > idx_start:
            ee = addrs[idx_start:idx_end]
            # Fix: astype(np.int) -> astype(int); np.int was removed in NumPy 1.24.
            ev = [(ee[:, j] // d).astype(int) for j, d in enumerate(downsample)]
            np.add.at(chunks, tuple([i] + ev), 1)
        idx_start = idx_end
    return chunks


def chunk_evs_pol_dvs(times, addrs, deltat=1000, chunk_size=500, size=[2, 304, 240], ds_w=1, ds_h=1):
    """Bin ``[pol, x, y]`` events into `chunk_size` count frames of `deltat` each."""
    t_start = times[0]
    ts = range(t_start, t_start + chunk_size * deltat, deltat)
    chunks = np.zeros([len(ts)] + size, dtype='int8')
    idx_start = 0
    idx_end = 0
    for i, t in enumerate(ts):
        idx_end += find_first(times[idx_end:], t)
        if idx_end > idx_start:
            ee = addrs[idx_start:idx_end]
            pol = ee[:, 0]
            # Fix: astype(np.int) -> astype(int); np.int was removed in NumPy 1.24.
            x = (ee[:, 1] // ds_w).astype(int)
            y = (ee[:, 2] // ds_h).astype(int)
            np.add.at(chunks, (i, pol, x, y), 1)
        idx_start = idx_end
    return chunks


if __name__ == "__main__":
    import h5py

    dataset = h5py.File('/home/eneftci_local/Projects/share/data/massiset/massiset_sparse.hdf5', 'r')
    # Fix: `.value` was removed in h5py 3.x — use [()]. The demo also called
    # an undefined `chunk_evs`; chunk_evs_dvs matches the signature used here.
    evs = dataset.get('backpack')['data_train'][()]
    cevs = chunk_evs_dvs(evs, chunk_size=500, deltat=1000, size=[304 // 4, 240 // 4], ds_w=4, ds_h=4)
class RosbagDataset(NeuromorphicDataset):
    """Event dataset backed by an HDF5 file built from ROS bag recordings.

    HDF5 layout (written by ``create_events_hdf5``):
    ``data/<key>/{times,addrs,labels}`` per sample, plus
    ``extra/{train_keys,test_keys,label_order}`` and attrs Ntrain/Ntest/N.
    """

    def __init__(
            self,
            root,
            resources_local=None,
            train=True,
            transform=None,
            target_transform=None,
            download_and_create=True,
            chunk_size = 500):
        """Open (and, via the base class, create if needed) the HDF5 dataset.

        Parameters
        ----------
        root : str
            Path of the HDF5 file.
        resources_local : list of str, optional
            Folder(s) holding the raw rosbags; defaults to 'data/rosbags/raw'.
        train : bool
            Select the train or test split.
        transform, target_transform : callable, optional
            Applied to each sample / target in __getitem__.
        download_and_create : bool
            Read by the base class — presumably controls building the HDF5
            file; NeuromorphicDataset is not visible here, verify.
        chunk_size : int
            Number of milliseconds sliced per sample (see `sample`).
        """
        self.directory = 'data/rosbags/'
        if resources_local is None:
            resources_local = [os.path.join(self.directory, 'raw')]
        self.resources_local = resources_local
        # There is no download URL for this dataset: the "url" slot carries a
        # human-readable hint telling the user where to place their bags.
        no_rosbag_found = 'No rosbag found. Place your rosbags in {}/{}'.format(self.directory, self.resources_local[0])
        self.resources_url = [[no_rosbag_found, None, 'your rosbags']]

        self.n = 0
        self.download_and_create = download_and_create
        self.root = root
        self.train = train
        self.chunk_size = chunk_size

        # NOTE(review): the base-class __init__ appears to trigger
        # download/creation of `root` before it is opened below — confirm
        # against NeuromorphicDataset.
        super(RosbagDataset, self).__init__(
            root,
            transform=transform,
            target_transform=target_transform )

        with h5py.File(root, 'r', swmr=True, libver="latest") as f:
            self.label_order = f['extra']['label_order'][()]
            print("Labels in order: {}".format(self.label_order))
            self.n_labels = len(self.label_order)
            if train:
                self.n = f['extra'].attrs['Ntrain']
                self.keys = f['extra']['train_keys'][()]
            else:
                self.n = f['extra'].attrs['Ntest']
                self.keys = f['extra']['test_keys'][()]

    def download(self):
        """Delegate to the base class (which only reports the placement hint)."""
        super(RosbagDataset, self).download()

    def create_hdf5(self):
        """Build the HDF5 file from the raw rosbags folder."""
        create_events_hdf5(self.resources_local[0], self.root)

    def __len__(self):
        # Number of samples in the selected split.
        return self.n

    def __getitem__(self, key):
        """Return (data, target) for index `key` of the selected split.

        Test indices are offset by Ntrain because train and test samples
        share one global key space in the file.
        """
        #Important to open and close in getitem to enable num_workers>0
        with h5py.File(self.root, 'r', swmr=True, libver="latest") as f:
            if not self.train:
                key = key + f['extra'].attrs['Ntrain']
            assert key in self.keys
            data, target = sample(
                f,
                key,
                T = self.chunk_size,
                shuffle=self.train)

        if self.transform is not None:
            data = self.transform(data)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return data, target


def sample(hdf5_file,
           key,
           T = 500,
           shuffle = False):
    """Read one sample from an open HDF5 file.

    Returns (tmad, label): `tmad` holds [t, addr...] rows for the first
    T*1000 time units of the recording, re-referenced so the first event
    is at t=0.

    NOTE(review): `shuffle` is accepted but never used, and `tend` is
    computed but unused — the slice always starts at time 0.
    """
    dset = hdf5_file['data'][str(key)]
    label = dset['labels'][()]
    tend = dset['times'][-1]  # NOTE(review): unused
    start_time = 0
    tmad = get_tmad_slice(dset['times'][()], dset['addrs'][()], start_time, T*1000)
    tmad[:,0]-=tmad[0,0]  # re-reference timestamps to the first event
    return tmad, label
def create_dataloader(
        root='data/rosbags/rosbags_build.hdf5',
        batch_size=72,
        chunk_size_train=500,
        chunk_size_test=1800,
        ds=None,
        dt=1000,
        transform_train=None,
        transform_test=None,
        target_transform_train=None,
        target_transform_test=None,
        n_events_attention=None,
        **dl_kwargs):
    """Build train/test DataLoaders over the rosbag HDF5 dataset.

    `n_events_attention` is accepted for interface compatibility but unused
    here. The train loader shuffles; the test loader does not.
    """
    if ds is None:
        ds = 4
    frame_size = [2, 128 // ds, 128 // ds]
    crop_center = [64, 64]

    def default_transform(chunk_size):
        # Center-crop, bin into dt-wide count frames, convert to tensor.
        return Compose([
            CropCenter(crop_center, frame_size),
            Downsample(factor=[dt, 1, 1, 1]),
            ToCountFrame(T=chunk_size, size=frame_size),
            ToTensor()
        ])

    if transform_train is None:
        transform_train = default_transform(chunk_size_train)
    if transform_test is None:
        transform_test = default_transform(chunk_size_test)

    if target_transform_train is None:
        target_transform_train = Compose([Repeat(chunk_size_train), toOneHot(4)]) # HACK HACK HACK! Can't get n_labels from here
    if target_transform_test is None:
        target_transform_test = Compose([Repeat(chunk_size_test), toOneHot(4)])

    train_set = RosbagDataset(root,
                              train=True,
                              transform=transform_train,
                              target_transform=target_transform_train,
                              chunk_size=chunk_size_train)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, **dl_kwargs)

    test_set = RosbagDataset(root,
                             transform=transform_test,
                             target_transform=target_transform_test,
                             train=False,
                             chunk_size=chunk_size_test)

    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, **dl_kwargs)

    return train_loader, test_loader
class NMNISTDataset(NeuromorphicDataset):
    """N-MNIST event dataset backed by a single HDF5 file.

    HDF5 layout (written by ``create_events_hdf5``):
    ``data/<key>/{times,addrs,labels}`` per sample, plus
    ``extra/{train_keys,test_keys,*_keys_by_label}`` and attrs Ntrain/Ntest.
    """

    # Mendeley download locations for the raw Train/Test zip archives.
    resources_url = [['http://data.mendeley.com/public-files/datasets/468j46mzdv/files/39c25547-014b-4137-a934-9d29fa53c7a0/file_downloaded',None, 'Train.zip'],
                     ['http://data.mendeley.com/public-files/datasets/468j46mzdv/files/05a4d654-7e03-4c15-bdfa-9bb2bcbea494/file_downloaded', None, 'Test.zip']]
    directory = 'data/nmnist/'
    resources_local = [directory+'Train', directory+'Test']

    def __init__(
            self,
            root,
            train=True,
            transform=None,
            target_transform=None,
            download_and_create=True,
            chunk_size = 500,
            dt = 1000):
        """Open (and, via the base class, create if needed) the HDF5 dataset.

        Parameters
        ----------
        root : str
            Path to ``n_mnist.hdf5``; its parent folder becomes the raw
            data directory.
        train : bool
            Select the train or test split.
        transform, target_transform : callable, optional
            Applied per sample / target in __getitem__.
        download_and_create : bool
            Read by the base class — presumably controls downloading and
            building the HDF5 file; verify against NeuromorphicDataset.
        chunk_size : int
            Number of time bins per sample.
        dt : int
            Bin width; __getitem__ slices chunk_size*dt time units.
        """
        self.n = 0
        self.nclasses = self.num_classes = 10
        self.download_and_create = download_and_create
        self.root = root
        self.train = train
        self.dt = dt
        self.chunk_size = chunk_size
        # Derive the raw-data directory from the hdf5 path and override the
        # class-level resources_local accordingly.
        self.directory = root.split('n_mnist.hdf5')[0]
        self.resources_local = [self.directory + 'Train', self.directory + 'Test']
        super(NMNISTDataset, self).__init__(
            root,
            transform=transform,
            target_transform=target_transform )
        with h5py.File(root, 'r', swmr=True, libver="latest") as f:
            try:
                if train:
                    self.n = f['extra'].attrs['Ntrain']
                    self.keys = f['extra']['train_keys'][()]
                    self.keys_by_label = f['extra']['train_keys_by_label'][()]
                else:
                    self.n = f['extra'].attrs['Ntest']
                    self.keys = f['extra']['test_keys'][()]
                    self.keys_by_label = f['extra']['test_keys_by_label'][()]
                    # Test keys are stored in the global key space (offset by
                    # Ntrain); shift them to start at 0.
                    # NOTE(review): only the test branch normalizes — confirm
                    # train keys are already 0-based in the file.
                    self.keys_by_label[:,:] -= self.keys_by_label[0,0] #normalize
            except AttributeError:
                print('Attribute not found in hdf5 file. You may be using an old hdf5 build. Delete {0} and run again'.format(root))
                raise


    def download(self):
        """Fetch the raw Train/Test archives via the base class."""
        isexisting = super(NMNISTDataset, self).download()

    def create_hdf5(self):
        """Build the HDF5 file from the raw Train/Test folders."""
        create_events_hdf5(self.directory, self.root)


    def __len__(self):
        # Number of samples in the selected split.
        return self.n

    def __getitem__(self, key):
        """Return (data, target) for split-local index `key`.

        The index is mapped through the split's key table before reading.
        """
        #Important to open and close in getitem to enable num_workers>0
        with h5py.File(self.root, 'r', swmr=True, libver="latest") as f:
            if self.train:
                key = f['extra']['train_keys'][key]
            else:
                key = f['extra']['test_keys'][key]
            data, target = sample(
                f,
                key,
                T = self.chunk_size*self.dt)

        if self.transform is not None:
            data = self.transform(data)

        if self.target_transform is not None:
            target = self.target_transform(target)
        return data, target

def sample(hdf5_file,
           key,
           T = 300):
    """Read one sample from an open HDF5 file.

    Returns (tmad, label): `tmad` holds [t, addr...] rows for the first
    T*1000 time units, re-referenced so the first event is at t=0.

    NOTE(review): the caller already passes T = chunk_size*dt and this
    multiplies by 1000 again — with dt=1000 that slices chunk_size*1e6
    time units. Confirm the intended units; `tend` and `ha` are unused.
    """
    dset = hdf5_file['data'][str(key)]
    label = dset['labels'][()]
    tend = dset['times'][-1]  # NOTE(review): unused
    start_time = 0
    ha = dset['times'][()]  # NOTE(review): unused
    tmad = get_tmad_slice(dset['times'][()], dset['addrs'][()], start_time, T*1000)
    tmad[:,0]-=tmad[0,0]  # re-reference timestamps to the first event
    return tmad, label
def create_datasets(
        root='data/nmnist/n_mnist.hdf5',
        batch_size=72,
        chunk_size_train=300,
        chunk_size_test=300,
        ds=1,
        dt=1000,
        transform_train=None,
        transform_test=None,
        target_transform_train=None,
        target_transform_test=None):
    """Construct train and test NMNISTDataset objects with default transforms.

    `batch_size` is accepted for interface compatibility but unused here.
    Default targets are one-hot vectors repeated across the chunk's time
    steps.
    """
    frame_size = [2, 32 // ds, 32 // ds]

    def count_frame_transform(chunk_size):
        # Crop to 32x32, bin into dt-wide count frames, convert to tensor.
        return Compose([
            CropDims(low_crop=[0, 0], high_crop=[32, 32], dims=[2, 3]),
            Downsample(factor=[dt, 1, 1, 1]),
            ToCountFrame(T=chunk_size, size=frame_size),
            ToTensor()])

    if transform_train is None:
        transform_train = count_frame_transform(chunk_size_train)
    if transform_test is None:
        transform_test = count_frame_transform(chunk_size_test)
    if target_transform_train is None:
        target_transform_train = Compose([Repeat(chunk_size_train), toOneHot(10)])
    if target_transform_test is None:
        target_transform_test = Compose([Repeat(chunk_size_test), toOneHot(10)])

    train_ds = NMNISTDataset(root, train=True,
                             transform=transform_train,
                             target_transform=target_transform_train,
                             chunk_size=chunk_size_train,
                             dt=dt)

    test_ds = NMNISTDataset(root, transform=transform_test,
                            target_transform=target_transform_test,
                            train=False,
                            chunk_size=chunk_size_test,
                            dt=dt)

    return train_ds, test_ds

def create_dataloader(
        root='data/nmnist/n_mnist.hdf5',
        batch_size=72,
        chunk_size_train=300,
        chunk_size_test=300,
        ds=1,
        dt=1000,
        transform_train=None,
        transform_test=None,
        target_transform_train=None,
        target_transform_test=None,
        **dl_kwargs):
    """Wrap the datasets from create_datasets() in DataLoaders.

    The train loader shuffles; the test loader does not. `**dl_kwargs`
    is forwarded to both DataLoaders.
    """
    train_d, test_d = create_datasets(
        root=root,
        batch_size=batch_size,
        chunk_size_train=chunk_size_train,
        chunk_size_test=chunk_size_test,
        ds=ds,
        dt=dt,
        transform_train=transform_train,
        transform_test=transform_test,
        target_transform_train=target_transform_train,
        target_transform_test=target_transform_test)

    train_dl = torch.utils.data.DataLoader(train_d, shuffle=True, batch_size=batch_size, **dl_kwargs)
    test_dl = torch.utils.data.DataLoader(test_d, shuffle=False, batch_size=batch_size, **dl_kwargs)

    return train_dl, test_dl
def sign_get_file_names(dataset_path):
    """Walk the per-letter folders of the DVS-Sign dataset and split train/test.

    For each letter folder (see `mapping`), the first `num_train` globbed
    .mat files go to the train dict and the next `num_test` to the test
    dict, keyed by the label token taken from each file name.

    Returns
    -------
    (sign_dict_train, sign_dict_test) : tuple of dict
        label -> list of absolute .mat file paths.

    Raises
    ------
    FileNotFoundError
        If `dataset_path` does not exist.

    NOTE(review): the split relies on glob's filesystem-dependent ordering,
    so it is not deterministic across machines — consider sorting. The
    traversal also mutates the process working directory via os.chdir
    (restored at the end), which is not thread/reentrancy friendly.
    """
    if not os.path.isdir(dataset_path):
        raise FileNotFoundError("DVSSign Dataset not found, looked at: {}".format(dataset_path))

    sign_dict_train = {}

    sign_dict_test = {}

    # need to create train test split
    # simple 80/20 split based on number of gestures per class on a fraction of the dataset
    num_train = 400 #3360 for the full dataset
    num_test = 100 #840 for the full dataset
    cwd = os.getcwd()
    # Enter the first letter folder; each iteration then hops sideways
    # ('../<key>') into the next letter's folder.
    os.chdir(dataset_path+'/a')
    for key in mapping.keys():
        #if mapping[key] > 9:
        #    continue
        os.chdir('../'+key)
        curr_dir = os.getcwd()
        # each class has 4200 samples.
        # if doing 80/20 split, 3360 train 840 test
        num_samples = 1
        for file in glob.glob("*.mat"):
            # Label token is the file-name prefix before the first '_'.
            label = file.split('_')[0]

            if num_samples < num_train:
                if label in sign_dict_train.keys():
                    sign_dict_train[label].append(curr_dir+'/'+file)
                else:
                    sign_dict_train[label] = []
                    sign_dict_train[label].append(curr_dir+'/'+file)
            elif num_samples < num_train+num_test:
                if label in sign_dict_test.keys():
                    sign_dict_test[label].append(curr_dir+'/'+file)
                else:
                    sign_dict_test[label] = []
                    sign_dict_test[label].append(curr_dir+'/'+file)
            num_samples += 1

    print("DVSSign: {} train samples and {} test samples per digit (max: {} train and {} test)".format(num_train*NUM_CLASSES, num_test*NUM_CLASSES, num_train*NUM_CLASSES, num_test*NUM_CLASSES))
    # Restore the working directory changed above.
    os.chdir(cwd)
    return sign_dict_train, sign_dict_test
num_train*NUM_CLASSES, num_test*NUM_CLASSES)) 109 | # Crop extra samples of each digits 110 | #train_files = map(lambda l: l[:n_train], train_files) 111 | #test_files = map(lambda l: l[:n_test], test_files) 112 | os.chdir(cwd) 113 | return sign_dict_train, sign_dict_test 114 | 115 | def create_events_hdf5(directory, hdf5_filename): 116 | fns_train, fns_test = sign_get_file_names(directory) 117 | #fns_train = [val for sublist in fns_train for val in sublist] 118 | #fns_test = [val for sublist in fns_test for val in sublist] 119 | test_keys = [] 120 | train_keys = [] 121 | train_label_list = [[] for i in range(NUM_CLASSES)] 122 | test_label_list = [[] for i in range(NUM_CLASSES)] 123 | 124 | with h5py.File(hdf5_filename, 'w') as f: 125 | f.clear() 126 | #key = 0 127 | num = 0 128 | metas = [] 129 | data_grp = f.create_group('data') 130 | extra_grp = f.create_group('extra') 131 | is_train = True 132 | for key in tqdm(fns_train.keys()): 133 | print("This is the key", key) 134 | #if mapping[key] > 9: 135 | # break 136 | for file_d in fns_train[key]:#.values(): 137 | #for i in range(len(file_d)): 138 | data = sign_load_events_from_mat(file_d) 139 | times = data[:,0] 140 | addrs = data[:,1:] 141 | label = mapping[key] #int(file_d.split('/')[-2]) 142 | 143 | train_keys.append(num) 144 | 145 | train_label_list[mapping[key]].append(num) 146 | 147 | metas.append({'key':str(num), 'training sample':True}) 148 | subgrp = data_grp.create_group(str(num)) 149 | tm_dset = subgrp.create_dataset('times' , data=times, dtype = np.uint32) 150 | ad_dset = subgrp.create_dataset('addrs' , data=addrs, dtype = np.uint8) 151 | lbl_dset= subgrp.create_dataset('labels', data=label, dtype = np.uint8) 152 | subgrp.attrs['meta_info']= str(metas[-1]) 153 | num += 1 154 | 155 | for key in tqdm(fns_test.keys()): 156 | print("This is the key test", key) 157 | #if mapping[key] > 9: 158 | # break 159 | for file_d in fns_test[key]:#.values(): 160 | #for i in range(len(file_d)): 161 | data = 
sign_load_events_from_mat(file_d) 162 | times = data[:,0] 163 | addrs = data[:,1:] 164 | label = mapping[key] 165 | 166 | test_keys.append(num) 167 | 168 | test_label_list[mapping[key]].append(num) 169 | 170 | metas.append({'key':str(num), 'testing sample':True}) 171 | subgrp = data_grp.create_group(str(num)) 172 | tm_dset = subgrp.create_dataset('times' , data=times, dtype = np.uint32) 173 | ad_dset = subgrp.create_dataset('addrs' , data=addrs, dtype = np.uint8) 174 | lbl_dset= subgrp.create_dataset('labels', data=label, dtype = np.uint8) 175 | subgrp.attrs['meta_info']= str(metas[-1]) 176 | num += 1 177 | 178 | print(len(train_keys)) 179 | print(len(test_keys)) 180 | 181 | extra_grp.create_dataset('train_keys', data = train_keys) 182 | extra_grp.create_dataset('train_keys_by_label', data = train_label_list) 183 | extra_grp.create_dataset('test_keys_by_label', data = test_label_list) 184 | extra_grp.create_dataset('test_keys', data = test_keys) 185 | extra_grp.attrs['N'] = len(train_keys) + len(test_keys) 186 | extra_grp.attrs['Ntrain'] = len(train_keys) 187 | extra_grp.attrs['Ntest'] = len(test_keys) 188 | -------------------------------------------------------------------------------- /torchneuromorphic/shd/shd_dataloaders.py: -------------------------------------------------------------------------------- 1 | import struct 2 | import time 3 | import numpy as np 4 | import scipy.misc 5 | import h5py 6 | import torch.utils.data 7 | from ..neuromorphic_dataset import NeuromorphicDataset 8 | from ..events_timeslices import * 9 | from ..transforms import * 10 | import os 11 | 12 | mapping = { 0 :'E0', 13 | 1 :'E1', 14 | 2 :'E2', 15 | 3 :'E3', 16 | 4 :'E4', 17 | 5 :'E5', 18 | 6 :'E6', 19 | 7 :'E7', 20 | 8 :'E8', 21 | 9 :'E9', 22 | 10: 'G0', 23 | 11: 'G1', 24 | 12: 'G2', 25 | 13: 'G3', 26 | 14: 'G4', 27 | 15: 'G5', 28 | 16: 'G6', 29 | 17: 'G7', 30 | 18: 'G8', 31 | 19: 'G9', 32 | } 33 | 34 | def one_hot1d(mbt, num_classes): 35 | out = np.zeros([num_classes], 
# 20 SHD classes: English spoken digits 'E0'-'E9' then German 'G0'-'G9'.
mapping = dict(
    [(i, 'E{}'.format(i)) for i in range(10)] +
    [(i + 10, 'G{}'.format(i)) for i in range(10)])


def one_hot1d(mbt, num_classes):
    """Return a float32 one-hot vector of length num_classes with index mbt set."""
    out = np.zeros([num_classes], dtype='float32')
    out[int(mbt)] = 1
    return out


def sample(hdf5_file, key, T=500, shuffle=False):
    """Slice the first T ms of events for sample `key`.

    Args:
        hdf5_file: open HDF5 handle with data/<key>/{times,addrs,labels}.
        key: integer sample key.
        T: number of 1 ms time bins to keep (timestamps are in microseconds).
        shuffle: unused; kept for interface compatibility with the other
            dataset loaders in this package.

    Returns:
        (tmad, label) where tmad is [t, addr] rows with t rebased to 0.
    """
    dset = hdf5_file['data'][str(key)]
    label = dset['labels'][()]
    start_time = 0
    tmad = get_tmad_slice(dset['times'][()], dset['addrs'][()], start_time, T * 1000)
    tmad[:, 0] -= tmad[0, 0]
    return tmad, label


def create_dataloader(root='data/shd/shd.hdf5',
                      batch_size=72,
                      chunk_size_train=1000,
                      chunk_size_test=1000,
                      ds=1,
                      dt=500,
                      transform_train=None,
                      transform_test=None,
                      target_transform_train=None,
                      target_transform_test=None,
                      **dl_kwargs):
    """Build train/test DataLoaders over the SHD HDF5 file.

    Args:
        root: path to the preprocessed SHD HDF5 file.
        batch_size: loader batch size.
        chunk_size_train / chunk_size_test: number of time bins per sample.
        ds: channel downsampling factor (SHD has 700 input channels).
        dt: time bin width in microseconds.
        transform_* / target_transform_*: optional overrides; sensible
            count-frame pipelines are installed when None.
        **dl_kwargs: forwarded to torch.utils.data.DataLoader.

    Returns:
        (train_dl, test_dl)
    """
    size = [700 // ds, 1, 1]

    if transform_train is None:
        transform_train = Compose([
            Downsample(factor=[dt, ds]),
            ToChannelHeightWidth(),
            ToCountFrame(T=chunk_size_train, size=size),
            ToTensor()])
    if transform_test is None:
        transform_test = Compose([
            Downsample(factor=[dt, ds]),
            ToChannelHeightWidth(),
            ToCountFrame(T=chunk_size_test, size=size),
            ToTensor()])
    if target_transform_train is None:
        target_transform_train = Compose([Repeat(chunk_size_train), toOneHot(len(mapping))])
    if target_transform_test is None:
        target_transform_test = Compose([Repeat(chunk_size_test), toOneHot(len(mapping))])

    train_d = SHDDataset(root, train=True,
                         transform=transform_train,
                         target_transform=target_transform_train,
                         chunk_size=chunk_size_train)
    train_dl = torch.utils.data.DataLoader(train_d, shuffle=True,
                                           batch_size=batch_size, **dl_kwargs)

    test_d = SHDDataset(root, train=False,
                        transform=transform_test,
                        target_transform=target_transform_test,
                        chunk_size=chunk_size_test)
    test_dl = torch.utils.data.DataLoader(test_d, shuffle=False,
                                          batch_size=batch_size, **dl_kwargs)

    return train_dl, test_dl
# Digit labels for the single-NMNIST base classes.
mapping = {i: str(i) for i in range(10)}


class DoubleNMNISTDataset(NeuromorphicDataset):
    """Pairs of N-MNIST digits composed on one double-size canvas.

    A class label in [0, 100) encodes a two-digit combination: the tens digit
    is drawn on the top half, the units digit on the bottom half, each with a
    random horizontal offset.
    """

    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download_and_create=True, chunk_size=500, nclasses=5,
                 samples_per_class=2, labels_u=range(5)):
        self.n = samples_per_class * nclasses
        self.nclasses = nclasses
        self.download_and_create = download_and_create
        self.root = root
        self.train = train
        self.chunk_size = chunk_size
        self.labels_u = labels_u
        # Two-digit class id -> left (tens) and right (units) base digits.
        labels_left = self.labels_u // 10
        labels_right = self.labels_u % 10
        self.labels = np.repeat(self.labels_u, samples_per_class)
        self.labels_map = dict(zip(np.unique(self.labels), np.arange(nclasses)))

        super(DoubleNMNISTDataset, self).__init__(root=None,
                                                  transform=transform,
                                                  target_transform=target_transform)

        # NOTE(review): the base data always comes from the NMNIST *training*
        # split, even when train=False -- this looks intentional for the
        # meta-learning class split, but confirm.
        self.data_train = NMNISTDataset(root, train=True,
                                        transform=transform,
                                        target_transform=target_transform,
                                        download_and_create=download_and_create,
                                        chunk_size=chunk_size)

        keys_filt_left = np.array(
            [np.random.choice(s, samples_per_class)
             for s in self.data_train.keys_by_label[labels_left]]).reshape(-1)
        keys_filt_right = np.array(
            [np.random.choice(s, samples_per_class)
             for s in self.data_train.keys_by_label[labels_right]]).reshape(-1)
        self.keys = list(zip(keys_filt_left, keys_filt_right))

    def __len__(self):
        return self.n

    def __getitem__(self, key):
        """Compose one digit pair; returns (data, remapped target in [0, nclasses))."""
        key_l, key_r = self.keys[key]
        data_l, label_l = self.data_train[key_l]
        data_r, label_r = self.data_train[key_r]
        size_x, size_y = data_r.shape[2:4]
        data = torch.zeros(data_r.shape[:2] + (size_x * 2, size_y * 2))
        # Independent random horizontal jitter for each digit within its half.
        r1 = np.random.randint(0, size_y)
        r2 = np.random.randint(0, size_y)
        data[:, :, :size_x, r1:r1 + size_y] = data_l
        data[:, :, size_x:, r2:r2 + size_y] = data_r
        target = self.labels_map[self.labels[key]]
        return data, target


def create_datasets(root='data/nmnist/n_mnist.hdf5',
                    batch_size=72,
                    chunk_size_train=300,
                    chunk_size_test=300,
                    ds=1,
                    dt=1000,
                    transform_train=None,
                    transform_test=None,
                    target_transform_train=None,
                    target_transform_test=None,
                    nclasses=5,
                    samples_per_class=2,
                    samples_per_test=2,
                    classes_meta=np.arange(100, dtype='int')):
    """Create train/test DoubleNMNIST datasets over a random class subset.

    classes_meta holds the 100 possible two-digit combinations; nclasses of
    them are sampled without replacement for this task.
    """
    size = [2, 32 // ds, 32 // ds]

    if transform_train is None:
        transform_train = Compose([
            CropDims(low_crop=[0, 0], high_crop=[32, 32], dims=[2, 3]),
            Downsample(factor=[dt, 1, ds, ds]),
            ToCountFrame(T=chunk_size_train, size=size),
            ToTensor()])
    if transform_test is None:
        transform_test = Compose([
            CropDims(low_crop=[0, 0], high_crop=[32, 32], dims=[2, 3]),
            Downsample(factor=[dt, 1, ds, ds]),
            ToCountFrame(T=chunk_size_test, size=size),
            ToTensor()])
    if target_transform_train is None:
        target_transform_train = Compose([Repeat(chunk_size_train), toOneHot(nclasses)])
    if target_transform_test is None:
        target_transform_test = Compose([Repeat(chunk_size_test), toOneHot(nclasses)])

    # 100 combinations because labels pair two digits between 0 and 9.
    labels_u = np.random.choice(classes_meta, nclasses, replace=False)

    train_ds = DoubleNMNISTDataset(root, train=True,
                                   transform=transform_train,
                                   target_transform=target_transform_train,
                                   chunk_size=chunk_size_train,
                                   nclasses=nclasses,
                                   samples_per_class=samples_per_class,
                                   labels_u=labels_u)

    test_ds = DoubleNMNISTDataset(root, train=False,
                                  transform=transform_test,
                                  target_transform=target_transform_test,
                                  chunk_size=chunk_size_test,
                                  nclasses=nclasses,
                                  samples_per_class=samples_per_test,
                                  labels_u=labels_u)

    return train_ds, test_ds


def create_dataloader(root='data/nmnist/n_mnist.hdf5',
                      batch_size=72,
                      chunk_size_train=300,
                      chunk_size_test=300,
                      ds=1,
                      dt=1000,
                      transform_train=None,
                      transform_test=None,
                      target_transform_train=None,
                      target_transform_test=None,
                      nclasses=5,
                      samples_per_class=2,
                      samples_per_test=2,
                      classes_meta=np.arange(100, dtype='int'),
                      **dl_kwargs):
    """Wrap create_datasets in shuffled-train / ordered-test DataLoaders."""
    train_d, test_d = create_datasets(
        root=root,
        batch_size=batch_size,
        chunk_size_train=chunk_size_train,
        chunk_size_test=chunk_size_test,
        ds=ds,
        dt=dt,
        transform_train=transform_train,
        transform_test=transform_test,
        target_transform_train=target_transform_train,
        target_transform_test=target_transform_test,
        classes_meta=classes_meta,
        nclasses=nclasses,
        samples_per_class=samples_per_class,
        samples_per_test=samples_per_test)

    train_dl = torch.utils.data.DataLoader(train_d, shuffle=True,
                                           batch_size=batch_size, **dl_kwargs)
    test_dl = torch.utils.data.DataLoader(test_d, shuffle=False,
                                          batch_size=batch_size, **dl_kwargs)

    return train_dl, test_dl


def sample_double_mnist_task(N=5,
                             K=2,
                             K_test=2,
                             meta_split=[range(64), range(64, 80), range(80, 100)],
                             meta_dataset_type='train',
                             **kwargs):
    """Sample an N-way, K-shot double-MNIST task from one meta split.

    Args:
        N: number of classes in the task.
        K / K_test: samples per class for train / test.
        meta_split: three class-id ranges for the train/val/test meta splits.
        meta_dataset_type: which split to draw classes from.

    Returns:
        (train_dl, test_dl) from create_dataloader.
    """
    # Validate before indexing so a bad split name raises a clear assertion
    # instead of a KeyError (the original asserted after the lookup).
    assert meta_dataset_type in ['train', 'val', 'test']
    classes_meta = {
        'train': np.array(meta_split[0], dtype='int'),
        'val': np.array(meta_split[1], dtype='int'),
        'test': np.array(meta_split[2], dtype='int'),
    }
    return create_dataloader(classes_meta=classes_meta[meta_dataset_type],
                             nclasses=N,
                             samples_per_class=K,
                             samples_per_test=K_test,
                             **kwargs)
mapping = {0: 'Hand Clapping',
           1: 'Right Hand Wave',
           2: 'Left Hand Wave',
           3: 'Right Arm CW',
           4: 'Right Arm CCW',
           5: 'Left Arm CW',
           6: 'Left Arm CCW',
           7: 'Arm Roll',
           8: 'Air Drums',
           9: 'Air Guitar',
           10: 'Other'}


class DVSGestureDataset(NeuromorphicDataset):
    """IBM DVS128 Gesture dataset backed by a preprocessed HDF5 file."""

    directory = 'data/dvsgesture/'
    # The IBM download requires a manual step; the "url" string carries the
    # instructions and is surfaced as an error by the base downloader.
    resources_url = [['Manually Download dataset here: https://ibm.ent.box.com/s/3hiq58ww1pbbjrinh367ykfdf60xsfm8/file/211521748942?sb=/details and place under {0}'.format(directory), None, 'DvsGesture.tar.gz']]
    resources_local = [directory + 'raw']

    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download_and_create=True, chunk_size=500, deltat=1000,
                 return_meta=False, time_shuffle=False):
        self.n = 0
        self.deltat = deltat
        self.download_and_create = download_and_create
        self.root = root
        self.train = train
        self.chunk_size = chunk_size
        self.return_meta = return_meta
        self.time_shuffle = time_shuffle

        super(DVSGestureDataset, self).__init__(root,
                                                transform=transform,
                                                target_transform=target_transform)

        with h5py.File(root, 'r', swmr=True, libver="latest") as f:
            if train:
                self.n = f['extra'].attrs['Ntrain']
                self.keys = f['extra']['train_keys'][()]
            else:
                self.n = f['extra'].attrs['Ntest']
                self.keys = f['extra']['test_keys'][()]

    def download(self):
        super(DVSGestureDataset, self).download()

    def create_hdf5(self):
        create_events_hdf5(self.resources_local[0], self.root)

    def __len__(self):
        return self.n

    def __getitem__(self, key):
        """Return (data, target) [+ light/subject meta when return_meta]."""
        # Open and close inside __getitem__ so every DataLoader worker
        # (num_workers > 0) gets its own file handle.
        with h5py.File(self.root, 'r', swmr=True, libver="latest") as f:
            if not self.train:
                # Test samples are stored after the Ntrain training samples.
                key = key + f['extra'].attrs['Ntrain']
            assert key in self.keys
            data, target, meta_info_light, meta_info_user = sample(
                f, key,
                T=self.chunk_size,
                deltat=self.deltat,
                shuffle=self.time_shuffle)

        if self.transform is not None:
            data = self.transform(data)
        if self.target_transform is not None:
            target = self.target_transform(target)

        if self.return_meta is True:
            return data, target, meta_info_light, meta_info_user
        return data, target


def sample(hdf5_file, key, T=500, deltat=1000, shuffle=False):
    """Take a T*deltat-microsecond slice of events for sample `key`.

    When shuffle is True the window start is drawn uniformly at random,
    leaving at least 2*T*deltat us of recording after it where possible.

    Returns:
        (tmad, label, light_condition, subject) where tmad columns are
        reordered to [t, p, x, y] and times are rebased to 0.
    """
    import ast
    dset = hdf5_file['data'][str(key)]
    label = dset['labels'][()]
    tbegin = dset['times'][0]
    tend = np.maximum(0, dset['times'][-1] - 2 * T * deltat)
    # Clamp the upper bound so randint never sees an empty range when the
    # recording is shorter than 2*T*deltat (the original could raise).
    start_time = np.random.randint(tbegin, max(tbegin, tend) + 1) if shuffle else 0
    tmad = get_tmad_slice(dset['times'][()], dset['addrs'][()], start_time, T * deltat)
    tmad[:, 0] -= tmad[0, 0]
    # meta_info was written as str(dict); parse it with literal_eval instead
    # of eval so file contents cannot execute arbitrary code.
    meta = ast.literal_eval(dset.attrs['meta_info'])
    return tmad[:, [0, 3, 1, 2]], label, meta['light condition'], meta['subject']


def create_dataloader(root='data/dvsgesture/dvs_gestures_build19.hdf5',
                      batch_size=72,
                      chunk_size_train=500,
                      chunk_size_test=1800,
                      ds=None,
                      dt=1000,
                      transform_train=None,
                      transform_test=None,
                      target_transform_train=None,
                      target_transform_test=None,
                      n_events_attention=None,
                      return_meta=False,
                      sample_shuffle=True,
                      time_shuffle=True,
                      channel_first=True,
                      **dl_kwargs):
    """Build train/test DataLoaders for the DVS128 Gesture dataset.

    Args:
        ds: spatial downsampling; int or [ds_x, ds_y]; defaults to 4.
        dt: time bin width in microseconds.
        n_events_attention: when set, insert an Attention crop stage.
        channel_first: emit (T, C, H, W) frames; otherwise (T, H, W, C).
        time_shuffle: randomize each sample's temporal window.
    """
    if ds is None:
        ds = 4
    if isinstance(ds, int):
        ds = [ds, ds]

    size = [2, 128 // ds[0], 128 // ds[1]]

    if n_events_attention is None:
        if channel_first:
            default_transform = lambda chunk_size: Compose([
                Downsample(factor=[dt, 1, ds[0], ds[1]]),
                ToCountFrame(T=chunk_size, size=size),
                ToTensor()
            ])
        else:
            default_transform = lambda chunk_size: Compose([
                Downsample(factor=[dt, 1, ds[0], ds[1]]),
                ToCountFrame(T=chunk_size, size=size),
                ToTensor(),
                lambda x: x.permute(0, 2, 3, 1)
            ])
    else:
        # Attention path: crop to a 64x64 region around event activity
        # before the spatial downsample.
        if channel_first:
            default_transform = lambda chunk_size: Compose([
                Downsample(factor=[dt, 1, 1, 1]),
                Attention(n_events_attention, size=[2, 128 // 2, 128 // 2]),
                Downsample(factor=[1, 1, ds[0], ds[1]]),
                ToCountFrame(T=chunk_size, size=[2, 128 // 2 // ds[0], 128 // 2 // ds[1]]),
                ToTensor()
            ])
        else:
            default_transform = lambda chunk_size: Compose([
                Downsample(factor=[dt, 1, 1, 1]),
                Attention(n_events_attention, size=[2, 128 // 2, 128 // 2]),
                Downsample(factor=[1, 1, ds[0], ds[1]]),
                ToCountFrame(T=chunk_size, size=[2, 128 // 2 // ds[0], 128 // 2 // ds[1]]),
                ToTensor(),
                lambda x: x.permute(0, 2, 3, 1)
            ])

    if transform_train is None:
        transform_train = default_transform(chunk_size_train)
    if transform_test is None:
        transform_test = default_transform(chunk_size_test)

    if target_transform_train is None:
        target_transform_train = Compose([Repeat(chunk_size_train), toOneHot(11)])
    if target_transform_test is None:
        target_transform_test = Compose([Repeat(chunk_size_test), toOneHot(11)])

    train_d = DVSGestureDataset(root,
                                train=True,
                                transform=transform_train,
                                target_transform=target_transform_train,
                                chunk_size=chunk_size_train,
                                deltat=dt,
                                return_meta=return_meta,
                                time_shuffle=time_shuffle)
    train_dl = torch.utils.data.DataLoader(train_d, batch_size=batch_size,
                                           shuffle=sample_shuffle, **dl_kwargs)

    test_d = DVSGestureDataset(root,
                               train=False,
                               transform=transform_test,
                               target_transform=target_transform_test,
                               chunk_size=chunk_size_test,
                               deltat=dt,
                               return_meta=return_meta,
                               time_shuffle=time_shuffle)
    # Explicit shuffle=False (same as the DataLoader default) for clarity.
    test_dl = torch.utils.data.DataLoader(test_d, batch_size=batch_size,
                                          shuffle=False, **dl_kwargs)

    return train_dl, test_dl
mapping = {i: str(i) for i in range(10)}

# Class splits for the double-MNIST task,
# following https://github.com/shaohua0116/MultiDigitMNIST
splits = {}
splits['train'] = ['00', '01', '04', '05', '06', '08', '09', '11', '12', '13', '14', '15', '16', '18', '19', '20', '21', '23', '24', '26', '28', '29', '30', '31', '33', '35', '37', '38', '41', '42', '43', '44', '45', '50', '51', '53', '54', '56', '59', '60', '62', '63', '65', '69', '70', '72', '74', '75', '76', '77', '79', '81', '82', '84', '85', '87', '88', '89', '90', '91', '94', '95', '97', '98']
splits['val'] = ['03', '07', '10', '22', '27', '34', '39', '40', '48', '52', '58', '61', '64', '71', '93', '99']
splits['test'] = ['02', '17', '25', '32', '36', '46', '47', '49', '55', '57', '66', '67', '68', '73', '78', '80', '83', '86', '92', '96']


class DoubleNMNISTClassDataset(NeuromorphicDataset):
    """All samples of ONE double-digit class, stacked top/bottom on a canvas.

    label_u in [0, 100) encodes the class: tens digit on top, units digit on
    the bottom half.
    """

    def __init__(self,
                 root: str,
                 train: bool = True,
                 transform: object = None,
                 post_transform=lambda x: x,
                 target_transform=None,
                 download_and_create=True,
                 chunk_size=500,
                 samples_per_class=1,
                 label_u=0,
                 dt=1000):
        self.transform = transform
        # post_transform runs on the composed double-digit frame (the base
        # transform already ran per digit inside the NMNIST dataset).
        if post_transform is None:
            self.post_transform = lambda x: x
        else:
            self.post_transform = post_transform
        self.target_transform = target_transform

        self.samples_per_class = samples_per_class
        self.download_and_create = download_and_create
        self.root = root
        self.train = train
        self.chunk_size = chunk_size
        self.label_u = label_u

        lu = self.label_u
        self.labels_left = lu // 10
        self.labels_right = lu % 10

        super(DoubleNMNISTClassDataset, self).__init__(root=None,
                                                       transform=transform,
                                                       target_transform=target_transform)

        self.data_orig = NMNISTDataset(root,
                                       dt=dt,
                                       train=train,
                                       transform=transform,
                                       target_transform=None,
                                       download_and_create=download_and_create,
                                       chunk_size=chunk_size)

        # Number of base samples available for the left and right digit.
        self.nl = len(self.data_orig.keys_by_label[self.labels_left])
        self.nr = len(self.data_orig.keys_by_label[self.labels_right])
        self.n = self.nl * self.nr

    def __len__(self):
        # Capped at 1000 combinations per class (self.n can be millions).
        return 1000

    def __getitem__(self, key):
        ll = self.labels_left
        lr = self.labels_right
        # key enumerates (left, right) pairs: left index in [0, nl), right in
        # [0, nr). Fixed indexing: the original divided and modded by nl,
        # which is only correct when nl == nr and can index out of range
        # otherwise.
        key_l = self.data_orig.keys_by_label[ll][key // self.nr]
        key_r = self.data_orig.keys_by_label[lr][key % self.nr]

        data_l, label_l = self.data_orig[key_l]
        data_r, label_r = self.data_orig[key_r]

        size_x, size_y = data_r.shape[2:4]
        data = torch.zeros(data_r.shape[:2] + (size_x * 2, size_y))
        data[:, :, :size_x, :] = data_l
        data[:, :, size_x:, :] = data_r
        target = self.label_u
        # data is already transformed inside the base dataset (data_orig).
        return self.post_transform(data), self.target_transform(target)


class ClassNMNISTDataset(torchmeta.utils.data.ClassDataset):
    """torchmeta ClassDataset exposing one DoubleNMNISTClassDataset per label."""

    def __init__(self, root='data/nmnist/n_mnist.hdf5', dt=1000, chunk_size=300,
                 meta_train=False, meta_val=False, meta_test=False,
                 meta_split=None, transform=None, target_transform=None,
                 post_transform=None, download=False, class_augmentations=None):
        self.root = root
        self.chunk_size = chunk_size
        # Default to the train split; later flags take precedence (the
        # original left split_name undefined when no flag was set).
        split_name = 'train'
        if meta_val:
            split_name = 'val'
        if meta_test:
            split_name = 'test'
        self.split_name = split_name
        self.dt = dt
        self.transform = transform
        self.target_transform = target_transform
        self.post_transform = post_transform

        super(ClassNMNISTDataset, self).__init__(
            meta_train=meta_train,
            meta_val=meta_val,
            meta_test=meta_test,
            meta_split=meta_split,
            class_augmentations=class_augmentations)

        self._labels = [int(s) for s in splits[split_name]]
        self._num_classes = len(self._labels)

    @property
    def labels(self):
        return self._labels

    @property
    def num_classes(self):
        return self._num_classes

    def __getitem__(self, index):
        label = self._labels[index]
        d = DoubleNMNISTClassDataset(root=self.root,
                                     dt=self.dt,
                                     train=self.meta_train,
                                     label_u=label,
                                     transform=self.transform,
                                     post_transform=self.post_transform,
                                     target_transform=self.target_transform,
                                     chunk_size=self.chunk_size)
        d.index = index
        return d


def create_class_dataset(dset, meta_split='train'):
    """Split dset into one Subset per class using its keys_by_label lists."""
    ds = []
    for n in range(dset.nclasses):
        indices = dset.keys_by_label[n]
        ds.append(torch.utils.data.Subset(dset, indices))
    return ds
class DoubleNMNIST(torchmeta.utils.data.CombinationMetaDataset):
    """Meta-dataset of N-way double-NMNIST tasks (combinations of classes)."""

    def __init__(self, root, num_classes_per_task=None, meta_train=False, dt=1000,
                 meta_val=False, meta_test=False, meta_split=None,
                 transform=None, target_transform=None, post_transform=None,
                 dataset_transform=None, class_augmentations=None,
                 download=False, chunk_size=300):
        # Fixed: the original assigned to a misspelled local
        # ('target_tranform'), so the default Categorical target transform
        # was silently dropped.
        if target_transform is None:
            target_transform = Categorical(num_classes_per_task)

        dataset = ClassNMNISTDataset(root, dt=dt,
                                     meta_train=meta_train,
                                     meta_val=meta_val,
                                     meta_test=meta_test,
                                     meta_split=meta_split,
                                     transform=transform,
                                     post_transform=post_transform,
                                     class_augmentations=class_augmentations,
                                     download=download,
                                     chunk_size=chunk_size)

        super(DoubleNMNIST, self).__init__(dataset,
                                           num_classes_per_task,
                                           target_transform=target_transform,
                                           dataset_transform=dataset_transform)


# --- neuromorphic_dataset.py: shared download helpers -----------------------

DEFAULT_ROOT = 'data/'


def download_url(url, root, filename=None, md5=None):
    """Download a file from a url and place it in root.

    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check

    Raises:
        RuntimeError: if the file is missing or fails the md5 check afterwards.
    """
    from six.moves import urllib

    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.path.join(root, filename)

    os.makedirs(root, exist_ok=True)

    # Check if the file is already present locally.
    if check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        try:
            if 'dropbox' in url or 'mendeley' in url:
                # These hosts refuse the default urllib user agent, so fetch
                # with requests and a wget-like header instead.
                import requests
                headers = {'user-agent': 'Wget/1.16 (linux-gnu)'}
                r = requests.get(url, stream=True, headers=headers)
                with open(fpath, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk:
                            f.write(chunk)
            elif 'Manual' in url:
                # Manual-download datasets carry their instructions in the
                # "url" string; surface them as a URLError.
                raise urllib.error.URLError(url)
            else:
                print('Downloading ' + url + ' to ' + fpath)
                urllib.request.urlretrieve(url, fpath)
        except (urllib.error.URLError, IOError) as e:
            if url[:5] == 'https':
                # Retry once over plain http.
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                urllib.request.urlretrieve(url, fpath)
            else:
                raise e
    # check integrity of downloaded file
    if not check_integrity(fpath, md5):
        raise RuntimeError("File not found or corrupted.")


def download_and_extract_archive(url, download_root, extract_root=None,
                                 filename=None, md5=None, remove_finished=False):
    """Download an archive into download_root and extract it to extract_root."""
    download_root = os.path.expanduser(download_root)
    if extract_root is None:
        extract_root = download_root
    if not filename:
        filename = os.path.basename(url)

    download_url(url, download_root, filename, md5)

    archive = os.path.join(download_root, filename)
    print("Extracting {} to {}".format(archive, extract_root))
    # Workaround: torchvision's extract_archive cannot handle rar files.
    if archive[-4:] != '.rar':
        extract_archive(archive, extract_root, remove_finished)
    else:
        # Fixed: argument-list subprocess call instead of os.system string
        # interpolation, which broke on paths with spaces and was
        # shell-injectable.
        import subprocess
        subprocess.run(['unrar', 'x', archive, extract_root + '/'])


def identity(x):
    """Default no-op transform."""
    return x
= identity 123 | if target_transform is None: 124 | target_transform = identity 125 | 126 | self.transform = transform 127 | self.target_transform = target_transform 128 | 129 | if has_separate_transform: 130 | transforms = StandardTransform(transform, target_transform) 131 | self.transforms = transforms 132 | 133 | def __getitem__(self, index): 134 | raise NotImplementedError 135 | 136 | def __len__(self): 137 | raise NotImplementedError 138 | 139 | def __repr__(self): 140 | head = "Dataset " + self.__class__.__name__ 141 | body = ["Number of datapoints: {}".format(self.__len__())] 142 | if self.root is not None: 143 | body.append("Root location: {}".format(self.root)) 144 | body += self.extra_repr().splitlines() 145 | if hasattr(self, "transforms") and self.transforms is not None: 146 | body += [repr(self.transforms)] 147 | lines = [head] + [" " * self._repr_indent + line for line in body] 148 | return '\n'.join(lines) 149 | 150 | def _format_transform_repr(self, transform, head): 151 | lines = transform.__repr__().splitlines() 152 | return (["{}{}".format(head, lines[0])] + 153 | ["{}{}".format(" " * len(head), line) for line in lines[1:]]) 154 | 155 | def extra_repr(self): 156 | return "" 157 | 158 | def check_exists(self): 159 | res_ = [os.path.exists(d) for d in self.resources_local] 160 | res = all(res_) 161 | if res is False: 162 | print('The following files did not exist, will attempt download:') 163 | for i,r in enumerate(res_): 164 | if not r: print(self.resources_local[i]) 165 | return res 166 | 167 | 168 | def download(self): 169 | if self.check_exists(): 170 | return True 171 | else: 172 | os.makedirs(self.directory, exist_ok=True) 173 | for url, md5, filename in self.resources_url: 174 | download_and_extract_archive(url, download_root=self.directory, filename=filename, md5=md5) 175 | return False 176 | 177 | def create_hdf5(self): 178 | raise NotImplementedError() 179 | 180 | def target_transform_append(self, transform): 181 | if transform is None: 
182 | return 183 | if self.target_transform is None: 184 | self.target_transform = transform 185 | else: 186 | self.target_transform = Compose([self.target_transform, transform]) 187 | 188 | 189 | def transform_append(self, transform): 190 | if transform is None: 191 | return 192 | if self.transform is None: 193 | self.transform = transform 194 | else: 195 | self.transform = Compose([self.transform, transform]) 196 | 197 | 198 | 199 | 200 | class StandardTransform(object): 201 | def __init__(self, transform=None, target_transform=None): 202 | self.transform = transform 203 | self.target_transform = target_transform 204 | 205 | def __call__(self, input, target): 206 | if self.transform is not None: 207 | input = self.transform(input) 208 | if self.target_transform is not None: 209 | target = self.target_transform(target) 210 | return input, target 211 | 212 | def _format_transform_repr(self, transform, head): 213 | lines = transform.__repr__().splitlines() 214 | return (["{}{}".format(head, lines[0])] + 215 | ["{}{}".format(" " * len(head), line) for line in lines[1:]]) 216 | 217 | def __repr__(self): 218 | body = [self.__class__.__name__] 219 | if self.transform is not None: 220 | body += self._format_transform_repr(self.transform, 221 | "Transform: ") 222 | if self.target_transform is not None: 223 | body += self._format_transform_repr(self.target_transform, 224 | "Target transform: ") 225 | 226 | return '\n'.join(body) 227 | -------------------------------------------------------------------------------- /torchneuromorphic/nomniglot/create_hdf5_omniglot.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : create_hdf5.py 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Tue Nov 5 13:15:54 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | 
#----------------------------------------------------------------------------- 12 | import numpy as np 13 | from tqdm import tqdm 14 | import scipy.misc 15 | import h5py 16 | import glob 17 | import torch.utils.data 18 | from torchneuromorphic.events_timeslices import * 19 | from torchneuromorphic.utils import * 20 | import os 21 | 22 | from collections import namedtuple, defaultdict 23 | import torch 24 | import torch.utils.data 25 | from dv import AedatFile 26 | import pandas 27 | import itertools 28 | 29 | NUM_CLASSES = 50 # each language included in the complete set 30 | 31 | train_dir = 'data/nomniglot/dvs_background_1' 32 | validation_dir = 'data/nomniglot/dvs_background_2' 33 | test_dir = 'data/nomniglot/dvs_evaluation' 34 | 35 | train_mapping = { 36 | 'Alphabet_of_the_Magi': 0, 37 | 'Anglo-Saxon_Futhorc': 1, 38 | 'Arcadian': 2, 39 | 'Armenian': 3, 40 | 'Asomtavruli_(Georgian)': 4, 41 | 'Balinese': 5, 42 | 'Bengali': 6, 43 | 'Blackfoot_(Canadian_Aboriginal_Syllabics)': 7, 44 | 'Braille': 8, 45 | 'Burmese_(Myanmar)': 9, 46 | 'Cyrillic': 10, 47 | 'Early_Aramaic': 11, 48 | 'Futurama': 12, 49 | 'Grantha': 13, 50 | 'Greek': 14, 51 | 'Gujarati': 15 52 | } 53 | 54 | validation_mapping = { 55 | 'Hebrew': 16, 56 | 'Inuktitut_(Canadian_Aboriginal_Syllabics)': 17, 57 | 'Japanese_(hiragana)': 18, 58 | 'Japanese_(katakana)': 19, 59 | 'Korean': 20, 60 | 'Latin': 21, 61 | 'Malay_(Jawi_-_Arabic)': 22, 62 | 'Mkhedruli_(Georgian)': 23, 63 | 'N_Ko': 24, 64 | 'Ojibwe_(Canadian_Aboriginal_Syllabics)': 25, 65 | 'Sanskrit': 26, 66 | 'Syriac_(Estrangelo)': 27, 67 | 'Tagalog': 28, 68 | 'Tifinagh': 29 69 | } 70 | test_mapping = { 71 | 'Angelic': 30, 72 | 'Atemayar_Qelisayer': 31, 73 | 'Atlantean': 32, 74 | 'Aurek-Besh': 33, 75 | 'Avesta': 34, 76 | 'Ge_ez': 35, 77 | 'Glagolitic': 36, 78 | 'Gurmukhi': 37, 79 | 'Kannada': 38, 80 | 'Keble': 39, 81 | 'Malayalam': 40, 82 | 'Manipuri': 41, 83 | 'Mongolian': 42, 84 | 'Old_Church_Slavonic_(Cyrillic)': 43, 85 | 'Oriya': 44, 86 | 'Sylheti': 45, 
87 | 'Syriac_(Serto)': 46, 88 | 'Tengwar': 47, 89 | 'Tibetan': 48, 90 | 'ULOG': 49 91 | } 92 | 93 | def nomniglot_load_events_from_aedat(aedat_file_path, csv_file_path): 94 | # each aedat has 20 samples so this will give a list of 20 samples 95 | # instead of just 1 96 | 97 | print(aedat_file_path) 98 | 99 | timestamp, polarity, x, y = np.array([], dtype=np.uint64),np.array([], dtype=np.uint8),np.array([], dtype=np.uint8),np.array([], np.uint8), 100 | samples_per_character = 20 101 | lst = [] 102 | 103 | # readout and store the information from the aedat file 104 | with AedatFile(aedat_file_path) as f: # read aedat4 105 | for e in f['events'].numpy(): 106 | timestamp = np.concatenate([timestamp,e['timestamp'].astype(np.uint64)]) 107 | polarity = np.concatenate([polarity,e['polarity'].astype(np.uint8)]) 108 | x = np.concatenate([x,e['x'].astype(np.uint16)]) 109 | y = np.concatenate([y,e['y'].astype(np.uint16)]) 110 | 111 | # each aedat has 20 samples, deliniated by timestamps in the csv file 112 | start_end_timestamp = pandas.read_csv(csv_file_path).values 113 | for i in range(samples_per_character): 114 | start_index = find_first(timestamp, start_end_timestamp[i][1]) 115 | end_index = find_first(timestamp,(start_end_timestamp[i][2])) 116 | ts = np.array(timestamp[start_index:end_index], dtype=np.uint64) 117 | ts -= ts[0]#normalize 118 | tmp = np.column_stack([ts, polarity[start_index:end_index], x[start_index:end_index], y[start_index:end_index]]) 119 | 120 | if tmp.size!=0: 121 | lst.append(tmp) 122 | else: 123 | print("empty sample") 124 | return lst 125 | 126 | 127 | def get_file_names(dataset_path): 128 | ''' 129 | num_per_class: number of characters per class. 
130 | ''' 131 | if not os.path.isdir(dataset_path): 132 | raise FileNotFoundError("NOmniglot Dataset not found, looked at: {}".format(dataset_path)) 133 | 134 | dict_files = {} 135 | # dict_validation = {} 136 | # dict_test = {} 137 | 138 | # in case fewer samples are needed for memory reasons or something 139 | # num_train = 140 | # num_validation = 141 | # num_test = 142 | 143 | samples = 0 # samples are really multiplied by 20 because each file has 20 samples 144 | 145 | for root, subdirectories, files in os.walk(dataset_path): 146 | for subdirectory in subdirectories: 147 | if "character" in os.path.join(root, subdirectory) and ".ipynb" not in os.path.join(root, subdirectory): 148 | dir_path = os.path.join(root, subdirectory).split('/') 149 | 150 | if dir_path[3] in dict_files.keys(): 151 | dict_files[dir_path[3]].append(os.path.join(root, subdirectory)) 152 | else: 153 | dict_files[dir_path[3]] = [os.path.join(root, subdirectory)] 154 | 155 | samples += 1 156 | samples = 0 157 | 158 | 159 | 160 | 161 | return dict_files #train, dict_validation, dict_test 162 | 163 | def create_events_hdf5(directory='data/nomniglot/', hdf5_filename='nomniglot.hdf5'):#, num_instances=): 164 | 165 | directory = directory 166 | hdf5_filename = hdf5_filename 167 | dict_train = get_file_names(directory+'dvs_background_1') 168 | dict_train.update(get_file_names(directory+'dvs_background_2')) 169 | dict_validation = get_file_names(directory+'dvs_evaluation') 170 | 171 | train_keys = [] 172 | train_label_list = [] 173 | validation_keys = [] 174 | validation_label_list = [] 175 | 176 | # test to make sure an hdf5 can be made ok 177 | # this looks like it will take a long time for all the data so try just one alphabets 178 | print('Writing '+hdf5_filename) 179 | with h5py.File(hdf5_filename, 'w') as f: 180 | f.clear() 181 | key = 0 182 | metas = [] 183 | data_grp = f.create_group('data') 184 | extra_grp = f.create_group('extra') 185 | 186 | char = 0 187 | print('Processing Train Data') 
188 | for (k, v) in tqdm(dict_train.items()): 189 | for path in v: 190 | train_label_list.append([]) 191 | for f in os.listdir(path): 192 | if os.path.isfile(os.path.join(path, f)): 193 | if ".aedat4" in f: 194 | aedat_path = os.path.join(path, f) 195 | elif ".csv" in f: 196 | csv_path = os.path.join(path,f) 197 | 198 | samples = nomniglot_load_events_from_aedat(aedat_path, csv_path) 199 | 200 | for data in samples: 201 | times = data[:,0] 202 | addrs = data[:,1:] 203 | 204 | train_keys.append(key) 205 | 206 | train_label_list[char].append(key) 207 | 208 | metas.append({'key':str(key), 'training sample':True}) 209 | subgrp = data_grp.create_group(str(key)) 210 | tm_dset = subgrp.create_dataset('times' , data=times, dtype = np.uint32) 211 | ad_dset = subgrp.create_dataset('addrs' , data=addrs, dtype = np.uint8) 212 | lbl_dset= subgrp.create_dataset('labels', data=char, dtype = np.uint16) 213 | subgrp.attrs['meta_info']= str(metas[-1]) 214 | key += 1 215 | char +=1 216 | 217 | 218 | print(len(train_keys), char) 219 | CHAR_OFFSET = char 220 | 221 | print('Processing Validation Data') 222 | for k, v in tqdm(dict_validation.items()): 223 | #if k == "Braille": 224 | for path in v: 225 | validation_label_list.append([]) 226 | for f in os.listdir(path): 227 | if os.path.isfile(os.path.join(path, f)): 228 | if ".aedat4" in f: 229 | aedat_path = os.path.join(path, f) 230 | elif ".csv" in f: 231 | csv_path = os.path.join(path,f) 232 | else: 233 | print("non aedat4 file and csv file found") 234 | 235 | samples = nomniglot_load_events_from_aedat(aedat_path, csv_path) 236 | 237 | for data in samples: 238 | times = data[:,0] 239 | addrs = data[:,1:] 240 | 241 | validation_keys.append(key) 242 | validation_label_list[char-CHAR_OFFSET].append(key) 243 | 244 | metas.append({'key':str(key), 'validation sample':True}) 245 | subgrp = data_grp.create_group(str(key)) 246 | tm_dset = subgrp.create_dataset('times' , data=times, dtype = np.uint32) 247 | ad_dset = 
subgrp.create_dataset('addrs' , data=addrs, dtype = np.uint8) 248 | lbl_dset= subgrp.create_dataset('labels', data=char, dtype = np.uint8) 249 | subgrp.attrs['meta_info']= str(metas[-1]) 250 | key += 1 251 | char +=1 252 | 253 | extra_grp.create_dataset('train_keys', data = train_keys) 254 | extra_grp.create_dataset('train_keys_by_label', data = train_label_list) 255 | extra_grp.create_dataset('validation_keys_by_label', data = validation_label_list) 256 | extra_grp.create_dataset('validation_keys', data = validation_keys) 257 | extra_grp.attrs['N'] = len(train_keys) + len(validation_keys) + len(validation_keys) 258 | extra_grp.attrs['Ntrain'] = len(train_keys) 259 | extra_grp.attrs['Nvalidation'] = len(validation_keys) 260 | 261 | if __name__=="__main__": 262 | # For Testing purposes 263 | create_events_hdf5() -------------------------------------------------------------------------------- /torchneuromorphic/utils.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | #----------------------------------------------------------------------------- 3 | # File Name : 4 | # Author: Emre Neftci 5 | # 6 | # Creation Date : Tue Nov 5 13:16:48 2019 7 | # Last Modified : 8 | # 9 | # Copyright : (c) UC Regents, Emre Neftci 10 | # Licence : GPLv2 11 | #----------------------------------------------------------------------------- 12 | import struct 13 | import numpy as np 14 | import scipy.io as sio 15 | 16 | # For loading data from matlab files, in this case dvssign data 17 | def load_mat(filename): 18 | data_dict = sio.loadmat(filename) 19 | 20 | # The keys are: ['__globals__', '__header__', '__version__', 'pol', 'ts', 'x', 'y'] 21 | all_ts = data_dict['ts'].squeeze() 22 | all_x = data_dict['x'].squeeze() 23 | all_y = data_dict['y'].squeeze() 24 | all_p = data_dict['pol'].squeeze() 25 | 26 | return all_ts, all_x, all_y, all_p 27 | 28 | 29 | # adapted from https://github.com/gorchard/event-Python/blob/master/eventvision.py 
30 | def load_ATIS_bin(filename): 31 | """Reads in the TD events contained in the N-MNIST/N-CALTECH101 dataset file specified by 'filename'""" 32 | f = open(filename, 'rb') 33 | raw_data = np.fromfile(f, dtype=np.uint8) 34 | f.close() 35 | raw_data = np.uint32(raw_data) 36 | 37 | all_y = raw_data[1::5] 38 | all_x = raw_data[0::5] 39 | all_p = (raw_data[2::5] & 128) >> 7 #bit 7 40 | all_ts = ((raw_data[2::5] & 127) << 16) | (raw_data[3::5] << 8) | (raw_data[4::5]) 41 | 42 | #Process time stamp overflow events 43 | time_increment = 2 ** 13 44 | overflow_indices = np.where(all_y == 240)[0] 45 | for overflow_index in overflow_indices: 46 | all_ts[overflow_index:] += time_increment 47 | 48 | #Everything else is a proper td spike 49 | td_indices = np.where(all_y != 240)[0] 50 | return all_ts, all_x, all_y, all_p 51 | 52 | def load_jaer(datafile='/tmp/aerout.dat', length=0, version='aedat', debug=1, camera='DVS128'): 53 | """ 54 | load AER data file and parse these properties of AE events: 55 | - timestamps (in us), 56 | - x,y-position [0..127] 57 | - polarity (0/1) 58 | 59 | @param datafile - path to the file to read 60 | @param length - how many bytes(B) should be read; default 0=whole file 61 | @param version - which file format version is used: "aedat" = v2, "dat" = v1 (old) 62 | @param debug - 0 = silent, 1 (default) = print summary, >=2 = print all debug 63 | @param camera='DVS128' or 'DAVIS240' 64 | @return (ts, xpos, ypos, pol) 4-tuple of lists containing data of all events; 65 | """ 66 | # constants 67 | aeLen = 8 # 1 AE event takes 8 bytes 68 | readMode = '>II' # struct.unpack(), 2x ulong, 4B+4B 69 | td = 0.000001 # timestep is 1us 70 | if (camera == 'DVS128'): 71 | xmask = 0x00fe 72 | xshift = 1 73 | ymask = 0x7f00 74 | yshift = 8 75 | pmask = 0x1 76 | pshift = 0 77 | elif (camera == 'DAVIS240'): # values take from scripts/matlab/getDVS*.m 78 | xmask = 0x003ff000 79 | xshift = 12 80 | ymask = 0x7fc00000 81 | yshift = 22 82 | pmask = 0x800 83 | pshift = 11 84 | 
eventtypeshift = 31 85 | else: 86 | raise ValueError("Unsupported camera: %s" % (camera)) 87 | 88 | if (version == 'dat'): 89 | print ("using the old .dat format") 90 | aeLen = 6 91 | readMode = '>HI' # ushot, ulong = 2B+4B 92 | 93 | aerdatafh = open(datafile, 'rb') 94 | k = 0 # line number 95 | p = 0 # pointer, position on bytes 96 | statinfo = os.stat(datafile) 97 | if length == 0: 98 | length = statinfo.st_size 99 | 100 | # header 101 | lt = aerdatafh.readline() 102 | while lt and lt[0] == '#': 103 | p += len(lt) 104 | k += 1 105 | lt = aerdatafh.readline() 106 | if debug >= 2: 107 | print (str(lt)) 108 | continue 109 | 110 | # variables to parse 111 | timestamps = [] 112 | xaddr = [] 113 | yaddr = [] 114 | pol = [] 115 | 116 | # read data-part of file 117 | aerdatafh.seek(p) 118 | s = aerdatafh.read(aeLen) 119 | p += aeLen 120 | 121 | # print (xmask, xshift, ymask, yshift, pmask, pshift) 122 | while p < length: 123 | addr, ts = struct.unpack(readMode, s) 124 | # parse event type 125 | if (camera == 'DAVIS240'): 126 | eventtype = (addr >> eventtypeshift) 127 | else: # DVS128 128 | eventtype = 0 129 | 130 | # parse event's data 131 | if (eventtype == 0): # this is a DVS event 132 | x_addr = (addr & xmask) >> xshift 133 | y_addr = (addr & ymask) >> yshift 134 | a_pol = (addr & pmask) >> pshift 135 | 136 | if debug >= 3: 137 | print("ts->", ts) # ok 138 | print("x-> ", x_addr) 139 | print("y-> ", y_addr) 140 | print("pol->", a_pol) 141 | 142 | timestamps.append(ts) 143 | xaddr.append(x_addr) 144 | yaddr.append(y_addr) 145 | pol.append(a_pol) 146 | 147 | aerdatafh.seek(p) 148 | s = aerdatafh.read(aeLen) 149 | p += aeLen 150 | 151 | if debug > 0: 152 | try: 153 | print ("read %i (~ %.2fM) AE events, duration= %.2fs" % ( 154 | len(timestamps), len(timestamps) / float(10 ** 6), (timestamps[-1] - timestamps[0]) * td)) 155 | n = 5 156 | print ("showing first %i:" % (n)) 157 | print ("timestamps: %s \nX-addr: %s\nY-addr: %s\npolarity: %s" % ( 158 | timestamps[0:n], 
xaddr[0:n], yaddr[0:n], pol[0:n])) 159 | except: 160 | print ("failed to print statistics") 161 | 162 | return np.array(timestamps), np.array(xaddr), np.array(yaddr), np.array(pol) 163 | 164 | 165 | 166 | 167 | def plot_frames_imshow(images, labels=None, nim=11, avg=50, interval=1, do1h = True, transpose=False, label_mapping=None): 168 | from matplotlib.colors import ListedColormap, LinearSegmentedColormap 169 | from matplotlib.pyplot import Normalize 170 | colors = ['red', 'black', 'green'] 171 | cmap = LinearSegmentedColormap.from_list('name', colors) 172 | if avg>images.shape[1]: 173 | avg = images.shape[1] 174 | 175 | rnge = range(0,np.maximum(images.shape[1]//avg,1),interval) 176 | 177 | import pylab as plt 178 | plt.figure(figsize = [nim+2,16]) 179 | import matplotlib.gridspec as gridspec 180 | if not transpose: 181 | gs = gridspec.GridSpec(len(rnge), nim) 182 | else: 183 | gs = gridspec.GridSpec(nim, len(rnge)) 184 | plt.subplots_adjust(left=0, bottom=0, right=1, top=0.95, wspace=.0, hspace=.04) 185 | if labels is not None: 186 | if do1h: 187 | categories = labels.argmax(axis=1) 188 | else: 189 | categories = labels 190 | else: 191 | categories = range(len(images)) 192 | s=[] 193 | for j in range(nim): 194 | norm = Normalize(-.1,.1) 195 | for e,i in enumerate(rnge): 196 | if not transpose: 197 | ax = plt.subplot(gs[e, j]) 198 | else: 199 | ax = plt.subplot(gs[j, e]) 200 | plt.imshow(images[j,i*avg:(i*avg+avg),0,:,:].mean(axis=0).T -images[j,i*avg:(i*avg+avg),1,:,:].mean(axis=0).T, cmap=cmap, norm= norm) 201 | plt.xticks([]) 202 | 203 | if i==0 and label_mapping is not None: 204 | plt.title(label_mapping[int(categories[j])], fontsize=10) 205 | plt.yticks([]) 206 | s.append(images[j].sum()) 207 | 208 | def legacy_aedat_to_events(filename, normalize_time = True): 209 | ''' 210 | Uses the dv package to extract events from aedat 2 and aedat 3 211 | ''' 212 | from dv import LegacyAedatFile 213 | events=[] 214 | 215 | with LegacyAedatFile(filename) as f: 216 | for 
event in f: 217 | events.append([event.timestamp,event.polarity,event.x,event.y]) 218 | 219 | events = np.column_stack(np.array(events, dtype='uint32')).T 220 | if normalize_time: 221 | events[:,0] -= events[0,0] 222 | 223 | return events 224 | 225 | 226 | def aedat_to_events(filename): 227 | ''' 228 | Used for aedat 3.1 229 | ''' 230 | label_filename = filename[:-6] +'_labels.csv' 231 | labels = np.loadtxt(label_filename, skiprows=1, delimiter=',',dtype='uint32') 232 | events=[] 233 | with open(filename, 'rb') as f: 234 | for i in range(5): 235 | f.readline() 236 | while True: 237 | data_ev_head = f.read(28) 238 | if len(data_ev_head)==0: break 239 | 240 | eventtype = struct.unpack('H', data_ev_head[0:2])[0] 241 | eventsource = struct.unpack('H', data_ev_head[2:4])[0] 242 | eventsize = struct.unpack('I', data_ev_head[4:8])[0] 243 | eventoffset = struct.unpack('I', data_ev_head[8:12])[0] 244 | eventtsoverflow = struct.unpack('I', data_ev_head[12:16])[0] 245 | eventcapacity = struct.unpack('I', data_ev_head[16:20])[0] 246 | eventnumber = struct.unpack('I', data_ev_head[20:24])[0] 247 | eventvalid = struct.unpack('I', data_ev_head[24:28])[0] 248 | 249 | if(eventtype == 1): 250 | event_bytes = np.frombuffer(f.read(eventnumber*eventsize), 'uint32') 251 | event_bytes = event_bytes.reshape(-1,2) 252 | 253 | x = (event_bytes[:,0] >> 17) & 0x00001FFF 254 | y = (event_bytes[:,0] >> 2 ) & 0x00001FFF 255 | p = (event_bytes[:,0] >> 1 ) & 0x00000001 256 | t = event_bytes[:,1] 257 | events.append([t,x,y,p]) 258 | 259 | else: 260 | f.read(eventnumber*eventsize) 261 | events = np.column_stack(events) 262 | events = events.astype('uint32') 263 | clipped_events = np.zeros([4,0],'uint32') 264 | for l in labels: 265 | start = np.searchsorted(events[0,:], l[1]) 266 | end = np.searchsorted(events[0,:], l[2]) 267 | clipped_events = np.column_stack([clipped_events,events[:,start:end]]) 268 | return clipped_events.T, labels 269 | 270 | def rosbag_to_events(filename, 
topic='/dvs_right/events'): 271 | try: 272 | from importRosbag.importRosbag import importRosbag 273 | except ImportError as exc: 274 | print("This function requires the importRosbag library from https://github.com/event-driven-robotics") 275 | raise(exc) 276 | all_events = [] 277 | data = importRosbag(filename)[topic] 278 | data['ts'] -= data['ts'][0] # align at 0 279 | data['ts'] *= 1000000. # second to microsecond 280 | return data 281 | -------------------------------------------------------------------------------- /torchneuromorphic/dvssign/dvssign_dataloaders.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #----------------------------------------------------------------------------- 3 | # Author: Kenneth Stewart and Emre Neftci 4 | # 5 | # Creation Date : Fri 01 Dec 2017 10:05:17 PM PST 6 | # Last Modified : Sun 29 Jul 2018 01:39:06 PM PDT 7 | # 8 | # Copyright : (c) 9 | # Licence : Apache License, Version 2.0 10 | #----------------------------------------------------------------------------- 11 | 12 | import struct 13 | import time, copy 14 | import numpy as np 15 | import scipy.misc 16 | import h5py 17 | import torch.utils.data 18 | from ..neuromorphic_dataset import NeuromorphicDataset 19 | from ..events_timeslices import * 20 | from ..transforms import * 21 | from .create_hdf5_sign import create_events_hdf5 22 | import os 23 | 24 | NUM_CLASSES = 24 # A-Y excluding j 25 | 26 | # dataset is huge so only use first 10 letters 27 | mapping = { 'a':0, 28 | 'b':1, 29 | 'c':2, 30 | 'd':3, 31 | 'e':4, 32 | 'f':5, 33 | 'g':6, 34 | 'h':7, 35 | 'i':8, 36 | 'k':9, 37 | 'l':10, 38 | 'm':11, 39 | 'n':12, 40 | 'o':13, 41 | 'p':14, 42 | 'q':15, 43 | 'r':16, 44 | 's':17, 45 | 't':18, 46 | 'u':19, 47 | 'v':20, 48 | 'w':21, 49 | 'x':22, 50 | 'y':23} 51 | 52 | class DVSSignDataset(NeuromorphicDataset): 53 | resources_url = 
[['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAB0jgWqXDn3sZB_YXEjZLv4a/Yin%20Bi%20-%20a.zip?dl=0',None, 'Yin Bi - a.zip'], 54 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAC-671H-Z7XTAQcT7GJXFsGa/Yin%20Bi%20-%20b.zip?dl=0', None, 'Yin Bi - b.zip'], 55 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AADa8hkEbpgnNbBtmRIuAw3ha/Yin%20Bi%20-%20c.zip?dl=0', None, 'Yin Bi - c.zip'], 56 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AACUrdhDl_tYnNkb8OpAJ5k4a/Yin%20Bi%20-%20d.zip?dl=0', None, 'Yin Bi - d.zip'], 57 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AABmXnWZ2hI2dPQYn3FOClnba/Yin%20Bi%20-%20e.zip?dl=0', None, 'Yin Bi - e.zip'], 58 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAAzopIcHXTmjPuYomjAiPtfa/Yin%20Bi%20-%20f.zip?dl=0', None, 'Yin Bi - f.zip'], 59 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAB0PMA-VwZMM1PpJXg6q4efa/Yin%20Bi%20-%20g.zip?dl=0', None, 'Yin Bi - g.zip'], 60 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAD9-W_I4n3lTSCMgFGgJ5Dra/Yin%20Bi%20-%20h.zip?dl=0', None, 'Yin Bi - h.zip'], 61 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AADCm3yMnJcwGK70bYk-ycF0a/Yin%20Bi%20-%20i.zip?dl=0', None, 'Yin Bi - i.zip'], 62 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAChe4QTo2DduVOVuT5hN9fxa/Yin%20Bi%20-%20k.zip?dl=0', None, 'Yin Bi - k.zip'], 63 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAAeUazC7PHK85V6wkEub1iMa/Yin%20Bi%20-%20l.zip?dl=0', None, 'Yin Bi - l.zip'], 64 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AADrzW_ts5UxlulXaItiNuCSa/Yin%20Bi%20-%20m.zip?dl=0', None, 'Yin Bi - m.zip'], 65 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AABlYTbweHA22nA6PFujaBKFa/Yin%20Bi%20-%20n.zip?dl=0', None, 'Yin Bi - n.zip'], 66 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AABzMSVYuZ0hb5FsUoHB53xBa/Yin%20Bi%20-%20o.zip?dl=0', None, 'Yin Bi - o.zip'], 67 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AACOJ3z96KaLAMxgNLP1eOwga/Yin%20Bi%20-%20p.zip?dl=0', None, 'Yin Bi - p.zip'], 68 | 
['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAC6B6UajjSuf6aYYOfOY3I7a/Yin%20Bi%20-%20q.zip?dl=0', None, 'Yin Bi - q.zip'], 69 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAC-Gq6qzv0yiAnvozEozqoaa/Yin%20Bi%20-%20r.zip?dl=0', None, 'Yin Bi - r.zip'], 70 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AABDbuK8B0Mferpf0x3xbDJQa/Yin%20Bi%20-%20s.zip?dl=0', None, 'Yin Bi - s.zip'], 71 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAAEgawjqMHuY_TvqCYNC-uIa/Yin%20Bi%20-%20t.zip?dl=0', None, 'Yin Bi - t.zip'], 72 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AABTIlScbaSqFIahMy_NRNUna/Yin%20Bi%20-%20u.zip?dl=0', None, 'Yin Bi - u.zip'], 73 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAAp8JvzLPfhGlG5jL943W_pa/Yin%20Bi%20-%20v.zip?dl=0', None, 'Yin Bi - v.zip'], 74 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AAA4qvsatKEDeoykc2I4a6FRa/Yin%20Bi%20-%20w.zip?dl=0', None, 'Yin Bi - w.zip'], 75 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AABfnDu7rtZve1w9VVQQwuFia/Yin%20Bi%20-%20x.zip?dl=0', None, 'Yin Bi - x.zip'], 76 | ['https://www.dropbox.com/sh/ibq0jsicatn7l6r/AADKEQBAXFQ9P0GoGHTY4ig8a/Yin%20Bi%20-%20y.zip?dl=0', None, 'Yin Bi - y.zip'] 77 | ] 78 | directory = 'data/ASL-DVS/'#'data/nmnist/' 79 | resources_local = [directory+'a', directory+'b', directory+'c', directory+'d',directory+'e',directory+'f',directory+'g',directory+'h',directory+'i', 80 | directory+'k',directory+'l',directory+'m',directory+'n',directory+'o',directory+'p',directory+'q',directory+'r',directory+'s', 81 | directory+'t',directory+'u',directory+'v',directory+'w',directory+'x',directory+'y'] 82 | 83 | def __init__( 84 | self, 85 | root, 86 | train=True, 87 | transform=None, 88 | target_transform=None, 89 | download_and_create=True, 90 | chunk_size = 100): 91 | 92 | self.n = 0 93 | self.nclasses = self.num_classes = 10 94 | self.download_and_create = download_and_create 95 | self.root = root 96 | self.train = train 97 | self.chunk_size = chunk_size 98 | 99 | super(DVSSignDataset, self).__init__( 
100 | root, 101 | transform=transform, 102 | target_transform=target_transform ) 103 | 104 | with h5py.File(root, 'r', swmr=True, libver="latest") as f: 105 | try: 106 | if train: 107 | self.n = f['extra'].attrs['Ntrain'] 108 | self.keys = f['extra']['train_keys'][()] 109 | self.keys_by_label = f['extra']['train_keys_by_label'][()] 110 | else: 111 | self.n = f['extra'].attrs['Ntest'] 112 | self.keys = f['extra']['test_keys'][()] 113 | self.keys_by_label = f['extra']['test_keys_by_label'][()] 114 | #self.keys_by_label[:,:] -= self.keys_by_label[0,0] #normalize 115 | except AttributeError: 116 | print('Attribute not found in hdf5 file. You may be using an old hdf5 build. Delete {0} and run again'.format(root)) 117 | raise 118 | 119 | 120 | def download(self): 121 | isexisting = super(DVSSignDataset, self).download() 122 | 123 | def create_hdf5(self): 124 | create_events_hdf5(self.directory, self.root) 125 | 126 | 127 | def __len__(self): 128 | return self.n 129 | 130 | def __getitem__(self, key): 131 | #Important to open and close in getitem to enable num_workers>0 132 | with h5py.File(self.root, 'r', swmr=True, libver="latest") as f: 133 | if self.train: 134 | key = f['extra']['train_keys'][key] 135 | else: 136 | key = f['extra']['test_keys'][key] 137 | data, target = sample( 138 | f, 139 | key, 140 | T = self.chunk_size) 141 | 142 | if self.transform is not None: 143 | data = self.transform(data) 144 | 145 | if self.target_transform is not None: 146 | target = self.target_transform(target) 147 | 148 | return data, target 149 | 150 | def sample(hdf5_file, 151 | key, 152 | T = 300): 153 | dset = hdf5_file['data'][str(key)] 154 | label = dset['labels'][()] 155 | tend = dset['times'][-1] 156 | start_time = 0 157 | 158 | tmad = get_tmad_slice(dset['times'][()], dset['addrs'][()], start_time, T*1000) 159 | tmad[:,0]-=tmad[0,0] 160 | return tmad, label 161 | 162 | def create_datasets( 163 | root = 'data/ASL-DVS/dvssign.hdf5', 164 | batch_size = 72 , 165 | chunk_size_train 
= 300, 166 | chunk_size_test = 300, 167 | ds = 1, 168 | dt = 1000, 169 | transform_train = None, 170 | transform_test = None, 171 | target_transform_train = None, 172 | target_transform_test = None): 173 | 174 | size = [2, 240//ds, 180//ds] 175 | 176 | if transform_train is None: 177 | transform_train = Compose([ 178 | CropDims(low_crop=[0,0], high_crop=[240,180], dims=[2,3]), 179 | Downsample(factor=[dt,1,1,1]), 180 | ToCountFrame(T = chunk_size_train, size = size), 181 | ToTensor()]) 182 | if transform_test is None: 183 | transform_test = Compose([ 184 | CropDims(low_crop=[0,0], high_crop=[240,180], dims=[2,3]), 185 | Downsample(factor=[dt,1,1,1]), 186 | ToCountFrame(T = chunk_size_test, size = size), 187 | ToTensor()]) 188 | if target_transform_train is None: 189 | target_transform_train =Compose([Repeat(chunk_size_train), toOneHot(NUM_CLASSES)]) 190 | if target_transform_test is None: 191 | target_transform_test = Compose([Repeat(chunk_size_test), toOneHot(NUM_CLASSES)]) 192 | 193 | train_ds = DVSSignDataset(root,train=True, 194 | transform = transform_train, 195 | target_transform = target_transform_train, 196 | chunk_size = chunk_size_train) 197 | 198 | test_ds = DVSSignDataset(root, transform = transform_test, 199 | target_transform = target_transform_test, 200 | train=False, 201 | chunk_size = chunk_size_test) 202 | 203 | return train_ds, test_ds 204 | 205 | def create_dataloader( 206 | root = 'data/ASL-DVS/dvssign.hdf5', 207 | batch_size = 72 , 208 | chunk_size_train = 100, 209 | chunk_size_test = 100, 210 | ds = 1, 211 | dt = 1000, 212 | transform_train = None, 213 | transform_test = None, 214 | target_transform_train = None, 215 | target_transform_test = None, 216 | **dl_kwargs): 217 | 218 | train_d, test_d = create_datasets( 219 | root = root, 220 | batch_size = batch_size, 221 | chunk_size_train = chunk_size_train, 222 | chunk_size_test = chunk_size_test, 223 | ds = ds, 224 | dt = dt, 225 | transform_train = transform_train, 226 | transform_test = 
transform_test, 227 | target_transform_train = target_transform_train, 228 | target_transform_test = target_transform_test) 229 | 230 | 231 | train_dl = torch.utils.data.DataLoader(train_d, shuffle=True, batch_size=batch_size, **dl_kwargs) 232 | test_dl = torch.utils.data.DataLoader(test_d, shuffle=False, batch_size=batch_size, **dl_kwargs) 233 | 234 | return train_dl, test_dl 235 | 236 | 237 | 238 | -------------------------------------------------------------------------------- /torchneuromorphic/ntidigits/ntidigits_dataloaders.py: -------------------------------------------------------------------------------- 1 | import struct 2 | import time 3 | import numpy as np 4 | import scipy.misc 5 | import h5py 6 | import torch.utils.data 7 | from ..neuromorphic_dataset import NeuromorphicDataset 8 | from ..events_timeslices import * 9 | from ..transforms import * 10 | import os 11 | 12 | mapping = { 0 :'0', 13 | 1 :'1', 14 | 2 :'2', 15 | 3 :'3', 16 | 4 :'4', 17 | 5 :'5', 18 | 6 :'6', 19 | 7 :'7', 20 | 8 :'8', 21 | 9 :'9', 22 | 10: '10'} 23 | 24 | def one_hot1d(mbt, num_classes): 25 | out = np.zeros([num_classes], dtype='float32') 26 | out[int(mbt)] = 1 27 | return out 28 | 29 | def create_events_hdf5(directory, hdf5_filename): 30 | train_evs, train_labels_isolated = load_tidigit_hdf5(directory+'/n-tidigits.hdf5', train=True) 31 | test_evs, test_labels_isolated = load_tidigit_hdf5(directory+'/n-tidigits.hdf5', train=False) 32 | border = len(train_labels_isolated) 33 | 34 | tmad = train_evs + test_evs 35 | labels = train_labels_isolated + test_labels_isolated 36 | test_keys = [] 37 | train_keys = [] 38 | 39 | with h5py.File(hdf5_filename, 'w') as f: 40 | f.clear() 41 | key = 0 42 | metas = [] 43 | data_grp = f.create_group('data') 44 | extra_grp = f.create_group('extra') 45 | for i,data in enumerate(tmad): 46 | times = data[:,0] 47 | addrs = data[:,1:] 48 | label = labels[i] 49 | out = [] 50 | istrain = i1e-3 142 | # learn_mask = 
class NTIdigitsDataset(NeuromorphicDataset):
    """N-TIDIGITS spoken-digit event dataset backed by a single packed HDF5 file.

    Each item is an event array (time in column 0, address columns after) plus
    an integer digit label; train/test membership is recorded in the file's
    'extra' group.
    """
    # Remote archive fetched by NeuromorphicDataset.download() into `directory`.
    resources_url = [['https://www.dropbox.com/s/vfwwrhlyzkax4a2/n-tidigits.hdf5?dl=1',None, 'n-tidigits.hdf5']]
    directory = 'data/tidigits/'
    resources_local = [directory+'/n-tidigits.hdf5']


    def __init__(
            self,
            root,
            train=True,
            transform=None,
            target_transform=None,
            download_and_create=True,
            chunk_size = 500):
        """
        Args:
            root: path to the packed HDF5 file.
            train: select the train split (True) or the test split (False).
            transform: optional callable applied to the event array.
            target_transform: optional callable applied to the label.
            download_and_create: allow download/creation of the HDF5 file.
            chunk_size: time window per item, forwarded to sample() as T.
        """
        self.n = 0
        self.download_and_create = download_and_create
        self.root = root
        self.train = train
        self.chunk_size = chunk_size

        # Base-class init may trigger download()/create_hdf5() as side effects.
        super(NTIdigitsDataset, self).__init__(
                root,
                transform=transform,
                target_transform=target_transform )

        with h5py.File(root, 'r', swmr=True, libver="latest") as f:
            if train:
                self.n = f['extra'].attrs['Ntrain']
                # NOTE(review): this h5py dataset handle becomes invalid as soon
                # as the `with` block closes the file; self.keys appears unused
                # later, but any deferred read through it would fail.
                self.keys = f['extra']['train_keys']
            else:
                self.n = f['extra'].attrs['Ntest']
                self.keys = f['extra']['test_keys']

    def download(self):
        # Delegates to the base class; the return value is not used.
        isexisting = super(NTIdigitsDataset, self).download()

    def create_hdf5(self):
        # Build the packed HDF5 file from the raw n-tidigits recording file.
        create_events_hdf5(self.directory, self.root)


    def __len__(self):
        # Number of samples in the selected (train or test) split.
        return self.n

    def __getitem__(self, key):
        #Important to open and close in getitem to enable num_workers>0
        with h5py.File(self.root, 'r', swmr=True, libver="latest") as f:
            if not self.train:
                # Test samples are stored after the Ntrain training samples.
                key = key + f['extra'].attrs['Ntrain']
            data, target = sample(
                    f,
                    key,
                    T = self.chunk_size,
                    shuffle=self.train)

        if self.transform is not None:
            data = self.transform(data)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return data, target

def sample(hdf5_file,
        key,
        T = 500,
        shuffle = False):
    """Slice the first T*1000 time units of events for sample `key`.

    Returns (tmad, label) where tmad is the event array with its time column
    re-referenced to the first event of the slice.

    NOTE(review): `shuffle` and `tend` are currently unused — confirm whether
    time-shuffling was ever intended here.
    """
    dset = hdf5_file['data'][str(key)]
    label = dset['labels'][()]
    tend = dset['times'][-1]
    start_time = 0

    # T*1000 presumably converts ms -> us to match stored timestamps — TODO confirm.
    tmad = get_tmad_slice(dset['times'][()], dset['addrs'][()], start_time, T*1000)
    tmad[:,0]-=tmad[0,0]
    return tmad, label
266 | transform_train = None, 267 | transform_test = None, 268 | target_transform_train = None, 269 | target_transform_test = None, 270 | **dl_kwargs): 271 | 272 | size = [64//ds[0], 1, 1] 273 | 274 | if transform_train is None: 275 | transform_train = Compose([ 276 | Downsample(factor=[dt,ds[0]]), 277 | ToChannelHeightWidth(), 278 | ToCountFrame(T = chunk_size_train, size = size), 279 | ToTensor()]) 280 | if transform_test is None: 281 | transform_test = Compose([ 282 | Downsample(factor=[dt,ds[0]]), 283 | ToChannelHeightWidth(), 284 | ToCountFrame(T = chunk_size_test, size = size), 285 | ToTensor()]) 286 | if target_transform_train is None: 287 | target_transform_train =Compose([Repeat(chunk_size_train), toOneHot(len(mapping))]) 288 | if target_transform_test is None: 289 | target_transform_test = Compose([Repeat(chunk_size_test), toOneHot(len(mapping))]) 290 | 291 | train_d = NTIdigitsDataset(root,train=True, 292 | transform = transform_train, 293 | target_transform = target_transform_train, 294 | chunk_size = chunk_size_train) 295 | 296 | train_dl = torch.utils.data.DataLoader(train_d, shuffle=True, batch_size=batch_size, **dl_kwargs) 297 | 298 | test_d = NTIdigitsDataset(root, transform = transform_test, 299 | target_transform = target_transform_test, 300 | train=False, 301 | chunk_size = chunk_size_test) 302 | 303 | test_dl = torch.utils.data.DataLoader(test_d, shuffle=False, batch_size=batch_size, **dl_kwargs) 304 | 305 | return train_dl, test_dl 306 | -------------------------------------------------------------------------------- /torchneuromorphic/double_dvssign/doubledvssign_dataloaders.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #----------------------------------------------------------------------------- 3 | # Author: Kenneth Stewart and Emre Neftci 4 | # 5 | # Creation Date : Fri 01 Dec 2017 10:05:17 PM PST 6 | # Last Modified : Sun 29 Jul 2018 01:39:06 PM PDT 7 | # 8 | # 
Copyright : (c) 9 | # Licence : Apache License, Version 2.0 10 | #----------------------------------------------------------------------------- 11 | 12 | import struct 13 | import time, copy 14 | import numpy as np 15 | import scipy.misc 16 | import h5py 17 | import torch.utils.data 18 | from ..dvssign.dvssign_dataloaders import DVSSignDataset, sample, create_datasets 19 | from ..neuromorphic_dataset import NeuromorphicDataset 20 | from ..events_timeslices import * 21 | from ..transforms import * 22 | #from .create_hdf5_sign import create_events_hdf5 23 | import os 24 | import torchmeta 25 | from torchmeta.transforms import Categorical 26 | 27 | import random 28 | 29 | # Data taken from https://github.com/PIX2NVS/NVS2Graph 30 | 31 | 32 | NUM_CLASSES = 24 # A-Y excluding j 33 | 34 | mapping = { 'a':0, 35 | 'b':1, 36 | 'c':2, 37 | 'd':3, 38 | 'e':4, 39 | 'f':5, 40 | 'g':6, 41 | 'h':7, 42 | 'i':8, 43 | 'k':9, 44 | 'l':10, 45 | 'm':11, 46 | 'n':12, 47 | 'o':13, 48 | 'p':14, 49 | 'q':15, 50 | 'r':16, 51 | 's':17, 52 | 't':18, 53 | 'u':19, 54 | 'v':20, 55 | 'w':21, 56 | 'x':22, 57 | 'y':23} 58 | 59 | double_digit_letters = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] 60 | 61 | 62 | # NOTE: These splits only use the first 10 classes. 
def split_generator():
    """Randomly partition all 24*24 = 576 ordered letter-pair classes.

    Each combined class is encoded "<left>.<right>" with indices into
    `mapping` (24 ASL letters, A-Y excluding J). The shuffled pool is split
    into 369 train / 92 val / 115 test classes (~64/16/20%), matching the
    historical hard-coded sizes so saved splits remain comparable.

    Returns:
        dict with keys 'train', 'val', 'test', each a list of "i.j" strings.

    Note: consumes one random.shuffle() from the global `random` state; seed
    beforehand for reproducibility.
    """
    # Every ordered pair of class indices, encoded as "left.right".
    label_combos = [str(i) + "." + str(j) for i in range(24) for j in range(24)]

    random.shuffle(label_combos)

    n_train = 369   # ~64% of the 576 combinations
    n_val = 92      # ~16%; the remaining 115 (~20%) go to test

    return {
        'train': label_combos[:n_train],
        'val': label_combos[n_train:n_train + n_val],
        'test': label_combos[n_train + n_val:],
    }
def create_datasets(
        root = 'data/ASL-DVS/dvssign.hdf5',
        batch_size = 72,
        chunk_size_train = 100,
        chunk_size_test = 100,
        ds = 1,
        dt = 1000,
        transform_train = None,
        transform_test = None,
        target_transform_train = None,
        target_transform_test = None,
        nclasses = 5,
        samples_per_class = 2,
        samples_per_test = 2,
        classes_meta = None):
    """Create paired train/test double-sign datasets for a random meta-task.

    `classes_meta` defaults to np.arange(100) (10x10 letter pairs); pass a
    different array to draw task classes from another pool.

    NOTE(review): this function instantiates ``DoubleDVSSignDataset``, which is
    not defined in this module (only ``DoubleDVSSignClassDataset`` is) — calling
    it raises NameError. Kept as-is pending clarification of the intended class.
    """
    # None sentinel instead of a mutable np.arange default argument.
    if classes_meta is None:
        classes_meta = np.arange(100, dtype='int')

    size = [2, 240//ds, 180//ds]

    if transform_train is None:
        transform_train = Compose([
            CropDims(low_crop=[0,0], high_crop=[240,180], dims=[2,3]),
            Downsample(factor=[dt,1,ds,ds]),
            ToCountFrame(T = chunk_size_train, size = size),
            ToTensor()])
    if transform_test is None:
        transform_test = Compose([
            CropDims(low_crop=[0,0], high_crop=[240,180], dims=[2,3]),
            Downsample(factor=[dt,1,ds,ds]),
            ToCountFrame(T = chunk_size_test, size = size),
            ToTensor()])
    if target_transform_train is None:
        target_transform_train = Compose([Repeat(chunk_size_train), toOneHot(NUM_CLASSES)])
    if target_transform_test is None:
        target_transform_test = Compose([Repeat(chunk_size_test), toOneHot(NUM_CLASSES)])

    # Draw the task classes once so train and test share the same pairs.
    labels_u = np.random.choice(classes_meta, nclasses, replace=False)

    train_ds = DoubleDVSSignDataset(root, train=True,
                 transform = transform_train,
                 target_transform = target_transform_train,
                 chunk_size = chunk_size_train,
                 nclasses = nclasses,
                 samples_per_class = samples_per_class,
                 labels_u = labels_u)

    test_ds = DoubleDVSSignDataset(root, transform = transform_test,
                 target_transform = target_transform_test,
                 train=False,
                 chunk_size = chunk_size_test,
                 nclasses = nclasses,
                 samples_per_class = samples_per_test,
                 labels_u = labels_u)

    return train_ds, test_ds

class ClassDVSSignDataset(torchmeta.utils.data.ClassDataset):
    """torchmeta ClassDataset where each "class" is a left.right letter pair.

    The train/val/test class split is either loaded from a JSON file given via
    `meta_split`, or freshly generated with split_generator() and persisted to
    doubledvssign_splits_full.json for later reuse.
    """
    def __init__(self, root = 'data/ASL-DVS/dvssign.hdf5', chunk_size=100, meta_train=False, meta_val=False, meta_test=False, meta_split='', transform=None, target_transform=None, download=False, class_augmentations=None):
        self.root = root
        self.chunk_size = chunk_size

        # NOTE(review): split_name is unbound if none of the meta_* flags is set.
        if meta_train:
            split_name = 'train'
        if meta_val:
            split_name = 'val'
        if meta_test:
            split_name = 'test'
        self.split_name = split_name

        self.transform = transform

        super(ClassDVSSignDataset, self).__init__(
            meta_train=meta_train,
            meta_val=meta_val,
            meta_test=meta_test,
            meta_split=meta_split,
            class_augmentations=class_augmentations)

        if meta_split:
            # meta_split is a path to a JSON file holding a previously saved split.
            import json

            with open(meta_split) as json_file:
                splits = json.load(json_file)

            print("Type:", type(splits))
            print(f"\n{split_name}:", splits[split_name])
        else:
            splits = split_generator()

            import json

            # Persist the generated split so the same tasks can be reloaded.
            # (Was `json = json.dumps(...)`, which shadowed the module name and
            # left the file handle unclosed.)
            with open("doubledvssign_splits_full.json", "w") as split_file:
                split_file.write(json.dumps(splits))


        self._labels = [s for s in splits[split_name]]
        self._num_classes = len(self._labels)

    @property
    def labels(self):
        # The "i.j" pair strings of the selected meta-split.
        return self._labels

    @property
    def num_classes(self):
        return self._num_classes

    def __getitem__(self, index):
        # One per-pair dataset per class index.
        label = self._labels[index]
        d = DoubleDVSSignClassDataset(root =self.root, train= self.meta_train, label_u = label, transform = self.transform, target_transform = None, chunk_size = self.chunk_size)
        d.index = index
        #d.target_transform_append = lambda x: None
        return d

def create_class_dataset(dset, meta_split = 'train'):
    """Split `dset` into one torch Subset per class label (0..nclasses-1)."""
    ds = []
    for n in range(dset.nclasses):
        indices = dset.keys_by_label[n]
        d = torch.utils.data.Subset(dset, indices)
        ds.append(d)
    return ds
def find_first(a, tgt):
    # Leftmost insertion index of tgt in the sorted sequence a.
    return bisect.bisect_left(a, tgt)

class Jitter(object):
    """Randomly translate (by up to xs/ys pixels) and rotate (by up to +-th/2
    degrees) an event-frame tensor of shape (N, T, C, H, W).

    With xs == ys == th == 0 the input is returned untouched.
    """
    def __init__(self, xs=2,ys=2,th=30, size=[2, 32, 32]):
        self.xs = xs
        self.ys = ys
        self.th = th
        self.size = size

    def __call__(self, data):
        disabled = (self.xs == 0) and (self.ys == 0) and (self.th == 0)
        if disabled:
            return data

        # Draw one translation and one rotation for the whole sample.
        dx = np.random.randint(2 * self.xs) - self.xs
        dy = np.random.randint(2 * self.ys) - self.ys
        angle = (np.random.rand() - 0.5) * self.th / 180 * np.pi
        sin_a = np.sin(angle)
        cos_a = np.cos(angle)

        out = torch.zeros((data.shape[0], data.shape[1],
                           self.size[0], self.size[1], self.size[2]))

        def clamp(v, hi):
            # Clip a target coordinate into [0, hi].
            return min(max(v, 0), hi)

        for col in range(self.size[1]):
            for row in range(self.size[2]):
                new_col = clamp(round(col * cos_a - row * sin_a + dx), self.size[1] - 1)
                new_row = clamp(round(col * sin_a + row * cos_a + dy), self.size[2] - 1)
                out[:, :, :, new_col, new_row] = data[:, :, :, col, row]
        return out

def shuffle_along_axis(a, axis):
    # Sort uniform noise of the same shape; its argsort is a random permutation.
    order = np.random.rand(*a.shape).argsort(axis=axis)
    return np.take_along_axis(a, order, axis=axis)

class toOneHot(object):
    """Scatter an integer index array of shape (N, k) into one-hot float rows
    of shape (N, num_classes)."""
    def __init__(self, num_classes):
        self.num_classes = num_classes

    def __call__(self, integers):
        onehot = torch.zeros(integers.shape[0], self.num_classes, dtype=torch.float32)
        return onehot.scatter_(1, torch.LongTensor(integers), 1)

class toDtype(object):
    """Convert array-like input to a torch tensor of the configured dtype."""
    def __init__(self, dtype):
        self.dtype = dtype

    def __call__(self, integers):
        return torch.tensor(integers, dtype=self.dtype)
Applied to all dimensions including time 82 | """ 83 | def __init__(self, factor): 84 | assert isinstance(factor, int) or hasattr(factor, '__iter__') 85 | if hasattr(factor, '__iter__'): 86 | for f in factor: 87 | assert isinstance(f, int) 88 | self.factor = factor 89 | 90 | def __call__(self, tmad): 91 | return tmad//self.factor 92 | 93 | def __repr__(self): 94 | return self.__class__.__name__ + '(dt = {0}, dp = {1}, dx = {2}, dy = {3})' 95 | 96 | #class Crop(object): 97 | # def __init__(self, low_crop, high_crop): 98 | # ''' 99 | # Crop all dimensions 100 | # ''' 101 | # self.low = low_crop 102 | # self.high = high_crop 103 | # 104 | # def __call__(self, tmad): 105 | # idx = np.where(np.any(tmad>high_crop, axis=1)) 106 | # tmad = np.delete(tmad,idx,0) 107 | # idx = np.where(np.any(tmad=self.t_min) * (tmad[:,0]=self.high_crop[i]) 145 | tmad = np.delete(tmad,idx,0) 146 | idx = np.where(tmad[:,d]=self.att_shape, axis=1)) 164 | tmad = np.delete(tmad,idx,0) 165 | idx = np.where(np.any(tmad[:, 2:]<[0,0], axis=1)) 166 | tmad = np.delete(tmad,idx,0) 167 | return tmad 168 | 169 | def __repr__(self): 170 | return self.__class__.__name__ + '()' 171 | 172 | class Attention(object): 173 | def __init__(self, n_attention_events, size): 174 | ''' 175 | Crop around the median event in the last n_events. 
class ToChannelHeightWidth(object):
    """Pad 2-column events (t, addr) with two zero columns so they address a
    (C, 1, 1) volume as (t, addr, 0, 0); 4-column events pass through as-is.

    Raises:
        TypeError: if the event array has any other number of columns.
    """
    def __call__(self, tmad):
        n = tmad.shape[1]
        if n==2:
            o = np.zeros(tmad.shape[0], dtype=tmad.dtype)
            return np.column_stack([tmad, o, o])

        elif n==4:
            return tmad

        else:
            raise TypeError('Wrong number of dimensions. Found {0}, expected 1 or 3'.format(n-1))

    def __repr__(self):
        return self.__class__.__name__ + '()'

class ToCountFrame(object):
    """Convert address events to a (T x C x H x W) int16 array of per-bin
    event counts.

    Expects an event array whose first column holds already-binned,
    non-decreasing integer timestamps; the remaining len(size) columns index
    the count volume. Events with time >= T are ignored.
    """
    def __init__(self, T=500, size=[2, 32, 32]):
        self.T = T
        self.size = size
        self.ndim = len(size)

    def __call__(self, tmad):
        times = tmad[:,0]
        addrs = tmad[:,1:]
        # (The previously computed t_start/t_end locals were unused — removed.)

        ts = range(0, self.T)
        chunks = np.zeros([len(ts)] + self.size, dtype='int16')
        idx_start = 0
        idx_end = 0
        for i, t in enumerate(ts):
            # times is sorted: advance to the first event with time >= t+1.
            idx_end += int(np.searchsorted(times[idx_end:], t+1, side='left'))
            if idx_end > idx_start:
                ee = addrs[idx_start:idx_end]
                # Accumulate with unbuffered add so repeated addresses count.
                i_pol_x_y = tuple([i] + [ee[:, j] for j in range(self.ndim)])
                np.add.at(chunks, i_pol_x_y, 1)
            idx_start = idx_end
        return chunks

    def __repr__(self):
        return self.__class__.__name__ + '(T={0})'.format(self.T)
class FilterEvents(object):
    """Temporally filter a count-frame tensor with a 3d convolution.

    Accepts either a single sample of shape (T, C, H, W) or a batch of shape
    (N, T, C, H, W) and returns the filtered tensor in the same layout.

    Args:
        kernel: conv3d weight of shape (C_out, C_in/groups, T_k, 1, 1). Required.
        groups: channel groups forwarded to conv3d.
        tpad: temporal padding; defaults to half the kernel's temporal length.
    """
    def __init__(self, kernel = None, groups = 1, tpad = None):
        if kernel is None:
            # Fail fast with a clear message instead of the AttributeError the
            # old code raised when the default was (mis)used.
            raise ValueError('FilterEvents requires a convolution kernel')
        self.kernel = kernel
        self.groups = groups
        if tpad is None:
            self.tpad = self.kernel.shape[2]//2
        else:
            self.tpad = tpad

    def __call__(self, chunks):
        chunks = chunks.to(self.kernel.device)
        if len(chunks.shape)==4:
            # (T, C, H, W) -> (1, C, T, H, W) for conv3d.
            data = chunks.permute([1,0,2,3])
            data = data.unsqueeze(0)
        else:
            # (N, T, C, H, W) -> (N, C, T, H, W).
            data = chunks.permute([0,2,1,3,4])

        Y = torch.nn.functional.conv3d(data, self.kernel, groups=self.groups, padding= [self.tpad,0,0])
        # Back to time-major layout.
        Y = Y.transpose(1,2)
        if len(chunks.shape)==4:
            return Y[0]
        else:
            return Y

class ExpFilterEvents(FilterEvents):
    """Exponentially decaying (PSP-like) temporal filter, applied to each
    channel independently. The kernel is normalized to unit sum and flipped
    because conv3d is cross-correlation, not convolution."""
    def __init__(self, length, tau=200, channels=2, tpad = None, device='cpu', **kwargs):
        t = torch.arange(0.,length,1.)
        kernel = torch.ones(channels, 1, len(t), 1, 1)
        exp_kernel = torch.exp(-t/tau)
        exp_kernel/=exp_kernel.sum()
        exp_kernel=torch.flip(exp_kernel,[0]) #Conv3d is cross correlation not convolution
        # One group per channel so each channel is filtered on its own.
        # (Was hard-coded to 2, which broke conv3d's group shape check for
        # any channels != 2; unchanged for the default channels=2.)
        groups = channels

        for i in range(channels):
            kernel[i,0,:,0,0]=exp_kernel
        kernel = kernel.to(device)

        super(ExpFilterEvents, self).__init__(kernel, groups, tpad, **kwargs)

class Rescale(object):
    """Rescale the event sum Tensor by the given factor.

    Args:
        factor: desired rescale factor (multiplies the input elementwise).
    """
    def __init__(self, factor):
        self.factor = factor

    def __call__(self, chunks):
        return chunks*self.factor

    def __repr__(self):
        return self.__class__.__name__ + '({0})'.format(self.factor)



class Repeat(object):
    '''
    Replicate np.array (C) as (n_repeat X C). This is useful to transform
    sample labels into per-timestep label sequences.
    '''
    def __init__(self, n_repeat):
        self.n_repeat = n_repeat

    def __call__(self, target):
        return np.tile(np.expand_dims(target,0),[self.n_repeat,1])

    def __repr__(self):
        return self.__class__.__name__ + '()'

class ToTensor(object):
    """Convert a ``numpy.ndarray`` to a torch.FloatTensor of the same shape,
    placed on the configured device."""

    def __init__(self, device='cpu'):
        self.device=device

    def __call__(self, frame):
        """
        Args:
            frame (numpy.ndarray): numpy array of frames

        Returns:
            Tensor: Converted data.
        """
        return torch.FloatTensor(frame).to(self.device)

    def __repr__(self):
        return self.__class__.__name__ + '(device:{0})'.format(self.device)
380 | 381 | Converts a numpy.ndarray to a torch.FloatTensor of the same shape 382 | """ 383 | 384 | def __init__(self): 385 | from jax import numpy as jnp 386 | self._jaxf = jnp.array 387 | 388 | def __call__(self, frame): 389 | """ 390 | Args: 391 | frame (numpy.ndarray): numpy array of frames 392 | 393 | Returns: 394 | Jax ND Array Tensor: Converted data. 395 | """ 396 | return self._jaxf(frame) 397 | 398 | def __repr__(self): 399 | return self.__class__.__name__ 400 | -------------------------------------------------------------------------------- /torchneuromorphic/nomniglot/nomniglot_dataloaders.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #----------------------------------------------------------------------------- 3 | # Author: Kenneth Stewart and Emre Neftci 4 | # 5 | # Creation Date : Fri 01 Dec 2017 10:05:17 PM PST 6 | # Last Modified : Sun 29 Jul 2018 01:39:06 PM PDT 7 | # 8 | # Copyright : (c) 9 | # Licence : Apache License, Version 2.0 10 | #----------------------------------------------------------------------------- 11 | 12 | import struct 13 | import time, copy 14 | import numpy as np 15 | import scipy.misc 16 | import h5py 17 | import torch.utils.data 18 | from ..neuromorphic_dataset import NeuromorphicDataset 19 | from ..events_timeslices import * 20 | from ..transforms import * 21 | from .create_hdf5_omniglot import create_events_hdf5 22 | import os 23 | import torchmeta 24 | from torchmeta.transforms import Categorical 25 | import pdb 26 | 27 | NUM_CLASSES = 50 # for each writing system 28 | 29 | 30 | train_mapping = { 31 | 'Alphabet_of_the_Magi': 0, 32 | 'Anglo-Saxon_Futhorc': 1, 33 | 'Arcadian': 2, 34 | 'Armenian': 3, 35 | 'Asomtavruli_(Georgian)': 4, 36 | 'Balinese': 5, 37 | 'Bengali': 6, 38 | 'Blackfoot_(Canadian_Aboriginal_Syllabics)': 7, 39 | 'Braille': 8, 40 | 'Burmese_(Myanmar)': 9, 41 | 'Cyrillic': 10, 42 | 'Early_Aramaic': 11, 43 | 'Futurama': 12, 44 | 'Grantha': 
13, 45 | 'Greek': 14, 46 | 'Gujarati': 15 47 | } 48 | 49 | validation_mapping = { 50 | 'Hebrew': 16, 51 | 'Inuktitut_(Canadian_Aboriginal_Syllabics)': 17, 52 | 'Japanese_(hiragana)': 18, 53 | 'Japanese_(katakana)': 19, 54 | 'Korean': 20, 55 | 'Latin': 21, 56 | 'Malay_(Jawi_-_Arabic)': 22, 57 | 'Mkhedruli_(Georgian)': 23, 58 | 'N_Ko': 24, 59 | 'Ojibwe_(Canadian_Aboriginal_Syllabics)': 25, 60 | 'Sanskrit': 26, 61 | 'Syriac_(Estrangelo)': 27, 62 | 'Tagalog': 28, 63 | 'Tifinagh': 29 64 | } 65 | test_mapping = { 66 | 'Angelic': 30, 67 | 'Atemayar_Qelisayer': 31, 68 | 'Atlantean': 32, 69 | 'Aurek-Besh': 33, 70 | 'Avesta': 34, 71 | 'Ge_ez': 35, 72 | 'Glagolitic': 36, 73 | 'Gurmukhi': 37, 74 | 'Kannada': 38, 75 | 'Keble': 39, 76 | 'Malayalam': 40, 77 | 'Manipuri': 41, 78 | 'Mongolian': 42, 79 | 'Old_Church_Slavonic_(Cyrillic)': 43, 80 | 'Oriya': 44, 81 | 'Sylheti': 45, 82 | 'Syriac_(Serto)': 46, 83 | 'Tengwar': 47, 84 | 'Tibetan': 48, 85 | 'ULOG': 49 86 | } 87 | 88 | def sample(hdf5_file, 89 | key, 90 | chunk_size = 300): 91 | ''' 92 | 93 | ''' 94 | dset = hdf5_file['data'][str(key)] 95 | label = dset['labels'][()] 96 | tend = dset['times'][-1] 97 | start_time = dset['times'][0] #0 98 | 99 | tmad = get_tmad_slice(dset['times'][()], dset['addrs'][()], start_time, chunk_size) 100 | if tmad.size!=0: 101 | tmad[:,0]-=tmad[0,0] 102 | 103 | return tmad, label 104 | 105 | class NOmniglotDataset(NeuromorphicDataset): 106 | resources_url = [['https://figshare.com/ndownloader/files/31104472',None, 'dvs_background_1.rar'], 107 | ['https://figshare.com/ndownloader/files/31104475', None, 'dvs_background_2.rar'], 108 | ['https://figshare.com/ndownloader/files/31104481', None, 'dvs_evaluation.rar'],] 109 | directory = 'data/nomniglot/'#'data/nmnist/' 110 | resources_local = [directory+'dvs_background_1', directory+'dvs_background_2', directory+'dvs_evaluation'] 111 | 112 | def __init__( 113 | self, 114 | root, 115 | train=True, 116 | valid=False, 117 | test=False, 118 | transform=None, 
119 | target_transform=None, 120 | label=None, 121 | download=True, 122 | chunk_size = 100): 123 | 124 | self.label = label # If not None, do meta learning 125 | 126 | self.n = 0 127 | 128 | self.download_and_create = download 129 | self.root = root 130 | self.train = train 131 | self.chunk_size = chunk_size 132 | 133 | self.train = train 134 | self.valid = valid 135 | self.test = test 136 | 137 | self.target_transform = target_transform 138 | 139 | #print("TARGET TRANSFORM", target_transform) 140 | 141 | super(NOmniglotDataset, self).__init__( 142 | root, 143 | transform=transform, 144 | target_transform=target_transform) 145 | 146 | 147 | #if label is None: 148 | with h5py.File(root, 'r', swmr=True, libver="latest") as f: 149 | try: 150 | if self.train: 151 | self.n = f['extra'].attrs['Ntrain'] 152 | self.keys = f['extra']['train_keys'][()] 153 | self.keys_by_label = f['extra']['train_keys_by_label'][()] 154 | elif self.valid or self.test: 155 | self.n = f['extra'].attrs['Nvalidation'] 156 | self.keys = f['extra']['validation_keys'][()] 157 | self.keys_by_label = f['extra']['validation_keys_by_label'][()] 158 | #self.keys_by_label[:,:] -= self.keys_by_label[0,0] #normalize 159 | self._num_classes = len(self.keys_by_label) 160 | 161 | except AttributeError: 162 | print('Attribute not found in hdf5 file. You may be using an old hdf5 build. 
Delete {0} and run again'.format(root)) 163 | raise 164 | 165 | if label is not None: 166 | self.n = len(self.keys_by_label[self.label]) 167 | 168 | # 169 | 170 | 171 | def download(self): 172 | isexisting = super(NOmniglotDataset, self).download() 173 | 174 | def create_hdf5(self): 175 | create_events_hdf5(self.directory, self.root) 176 | 177 | def __len__(self): 178 | return self.n 179 | 180 | def __getitem__(self, index): 181 | 182 | if self.label != None: 183 | #print("N",self.n) 184 | ind = self.keys_by_label[self.label][index%self.n]#//self.n] 185 | #print("THE KEY IS", key) 186 | #Important to open and close in getitem to enable num_workers>0 187 | with h5py.File(self.root, 'r', swmr=True, libver="latest") as f: 188 | if self.train: 189 | key = f['extra']['train_keys'][ind] 190 | elif self.valid or self.test: 191 | key = f['extra']['validation_keys'][ind] 192 | data, target = sample( 193 | f, 194 | key, 195 | chunk_size = self.chunk_size) 196 | else: 197 | with h5py.File(self.root, 'r', swmr=True, libver="latest") as f: 198 | if self.train: 199 | key = f['extra']['train_keys'][index] 200 | elif self.valid or self.test: 201 | key = f['extra']['validation_keys'][index] 202 | data, target = sample( 203 | f, 204 | key, 205 | chunk_size = self.chunk_size) 206 | 207 | f.close() 208 | 209 | if data.size==0: 210 | with h5py.File(self.root, 'a', libver="latest") as f: 211 | if self.train: 212 | del f['extra']['train_keys'][index] 213 | elif self.valid or self.test: 214 | del f['extra']['validation_keys'][index] 215 | print("REMOVED BAD DATA") 216 | f.close() 217 | i=1/0 218 | 219 | if self.transform is not None: 220 | data = self.transform(data) 221 | 222 | return data, self.target_transform(target.astype('int')) 223 | 224 | 225 | 226 | 227 | class ClassNOmniglotDataset(torchmeta.utils.data.ClassDataset): 228 | def __init__(self, root = 'data/nomniglot/nomniglot.hdf5', 229 | chunk_size=300, 230 | meta_train=False, 231 | meta_val=False, 232 | meta_test=False, 233 | 
meta_split=None, 234 | transform=None, 235 | target_transform=None, 236 | download=False, 237 | class_augmentations=None): 238 | 239 | super(ClassNOmniglotDataset, self).__init__( 240 | meta_train=meta_train, 241 | meta_val=meta_val, 242 | meta_test=meta_test, 243 | meta_split=meta_split, 244 | class_augmentations=class_augmentations) 245 | 246 | 247 | self.root=root 248 | self.download = download 249 | 250 | if meta_train is True: 251 | self.train = True 252 | self.valid = False 253 | self.test = False 254 | elif meta_val is True: 255 | self.train = False 256 | self.valid = True 257 | self.test = False 258 | elif meta_test is True: 259 | self.train = False 260 | self.valid = False 261 | self.test = True 262 | 263 | if meta_train: 264 | split_name = 'train' 265 | if meta_val: 266 | split_name = 'val' 267 | if meta_test: 268 | split_name = 'test' 269 | self.split_name = split_name 270 | 271 | self.transform = transform 272 | 273 | self.chunk_size = chunk_size 274 | 275 | self.target_transform = target_transform 276 | 277 | self.dataset = NOmniglotDataset(root =self.root, 278 | label=None, 279 | transform = self.transform, 280 | target_transform = self.target_transform, 281 | chunk_size = self.chunk_size) 282 | 283 | @property 284 | def labels(self): 285 | return np.arange(self.n, dtype='int') 286 | 287 | @property 288 | def num_classes(self): 289 | return self.dataset._num_classes 290 | 291 | def __getitem__(self, index): 292 | label = index 293 | 294 | #print("label is", label) 295 | 296 | d = NOmniglotDataset(root =self.root, 297 | label=label, 298 | transform = self.transform, 299 | target_transform = self.target_transform, 300 | chunk_size = self.chunk_size) 301 | d.index = index 302 | #d.target_transform_append = lambda x: None 303 | return d 304 | 305 | 306 | 307 | class NOmniglot(torchmeta.utils.data.CombinationMetaDataset): 308 | def __init__(self, root, num_classes_per_task=None, meta_train=False, 309 | meta_val=False, meta_test=False, meta_split=None, 310 
| transform=None, target_transform=None, dataset_transform=None, 311 | class_augmentations=None, download=False,chunk_size=300): 312 | 313 | if target_transform is None: 314 | target_tranform = Categorical(num_classes_per_task) 315 | 316 | print("NUM PER TASK", num_classes_per_task) 317 | 318 | dataset = ClassNOmniglotDataset(root, 319 | meta_train=meta_train, meta_val=meta_val, 320 | meta_test=meta_test, meta_split=meta_split, transform=transform, 321 | class_augmentations=class_augmentations, download=download,chunk_size=chunk_size) 322 | 323 | super(NOmniglot, self).__init__(dataset, 324 | num_classes_per_task, 325 | target_transform=target_transform, 326 | dataset_transform=dataset_transform) 327 | 328 | 329 | 330 | 331 | def create_class_dataset(dset, meta_split = 'train'): 332 | ds = [] 333 | for n in range(dset.nclasses): 334 | indices = dset.keys_by_label[n] 335 | d = torch.utils.data.Subset(dset, indices) 336 | ds.append(d) 337 | return ds 338 | 339 | def create_datasets( 340 | root = 'data/nomniglot/nomniglot.hdf5', 341 | batch_size = 72 , 342 | chunk_size_train = 300, 343 | chunk_size_test = 300, 344 | ds = 1, 345 | dt = 1000, 346 | transform_train = None, 347 | transform_valid = None, 348 | transform_test = None, 349 | target_transform_train = None, 350 | target_transform_valid = None, 351 | target_transform_test = None): 352 | 353 | size = [2, 346//ds, 260//ds] 354 | 355 | if transform_train is None: 356 | transform_train = Compose([ 357 | CropDims(low_crop=[0,0], high_crop=[346,260], dims=[2,3]), 358 | Downsample(factor=[dt,1,ds,ds]), 359 | ToCountFrame(T = chunk_size_train, size = size), 360 | ToTensor()]) 361 | if transform_valid is None: 362 | transform_test = Compose([ 363 | CropDims(low_crop=[0,0], high_crop=[346,260], dims=[2,3]), 364 | Downsample(factor=[dt,1,ds,ds]), 365 | ToCountFrame(T = chunk_size_test, size = size), 366 | ToTensor()]) 367 | if transform_test is None: 368 | transform_test = Compose([ 369 | CropDims(low_crop=[0,0], 
high_crop=[346,260], dims=[2,3]), 370 | Downsample(factor=[dt,1,ds,ds]), 371 | ToCountFrame(T = chunk_size_test, size = size), 372 | ToTensor()]) 373 | 374 | if target_transform_train is None: 375 | target_transform_train =Compose([Repeat(chunk_size_train)]) 376 | if target_transform_valid is None: 377 | target_transform_valid = None 378 | if target_transform_test is None: 379 | target_transform_test = None 380 | 381 | train_ds = NOmniglotDataset(root,train=True, 382 | transform = transform_train, 383 | target_transform = target_transform_train, 384 | chunk_size = chunk_size_train*dt) 385 | 386 | valid_ds = NOmniglotDataset(root, transform = transform_test, 387 | target_transform = target_transform_test, 388 | train=False, 389 | valid=True, 390 | test=False, 391 | chunk_size = chunk_size_test*dt) 392 | 393 | test_ds = NOmniglotDataset(root, transform = transform_test, 394 | target_transform = target_transform_test, 395 | train=False, 396 | valid=False, 397 | test=True, 398 | chunk_size = chunk_size_test*dt) 399 | 400 | return train_ds, valid_ds , test_ds 401 | 402 | def create_dataloader( 403 | root = 'data/nomniglot/nomniglot.hdf5', 404 | batch_size = 72 , 405 | chunk_size_train = 100, 406 | chunk_size_test = 100, 407 | ds = 1, 408 | dt = 1000, 409 | transform_train = None, 410 | transform_valid = None, 411 | transform_test = None, 412 | target_transform_train = None, 413 | target_transform_valid = None, 414 | target_transform_test = None, 415 | **dl_kwargs): 416 | 417 | train_d, valid_d, test_d = create_datasets( #, test_d = create_datasets( 418 | root = root, 419 | batch_size = batch_size, 420 | chunk_size_train = chunk_size_train, 421 | chunk_size_test = chunk_size_test, 422 | ds = ds, 423 | dt = dt, 424 | transform_train = transform_train, 425 | transform_valid = transform_valid, 426 | transform_test = transform_test, 427 | target_transform_train = target_transform_train, 428 | target_transform_valid = target_transform_valid, 429 | target_transform_test = 
target_transform_test) 430 | 431 | 432 | train_dl = torch.utils.data.DataLoader(train_d, shuffle=True, batch_size=batch_size, **dl_kwargs) 433 | valid_dl = torch.utils.data.DataLoader(valid_d, shuffle=False, batch_size=batch_size, **dl_kwargs) 434 | test_dl = torch.utils.data.DataLoader(test_d, shuffle=False, batch_size=batch_size, **dl_kwargs) 435 | 436 | return train_dl, valid_dl, test_dl --------------------------------------------------------------------------------