├── .env
├── .gitignore
├── .gitmodules
├── LICENSE.txt
├── README.md
├── bimvee
├── __init__.py
├── container.py
├── events.py
├── exportHdf5.py
├── exportIitYarp.py
├── exportPoseRpgEsimCsv.py
├── exportRpgDvsRos.py
├── filter.py
├── geometry.py
├── importAe.py
├── importAer2.py
├── importAerdat.py
├── importBoundingBoxes.py
├── importEs.py
├── importExportBatches.py
├── importEyeTracking.py
├── importFrames.py
├── importHdf5.py
├── importIitNumpy.py
├── importIitVicon.py
├── importIitYarp.py
├── importInivationNumpy.py
├── importProph.py
├── importRpgDvsRos.py
├── importSecDvs.py
├── importSkeleton.py
├── importUdsAedat.py
├── info.py
├── player.py
├── plot.py
├── plotCorrelogram.py
├── plotDvsContrast.py
├── plotDvsLastTs.py
├── plotDvsSpaceTime.py
├── plotEventRate.py
├── plotFlow.py
├── plotFrame.py
├── plotImu.py
├── plotPose.py
├── plotSpikeogram.py
├── pose.py
├── samplesToEvents.py
├── split.py
├── timestamps.py
├── visualiser.py
└── visualisers
│ ├── __init__.py
│ ├── visualiserBase.py
│ ├── visualiserBoundingBoxes.py
│ ├── visualiserDvs.py
│ ├── visualiserEyeTracking.py
│ ├── visualiserFrame.py
│ ├── visualiserImu.py
│ ├── visualiserOpticFlow.py
│ ├── visualiserPoint3.py
│ ├── visualiserPose6q.py
│ └── visualiserSkeleton.py
├── examples
├── examples.py
├── examplesBatchImportExport.py
├── examplesImportThirdPartyDatasets.py
├── examplesPoseAndVicon.py
└── examplesSamplesToEvents.py
├── images
├── dvslastts.png
├── eventrate.png
├── events.png
├── frames.png
├── imu.png
└── pose.png
├── setup.cfg
└── setup.py
/.env:
--------------------------------------------------------------------------------
1 | PYTHONPATH=${PYTHONPATH}:/home/miacono/code/mustard:/home/miacono/code/bimvee
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | *.pyc
3 | bimvee.egg-info
4 | build
5 | /bimvee/.pylint.d/*.stats
6 | .idea/
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "bimvee/importRosbag"]
2 | path = bimvee/importRosbag
3 | url = https://github.com/event-driven-robotics/importRosbag
4 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (C) 2019- Event-driven Perception for Robotics
2 | Authors: Sim Bamford
3 | Suman Ghosh
4 | Aiko Dinale
5 | Massimiliano Iacono
6 | Additional code contributions from:
7 | Vadim Tikhanoff
8 | Marco Monforte
9 | Contains several third-party contributions esp. geometry.py
10 | This program is free software: you can redistribute it and/or modify it under
11 | the terms of the GNU General Public License as published by the Free Software
12 | Foundation, either version 3 of the License, or (at your option) any later version.
13 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
15 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
16 | You should have received a copy of the GNU General Public License along with
17 | this program. If not, see <https://www.gnu.org/licenses/>.
18 |
--------------------------------------------------------------------------------
/bimvee/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/event-driven-robotics/bimvee/c92c5a2504c5578c349072a44814a3b5aacd78f6/bimvee/__init__.py
--------------------------------------------------------------------------------
/bimvee/container.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2020 Event-driven Perception for Robotics
4 | Author: Sim Bamford
5 |
6 | This program is free software: you can redistribute it and/or modify it under
7 | the terms of the GNU General Public License as published by the Free Software
8 | Foundation, either version 3 of the License, or (at your option) any later version.
9 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
10 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
11 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 | You should have received a copy of the GNU General Public License along with
13 | this program. If not, see .
14 |
15 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
16 |
17 | A Container class which encapsulates some basic container-level manipulations.
18 |
19 | Firstly, the loosely enforced 3-tier hierarchy of data-channel-datatype within
20 | an imported dict has the advantage that channels can help organise data but
21 | it has the disadvantage that when there is just a single dict for each datatype,
22 | the user may not know or care about the channel names, yet needs them in order
23 | to navigate the data.
24 | Therefore the getDataType() method allows the user to get the dict for a chosen
25 | dataType providing only that there's a single example.
26 | """
27 |
class Container():
    """Wraps an imported bimvee dict (with 'info' and 'data' fields) and
    offers convenience lookups across the data-channel-dataType hierarchy."""

    def __init__(self, container):
        self.container = container

    # Assumes the loosely-enforced 3-level hierarchy: data -> channel -> dataType
    def getDataType(self, dataType):
        """Return the single dict for the given dataType, regardless of which
        channel it lives in; raise ValueError if there are none or several."""
        found = [channelDict[dataTypeKey]
                 for channelDict in self.container['data'].values()
                 for dataTypeKey in channelDict
                 if dataTypeKey == dataType]
        if not found:
            raise ValueError('No dicts found for dataType ' + dataType)
        if len(found) > 1:
            raise ValueError('More than one dict found for dataType: ' + dataType)
        return found[0]

    def getAllDataOfType(self, dataType):
        """Return {channelName: dataTypeDict} for every channel that
        contains the given dataType."""
        return {channelKey: channelDict[dataType]
                for channelKey, channelDict in self.container['data'].items()
                if dataType in channelDict}
53 |
54 |
--------------------------------------------------------------------------------
/bimvee/events.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2020 Event-driven Perception for Robotics
4 | Author: Sim Bamford
5 |
6 | This program is free software: you can redistribute it and/or modify it under
7 | the terms of the GNU General Public License as published by the Free Software
8 | Foundation, either version 3 of the License, or (at your option) any later version.
9 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
10 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
11 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 | You should have received a copy of the GNU General Public License along with
13 | this program. If not, see .
14 |
15 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
16 | Basic manipulations specific to event streams
17 | """
18 |
19 | import numpy as np
20 | from tqdm import trange
21 | import cv2
22 |
23 | from bimvee.plotDvsContrast import getEventImage
24 | from bimvee.split import selectByBool
25 |
26 | '''
27 | removes events from pixels whose number of events is more than n * std above
28 | mean, where n is the 'threshold kwarg, with default value 3
29 | '''
def removeHotPixels(inDict, **kwargs):
    """Remove events from 'hot' pixels: pixels whose event count exceeds
    mean + n * std over the whole array, where n is the 'threshold' kwarg
    (default 3). Recurses through lists and through the container
    hierarchy until reaching a dvs dict (identified by 'pol')."""
    # Boilerplate: descend lists and container dicts down to a dvs dict
    if isinstance(inDict, list):
        return [removeHotPixels(element, **kwargs) for element in inDict]
    if not isinstance(inDict, dict):
        return inDict
    if 'ts' not in inDict:
        return {key: removeHotPixels(value, **kwargs)
                for key, value in inDict.items()}
    # From this point onwards, it's a data-type container
    if 'pol' not in inDict:
        return
    # From this point onwards, it's a dvs container
    events = inDict
    countImage = getEventImage(events, contrast=np.inf, polarised=False)
    counts = countImage.flatten()
    cutOff = np.mean(counts) + kwargs.get('threshold', 3) * np.std(counts)
    (hotY, hotX) = np.where(countImage > cutOff)
    # Linearise (x, y) into single addresses so membership is one isin call
    numRows = kwargs.get('dimY', events.get('dimY', events['y'].max() + 1))
    hotAddrs = hotX * numRows + hotY
    eventAddrs = events['x'] * numRows + events['y']
    keepMask = np.logical_not(np.isin(eventAddrs, hotAddrs))
    return selectByBool(events, keepMask)
59 |
60 | '''
61 | Iterates event by event, using a prevTs array to keep track of each pixel.
62 | From a brief trial, dissecting into different arrays,
63 | doing the filter and then merging again is quite a lot slower than this.
64 | '''
def refractoryPeriod(inDict, refractoryPeriod=0.001, **kwargs):
    """Remove events which follow a previous event at the same pixel within
    'refractoryPeriod' seconds.

    Iterates event by event, using a prevTs array to keep track of the last
    accepted timestamp at each pixel. Recurses through lists and through
    the data-channel hierarchy until reaching a dvs dict (identified by the
    'pol' field). Returns a new container with the offending events removed.
    """
    # Bug fix: the 'refractoryPeriod' parameter shadows this function's name
    # inside the body, so the original self-recursive calls attempted to
    # call a float (TypeError on any list/hierarchical input). Recursion is
    # therefore delegated to a private helper, which also explicitly
    # propagates the chosen period (previously lost on recursion).
    return _refractoryPeriodRecurse(inDict, refractoryPeriod, **kwargs)

def _refractoryPeriodRecurse(inDict, period, **kwargs):
    # boilerplate to get down to dvs container
    if isinstance(inDict, list):
        return [_refractoryPeriodRecurse(inDictSingle, period, **kwargs)
                for inDictSingle in inDict]
    if not isinstance(inDict, dict):
        return inDict
    if 'ts' not in inDict:
        return {key: _refractoryPeriodRecurse(inDict[key], period, **kwargs)
                for key in inDict.keys()}
    # From this point onwards, it's a data-type container
    if 'pol' not in inDict:
        return
    # From this point onwards, it's a dvs container
    events = inDict
    ts = events['ts']
    xs = events['x']
    ys = events['y']
    numEvents = len(ts)
    # Timestamp of the last accepted event at each pixel
    prevTs = np.zeros((ys.max() + 1, xs.max() + 1))
    toKeep = np.ones((numEvents), dtype=bool)
    for idx in trange(numEvents, leave=True, position=0):
        if ts[idx] >= prevTs[ys[idx], xs[idx]] + period:
            prevTs[ys[idx], xs[idx]] = ts[idx]
        else:
            toKeep[idx] = False
    return selectByBool(inDict, toKeep)
97 |
98 | '''
99 | if dict does not already have dimX and dimY fields, then add them, using the
100 | maximum value in each dimension.
101 | NOTE: This is a hap-hazard method which relies on pixels actually producing
102 | events; only to be used if camera dimension is not otherwise available.
103 | '''
def findDims(inDict):
    """If 'dimX'/'dimY' are missing from the dict, derive them as the
    maximum coordinate value + 1 in each dimension.
    NOTE: relies on pixels actually having fired; only use this when the
    true camera dimensions are not otherwise available."""
    if 'dimX' in inDict and 'dimY' in inDict:
        return inDict
    outDict = inDict.copy()
    for dimKey, coordKey in (('dimX', 'x'), ('dimY', 'y')):
        if dimKey not in outDict:
            outDict[dimKey] = np.max(inDict[coordKey]) + 1
    return outDict
113 |
114 | '''
115 | Converts the underlying representation of address-events to a single ndarray
116 | n x 3, where the first col is x, the second col is y, and the third col is all
117 | ones; the dtype is float. The field is called 'xyh'.
118 | x and y fields become 1d views of the appropriate rows.
119 | '''
def convertToHomogeneousCoords(inDict):
    """Add an 'xyh' field: an n x 3 float ndarray whose columns are x, y
    and all-ones (homogeneous coordinates). The 'x' and 'y' fields become
    1d column views into 'xyh'. Returns a shallow-copied dict."""
    outDict = inDict.copy()
    numEvents = inDict['x'].shape[0]
    xyh = np.ones((numEvents, 3), dtype=float)
    xyh[:, 0] = inDict['x']
    xyh[:, 1] = inDict['y']
    outDict['xyh'] = xyh
    # Column slices are views, so edits to xyh are reflected in x and y
    outDict['x'] = xyh[:, 0]
    outDict['y'] = xyh[:, 1]
    return outDict
130 |
131 | '''
132 | Uses opencv functions to create an undistortion map and undistort events.
133 | k is the intrinsic matrix; d is the distortion coefficients.
134 | Returns a new container with the events undistorted.
135 | By default, events get turned into float64; keep them as int16 with kwarg 'asInt'=True
136 | By default, it uses the same intrinsic matrix in the remapping - change this
137 | by passing in the kwarg 'kNew'
138 | '''
def undistortEvents(inDict, k, d, **kwargs):
    """Undistort event coordinates using opencv.
    k: intrinsic matrix; d: distortion coefficients.
    Returns a new dict with remapped 'x' and 'y'. Coordinates become
    float64 by default; pass kwarg 'asInt'=True to round to int.
    Kwarg 'kNew' supplies a different intrinsic matrix for the remapping
    (defaults to k)."""
    # Ensure dimX/dimY are present (derived from max coordinates if absent)
    inDict = findDims(inDict)
    kNew = kwargs.get('kNew', k)
    # Build a flat list of every pixel coordinate as an n x 2 float array
    yGrid, xGrid = np.meshgrid(range(inDict['dimY']), range(inDict['dimX']))
    xGrid = xGrid.reshape(-1).astype(float)
    yGrid = yGrid.reshape(-1).astype(float)
    xyGrid = np.concatenate((xGrid[:, np.newaxis], yGrid[:, np.newaxis]), axis=1)
    # Undistort the whole grid once, then use it as a per-pixel lookup table
    undistortedPoints = cv2.undistortPoints(xyGrid, k, d, None, kNew)
    undistortionMap = undistortedPoints.reshape(inDict['dimX'], inDict['dimY'], 2)
    # Swap to (y, x, 2) so it can be indexed as [row, col] below
    undistortionMap = np.swapaxes(undistortionMap, 0, 1)
    xy = undistortionMap[inDict['y'], inDict['x'], :]
    if kwargs.get('asInt', False):
        xy = np.round(xy).astype('int')
    outDict = inDict.copy()
    outDict['x'] = xy[:, 0]
    outDict['y'] = xy[:, 1]
    return outDict
--------------------------------------------------------------------------------
/bimvee/exportHdf5.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2020 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see .
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 |
16 | Using hickle to add hierarchical lists and dicts to hdf5 automatically
17 | https://github.com/telegraphic/hickle
18 | In fact, these are just thin wrappers around hickle.dump/load,
19 | to offer a similar export function to other export calls.
20 | """
21 |
22 | #%%
23 |
24 | import hickle
25 | import os
26 |
def exportHdf5(data, exportFilePathAndName='./temp.hdf5', **kwargs):
    """Export 'data' (a hierarchical dict/list container) to an hdf5 file
    using hickle. Appends the '.hdf5' extension if it is not already
    present, and creates the target directory (including any missing
    intermediate directories) if needed."""
    if not exportFilePathAndName.endswith('.hdf5'):
        exportFilePathAndName = exportFilePathAndName + '.hdf5'
    print('exportHdf5 called, targeting file path and name: ' + exportFilePathAndName)
    absPath = os.path.dirname(os.path.abspath(exportFilePathAndName))
    # makedirs creates intermediate directories too; the previous os.mkdir
    # failed when the parent did not exist, and exist_ok avoids a race
    # between the existence check and the creation
    os.makedirs(absPath, exist_ok=True)
    hickle.dump(data, exportFilePathAndName)
35 |
36 |
37 |
--------------------------------------------------------------------------------
/bimvee/exportPoseRpgEsimCsv.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2020 Event-driven Perception for Robotics
4 | Authors: Simeon Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see .
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | A function which exports a pose6q dataType dict as a csv for use by Rpg's eSim
16 | simulator (https://github.com/uzh-rpg/rpg_esim)
17 |
18 | Note:
19 | a) conversion of ts to ns
20 | b) switch of quaternion to qx, qy, qz, qw format
21 | """
22 |
23 |
def exportPoseRpgEsimCsv(poseDict, filePathAndName='poses.csv'):
    """Write a pose6q dataType dict to a csv for RPG's eSim simulator.
    Timestamps are converted to ns; quaternions are reordered from
    bimvee's (w, x, y, z) to eSim's (qx, qy, qz, qw)."""
    timestamps = poseDict['ts']
    points = poseDict['point']
    rotations = poseDict['rotation']
    with open(filePathAndName, 'w') as csvFile:
        csvFile.write('# timestamp, x, y, z, qx, qy, qz, qw\n')
        for rowIdx in range(timestamps.shape[0]):
            fields = (timestamps[rowIdx] * 1000000000,
                      points[rowIdx, 0],
                      points[rowIdx, 1],
                      points[rowIdx, 2],
                      rotations[rowIdx, 1],
                      rotations[rowIdx, 2],
                      rotations[rowIdx, 3],
                      rotations[rowIdx, 0])
            csvFile.write('%f, %f, %f, %f, %f, %f, %f, %f\n' % fields)
41 |
--------------------------------------------------------------------------------
/bimvee/exportRpgDvsRos.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Thu Sep 24 11:29:15 2020
4 |
5 | @author: sbamford
6 | code contributions from Henri Rebecq
7 | """
8 |
9 |
10 | import sys
11 | sys.path.append('/usr/local/lib/python2.7/site-packages')
12 | sys.path.append('/opt/ros/kinetic/lib')
13 | sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
14 | sys.path.append('/home/sim/catkin_ws/devel/lib/python2.7/dist-packages')
15 | import numpy as np
16 | #import cv2
17 | import rosbag
18 | #from dvs_msgs.msg import Event, EventArray
19 | #from sensor_msgs.msg import CameraInfo
20 | #from sensor_msgs.msg import Image
21 | #from sensor_msgs.msg import Imu
22 | #from geometry_msgs.msg import Vector3
23 | from geometry_msgs.msg import PoseStamped
24 | from cv_bridge import CvBridge #, CvBridgeError
25 | import rospy
26 | from tqdm import tqdm
27 |
def exportPose6q(inDict, topicName, bagFile):
    """Write a pose6q dataType dict to an open rosbag as one
    geometry_msgs/PoseStamped message per timestamp, on topic 'topicName'.
    bimvee stores rotation columns in (w, x, y, z) order; they are mapped
    to the correspondingly-named ROS quaternion fields below."""
    point = inDict['point']
    rotation = inDict['rotation']
    for poseIdx in tqdm(range(inDict['ts'].shape[0])):
        poseMsg = PoseStamped()
        # NOTE(review): 'ts' values appear to be float seconds; confirm that
        # rospy.Time(secs=...) preserves sub-second precision here
        ts = rospy.Time(secs=inDict['ts'][poseIdx])
        poseMsg.header.stamp = ts
        poseMsg.pose.position.x = point[poseIdx, 0]
        poseMsg.pose.position.y = point[poseIdx, 1]
        poseMsg.pose.position.z = point[poseIdx, 2]
        # bimvee rotation column order is w, x, y, z
        poseMsg.pose.orientation.w = rotation[poseIdx, 0]
        poseMsg.pose.orientation.x = rotation[poseIdx, 1]
        poseMsg.pose.orientation.y = rotation[poseIdx, 2]
        poseMsg.pose.orientation.z = rotation[poseIdx, 3]
        bagFile.write(topic=topicName, msg=poseMsg, t=ts)
43 |
44 |
def exportFrame(inDict, topicName, bagFile):
    """Write a frame dataType dict to an open rosbag as one
    sensor_msgs/Image ('mono8') message per timestamp, on topic 'topicName'.
    Frames must already be uint8 (asserted below)."""
    bridge = CvBridge()
    for frameIdx in tqdm(range(inDict['ts'].shape[0])):
        img = inDict['frames'][frameIdx]
        assert img.dtype == np.uint8 # TODO: Handle type conversion here
        # For example by doing right shifts from 10-bit encoding to 8 bits: img = np.right_shift(img, 2)
        img = img.astype('uint8')
        # NOTE(review): 'ts' values appear to be float seconds; confirm that
        # rospy.Time(secs=...) preserves sub-second precision here
        ts = rospy.Time(secs=inDict['ts'][frameIdx])
        imgMsg = bridge.cv2_to_imgmsg(img, 'mono8')
        imgMsg.header.stamp = ts
        bagFile.write(topic=topicName, msg=imgMsg, t=ts)
56 |
def exportRpgDvsRos(inDict, **kwargs):
    """Export a bimvee container dict to a rosbag.
    Walks the data-channel-dataType hierarchy; each exportable dataType
    becomes a topic named '/<channelName>/<dataTypeName>'. Currently only
    'pose6q' and 'frame' are exported; other dataTypes are skipped with a
    printed message.
    kwargs: 'exportFilePathAndName' - target bag path.
    NOTE(review): the default './' is a directory; opening it as a bag
    will presumably fail - confirm the intended default."""
    # Open bag
    exportFilePathAndName = kwargs.get('exportFilePathAndName', './')
    bagFile = rosbag.Bag(exportFilePathAndName, 'w')

    #Descend data hierarchy, looking for dataTypes to export
    for channelName in inDict['data']:
        for dataTypeName in inDict['data'][channelName]:
            dataTypeDict = inDict['data'][channelName][dataTypeName]
            topicName = '/' + channelName + '/' + dataTypeName
            if dataTypeName == 'pose6q':
                # For now, we'll make an arbitrary decision which ros message type to use
                exportPose6q(dataTypeDict, topicName, bagFile)
            elif dataTypeName == 'frame':
                # For now, we'll make an arbitrary decision which ros message type to use
                exportFrame(dataTypeDict, topicName, bagFile)
            else:
                print('Skipping dataType "' + dataTypeName + '" from channel "' + channelName + '"')
    bagFile.close()
76 |
77 | '''
78 | Legacy code from AedatTools repo
79 | #%% DVS
80 |
81 | # Put several events into an array in a single ros message, for efficiency
82 |
83 | if 'polarity' in aedat['data'] \
84 | and ('dataTypes' not in aedat['info'] or 'polarity' in aedat['info']['dataTypes']):
85 | legacy code by Henri Rebecq, from AedatTools repo
86 | countMsgs = 0
87 | numEventsPerArray = 25000 # Could be a parameter
88 | numEvents = aedat['data']['polarity']['numEvents']
89 | numArrays = - (- numEvents / numEventsPerArray) # The subtraction allows rounding up
90 |
91 | # Construct the event array object - a definition from rpg_dvs_ros
92 | # Use this repeatedly for each message
93 | eventArrayObject = EventArray()
94 | # The following properties don't change
95 | eventArrayObject.width = 240 # HARDCODED CONSTANT - RESOLVE ON A RAINY DAY
96 | eventArrayObject.height = 180 # HARDCODED CONSTANT - RESOLVE ON A RAINY DAY
97 | # Use the following object array repeatedly to construct the contents
98 | # of each ros message
99 | eventArray = np.empty(-(-numEventsPerArray), 'object')
100 | # Outer loop over arrays or ros messages
101 | for startPointer in range(0, numEvents, numEventsPerArray):
102 | countMsgs = countMsgs + 1
103 | print 'Writing event array message', countMsgs, 'of', numArrays, ' ...'
104 | endPointer = min(startPointer + numEventsPerArray, numEvents)
105 | # Break the data vectors out of the dict for efficiency,
106 | # but do this message by message to avoid memory problems
107 | arrayX = aedat['data']['polarity']['x'][startPointer : endPointer]
108 | arrayY = aedat['data']['polarity']['y'][startPointer : endPointer]
109 | arrayPolarity = aedat['data']['polarity']['polarity'][startPointer : endPointer]
110 | # Convert timestamps to seconds (ros, however, stores timestamps to ns precision)
111 | arrayTimeStamp = aedat['data']['polarity']['timeStamp'][startPointer : endPointer]/1000000.0
112 |
113 | # Iterate through all the events in the intended event array
114 | for eventIndex in range (0, endPointer - startPointer):
115 | # The Event object definition comes from rpg_dvs_ros
116 | e = Event()
117 | e.x = 239 - arrayX[eventIndex] # Flip X - I don't know why this is necessary
118 | e.y = arrayY[eventIndex]
119 | e.ts = rospy.Time(arrayTimeStamp[eventIndex])
120 | e.polarity = arrayPolarity[eventIndex]
121 | eventArray[eventIndex] = e;
122 | # The last array may be smaller than numEventsPerArray, so clip the object array
123 | if endPointer == numEvents:
124 | eventArray = eventArray[0 : endPointer - startPointer]
125 | # Assume that the ros message is sent at the time of the last event in the message
126 | eventArrayObject.header.stamp = e.ts
127 | eventArrayObject.events = eventArray
128 | bag.write(topic='/dvs/events', msg=eventArrayObject, t=e.ts)
129 |
130 | #%% IMU6
131 |
132 | # Put several events into an array in a single ros message, for efficiency
133 | if 'imu6' in aedat['data'] \
134 | and ('dataTypes' not in aedat['info'] or 'imu6' in aedat['info']['dataTypes']):
135 | # Break the IMU events out of the dict, for efficiency
136 | # Accel is imported as g; we want m/s^2
137 | arrayAccelX = aedat['data']['imu6']['accelX'] * 9.8
138 | arrayAccelY = aedat['data']['imu6']['accelY'] * 9.8
139 | arrayAccelZ = aedat['data']['imu6']['accelZ'] * 9.8
140 | # Angular velocity is imported as deg/s; we want rad/s
141 | arrayGyroX = aedat['data']['imu6']['gyroX'] * 0.01745
142 | arrayGyroY = aedat['data']['imu6']['gyroY'] * 0.01745
143 | arrayGyroZ = aedat['data']['imu6']['gyroZ'] * 0.01745
144 | # Convert timestamps to seconds (ros, however, stores timestamps to ns precision)
145 | arrayTimeStamp = aedat['data']['imu6']['timeStamp']/1000000.0
146 | numEvents = aedat['data']['imu6']['numEvents']
147 | # Use the following containers repeatedly during the export
148 | imuMsg = Imu()
149 | accel = Vector3()
150 | gyro = Vector3()
151 | # I guess these assignments only need to be made once
152 | imuMsg.linear_acceleration = accel
153 | imuMsg.angular_velocity = gyro
154 | for eventIndex in range(0, numEvents):
155 | imuMsg.header.stamp = rospy.Time(arrayTimeStamp[eventIndex])
156 | accel.x = arrayAccelX[eventIndex]
157 | accel.y = arrayAccelY[eventIndex]
158 | accel.z = arrayAccelZ[eventIndex]
159 | gyro.x = arrayGyroX[eventIndex]
160 | gyro.y = arrayGyroY[eventIndex]
161 | gyro.z = arrayGyroZ[eventIndex]
162 | bag.write(topic='/dvs/imu', msg=imuMsg, t=imuMsg.header.stamp)
163 | '''
164 |
--------------------------------------------------------------------------------
/bimvee/filter.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | Copyright (C) 2024 Event-driven Perception for Robotics
4 | Authors: Sim Bamford, Mohammadreza Koolani
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see .
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 |
16 | filter_spatiotemporal:
17 | Also known as a salt-and-pepper filter,
18 | lets an event pass if there has been a previous event
19 | within a certain spatio-temporal window.
20 | '''
21 |
22 | import numpy as np
23 |
def filter_spatiotemporal_single(events, time_window=0.05, neighbourhood=1):
    """Salt-and-pepper filter for a single dvs dict: an event is kept only
    if another event occurred within 'time_window' seconds inside the
    (2*neighbourhood+1)^2 pixel patch centred on it.
    Returns a new dict with the filtered x, y, ts and pol arrays."""
    # Shift coordinates so that patch slicing never indexes below zero
    shifted_x = events['x'] + neighbourhood
    shifted_y = events['y'] + neighbourhood
    timestamps = events['ts']
    keep = np.zeros_like(timestamps, dtype=bool)
    # Most recent event time seen at/near each pixel; -inf means never
    last_seen = np.full((np.max(shifted_y) + neighbourhood * 2,
                         np.max(shifted_x) + neighbourhood * 2),
                        -np.inf)
    lo = neighbourhood
    hi = neighbourhood + 1
    for idx, (col, row, t) in enumerate(zip(shifted_x, shifted_y, timestamps)):
        keep[idx] = (t - last_seen[row, col]) <= time_window
        # Every event (kept or not) refreshes its neighbourhood patch
        last_seen[row - lo: row + hi, col - lo: col + hi] = t
    return {
        'x': events['x'][keep],
        'y': events['y'][keep],
        'ts': events['ts'][keep],
        'pol': events['pol'][keep],
    }
50 |
def filter_spatiotemporal(in_dict, **kwargs):
    """Apply the salt-and-pepper filter to 'in_dict'. If it is itself a dvs
    dict (declared via 'data_type', or carrying x/y/pol/ts fields), filter
    it directly; otherwise recurse through the hierarchy, filtering every
    'dvs' entry and copying everything else through unchanged."""
    is_dvs = (in_dict.get('data_type', '') == 'dvs'
              or {'x', 'y', 'pol', 'ts'}.issubset(in_dict.keys()))
    if is_dvs:
        return filter_spatiotemporal_single(in_dict, **kwargs)
    new_dict = {}
    for key, value in in_dict.items():
        if key == 'dvs':
            new_dict[key] = filter_spatiotemporal_single(value, **kwargs)
        elif type(value) is dict:
            new_dict[key] = filter_spatiotemporal(value, **kwargs)
        else:
            new_dict[key] = value
    return new_dict
69 |
--------------------------------------------------------------------------------
/bimvee/importAe.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see .
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | importAe is a function for importing timestamped address-event data, given a path
16 | (defaulting to the current directory, intended for the yarp format) or a file.
17 | If the file format is not stated, there is an attempt to determine this from the file.
18 | Then a sub-function is called, specialised for importing the data contained
19 | into the workspace. Depending on the format, additional data may also be imported,
20 | including frame data, imu samples, skin data, 2d or 3d coords etc etc.
21 |
22 | The output is a dictionary containing:
23 | - info
24 | - data
25 | The exact contents varies according to the file type import but in general:
26 | info: this is a dict which starts life as the kwargs passed in, and is
27 | augmented and modified by the actual contents of the file. It may include
28 | any informational fields in file headers. Minimally, it will contain:
29 | - filePathAndName
30 | - fileFormat
31 | data: this is a list of dicts, one for each sensor or "channel" which has
32 | been imported. Bear in mind that sub-functions may optionally split or join
33 | channels. Within each dict, there is a field for each type of data contained.
34 | A file for example may contain data from a several sensors, but a single sensor
35 | may produce polarity events ("pol"), aps samples ("aps"), imu samples etc.
36 | Within each of these fields, there is a dict, generally containing fields for
37 | each data column, so in the case of pol events, there are 4-5 fields:
38 | - ts
39 | - x
40 | - y
41 | - pol
42 | - optionally ch (channel)
43 | each containing a numpy array of the appropriate type for the data
44 | contained, where all these arrays will be of the same length.
45 |
46 | Aim is to support:
47 |
48 | YARP .log - ATIS Gen1 - 24 bit - includes, IMU, Vicon, (also SKIN?)
49 | rpg_dvs_ros - DVS/DAVIS
50 | Maybe others?
51 | jAER / cAER .aedat (v1/2/3) DVS / DAVIS / Cochlea?
52 | Samsung Gen3 VGA?
53 | Celex ...???
54 | """
55 |
56 | #%%
57 |
58 | import os
59 |
60 | # local imports
61 | from .timestamps import rezeroTimestampsForImportedDicts
62 |
def getOrInsertDefault(inDict, arg, default):
    """Return inDict[arg] if present; otherwise insert 'default' under
    'arg' and return it. Kept as a named helper for readability at the
    call sites."""
    # dict.setdefault does the lookup and conditional insert in one step;
    # the previous hand-rolled version compared the value against the
    # default by equality, spuriously re-inserting when a stored value
    # merely compared equal to it
    return inDict.setdefault(arg, default)
71 |
72 |
def importAe(**kwargs):
    """Import timestamped address-event data from a file or folder.

    The format is taken from the 'fileFormat' kwarg when given; otherwise it
    is guessed from the file extension or, for a folder, from the most common
    file extension it contains, recursing into sub-folders when the contents
    are not recognised directly.

    Args (via kwargs):
        filePathOrName (str): file or folder to import; defaults to '.'.
        fileFormat (str, optional): explicit format name, e.g. 'iityarp',
            'rosbag', 'aer2', 'frames', 'hdf5', ...
        zeroTime / zeroTimestamps (bool, optional): rezero timestamps
            collectively across concurrent imports; defaults to True.

    Returns:
        A container dict (or a list of them, when a folder yields several
        importable items).

    Raises:
        FileNotFoundError: if filePathOrName does not exist.
        ValueError: if the file format cannot be determined or is unsupported.
    """
    filePathOrName = getOrInsertDefault(kwargs, 'filePathOrName', '.')
    if not os.path.exists(filePathOrName):
        raise FileNotFoundError("File or folder not found.")
    fileFormat = kwargs.get('fileFormat', '').lower()
    if not fileFormat:
        # Try to determine the file format
        if os.path.isdir(filePathOrName):
            # It's a path - it could contain yarp .log or frames
            listDir = os.listdir(filePathOrName)
            fileTypes = [subName.split(".")[-1] for subName in listDir]
            mostCommonFileType = max(set(fileTypes), key=fileTypes.count)
            if mostCommonFileType == 'log':
                kwargs['fileFormat'] = 'iityarp'
            elif mostCommonFileType in ['png', 'jpg', 'jpeg']:
                kwargs['fileFormat'] = 'frames'
            else:
                # Contents not directly recognisable - recurse into this folder
                resultsList = []
                for subName in listDir:
                    kwargs['filePathOrName'] = os.path.join(filePathOrName, subName)
                    try:
                        result = importAe(**kwargs)
                    except ValueError:
                        # Sub-path of undeterminable/unsupported format - skip it
                        continue
                    if isinstance(result, list):
                        resultsList = resultsList + result
                    else:
                        resultsList.append(result)
                if len(resultsList) > 1 and \
                    kwargs.get('zeroTime', kwargs.get('zeroTimestamps', True)):
                    # Optional: start the timestamps at zero for the first event
                    # This is done collectively for all the concurrent imports
                    rezeroTimestampsForImportedDicts(resultsList)
                elif len(resultsList) == 1:
                    resultsList = resultsList[0]

                return resultsList
        else:
            # Guess the file format based on file extension
            ext = os.path.splitext(filePathOrName)[1]
            if ext == '.dat' or ext == '.raw':
                kwargs['fileFormat'] = 'dat'
            elif ext == '.bag':
                kwargs['fileFormat'] = 'rosbag'
            elif ext == '.bin':
                kwargs['fileFormat'] = 'secdvs'
            elif ext == '.npy':
                kwargs['fileFormat'] = 'iitnpy'
            elif ext == '.aer2':
                kwargs['fileFormat'] = 'aer2'
            elif ext == '.hdf5':
                kwargs['fileFormat'] = 'hdf5'
            elif ext == '.log':
                kwargs['fileFormat'] = 'iit'
            elif ext == '.aerdat':
                kwargs['fileFormat'] = 'aerdat'
            elif ext == '.es':
                kwargs['fileFormat'] = 'es'
            # etc ...
            else:
                raise ValueError("The file format cannot be determined.")
    # Let the fileformat parameter dictate the file or folder format
    fileFormat = kwargs.get('fileFormat').lower()
    if fileFormat in ['iityarp', 'yarp', 'iit', 'log', 'yarplog']:
        # importIitYarp expects a directory; if handed a file, use its parent
        if not os.path.isdir(kwargs['filePathOrName']):
            kwargs['filePathOrName'] = os.path.dirname(kwargs['filePathOrName'])
        from .importIitYarp import importIitYarp
        importedData = importIitYarp(**kwargs)
    elif fileFormat in ['rpgdvsros', 'rosbag', 'rpg', 'ros', 'bag', 'rpgdvs']:
        from .importRpgDvsRos import importRpgDvsRos
        importedData = importRpgDvsRos(**kwargs)
    elif fileFormat in ['iitnpy', 'npy', 'numpy']:
        # Two numpy-based formats exist; try IIT first, fall back to Inivation
        try:
            from .importIitNumpy import importIitNumpy
            importedData = importIitNumpy(**kwargs)
        except ValueError:
            from .importInivationNumpy import importInivationNumpy
            importedData = importInivationNumpy(**kwargs)
    elif fileFormat in ['dat', 'raw']:
        from .importProph import importProph
        importedData = importProph(**kwargs)
    elif fileFormat in ['secdvs', 'bin', 'samsung', 'sec', 'gen3']:
        from .importSecDvs import importSecDvs
        importedData = importSecDvs(**kwargs)
    elif fileFormat in ['aer2']:
        from .importAer2 import importAer2
        importedData = importAer2(**kwargs)
    elif fileFormat in ['frame', 'frames', 'png', 'pngfolder', 'imagefolder']:
        from .importFrames import importFrames
        importedData = importFrames(**kwargs)
    elif fileFormat in ['hdf5', 'bimveehdf5']:
        from .importHdf5 import importHdf5
        importedData = importHdf5(**kwargs)
    elif fileFormat in ['aerdat']:
        from .importAerdat import importAerdat
        importedData = importAerdat(**kwargs)
    elif fileFormat in ['es']:
        from .importEs import importEs
        importedData = importEs(**kwargs)
    else:
        # ValueError (was bare Exception): more specific for callers, and
        # consistent with the ValueError the folder recursion above skips.
        raise ValueError("fileFormat: " + str(fileFormat) + " not supported.")
    # TODO: celex format not yet supported
    return importedData
--------------------------------------------------------------------------------
/bimvee/importAer2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2020 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | An example of 'aer2' format in the wild is:
16 |
17 | https://github.com/VLOGroup/dvs-reconstruction/blob/master/data/sample_data.tar.bz2
18 |
It's a text file, no headers, where each line encodes one event, in the format:
20 |
21 | ts x y pol
22 |
23 | importAer2 opens such a file (pass with filePathOrName parameter)
24 | and returns a dict in this format:
25 | {'info': {},
26 | 'data': {
27 | ch0: {
28 | dvs: {
29 | 'ts': np.array of float in seconds
30 | 'x': np.array of np.uint16 in pixels
31 | 'y': np.array of np.uint16 in pixels
32 | 'pol': np.array of bool -- 1 = ON event
33 | }}}}
34 | """
35 |
36 | import numpy as np
37 | from tqdm import tqdm
38 |
39 | # Local imports
40 | from .timestamps import zeroTimestampsForADataType
41 |
def inferDim(array):
    """Infer a sensor dimension from the maximum coordinate observed.

    Coordinates are zero-based, so an array whose max coordinate equals a
    standard dimension needs a dimension strictly larger than that value;
    hence side='right' (the original side='left' returned e.g. 240 for a
    max coordinate of exactly 240, inconsistent with the max+1 fallback).
    Falls back to max+1 when the data exceeds all known standard dimensions.
    """
    dimStops = np.array([32, 64, 128, 180, 240, 256, 260, 304, 320, 346, 480, 640, 720, 1080, 1280, 1920], dtype=np.uint16)
    maxCoord = np.max(array)
    # First standard dimension strictly greater than the max coordinate
    idx = np.searchsorted(dimStops, maxCoord, side='right')
    try:
        return dimStops[idx]
    except IndexError:
        # Larger than any known sensor dimension: tightest possible bound
        return maxCoord + 1
49 |
def importAer2(**kwargs):
    """Import a text-based 'aer2' file: one event per line, as 'ts x y pol'.

    Timestamps are divided by 1e6 (i.e. treated as microseconds) to yield
    seconds. Sensor dimensions are inferred from the coordinate data.

    Args (via kwargs):
        filePathOrName (str): the .aer2 file to read.
        zeroTime / zeroTimestamps (bool, optional): rezero timestamps;
            defaults to True.

    Returns:
        A container dict with a single 'dvs' datatype under channel 'ch0'.
    """
    filePathOrName = kwargs['filePathOrName']
    print('Attempting to import ' + filePathOrName + ' as aer2')
    # Amortised-doubling buffers, grown as the file is read
    sizeOfArray = 1024
    ts = np.zeros((sizeOfArray), dtype=float)
    x = np.zeros((sizeOfArray), dtype=np.uint16)
    y = np.zeros((sizeOfArray), dtype=np.uint16)
    pol = np.zeros((sizeOfArray), dtype=bool)
    # Fix: numEvents was previously computed from the loop variable after the
    # loop, which raised NameError for an empty file.
    numEvents = 0
    with open(filePathOrName, 'r') as file:
        for idx, line in enumerate(tqdm(file)):
            if idx == sizeOfArray:
                # Buffers full: double them
                ts = np.concatenate((ts, np.zeros((sizeOfArray), dtype=float)))
                x = np.concatenate((x, np.zeros((sizeOfArray), dtype=np.uint16)))
                y = np.concatenate((y, np.zeros((sizeOfArray), dtype=np.uint16)))
                pol = np.concatenate((pol, np.zeros((sizeOfArray), dtype=bool)))
                sizeOfArray *= 2
            lineSplit = line.split()
            ts[idx] = float(lineSplit[0])
            x[idx] = int(lineSplit[1])
            y[idx] = int(lineSplit[2])
            pol[idx] = int(lineSplit[3])
            numEvents = idx + 1
    # Trim the over-allocated buffers to the events actually read
    dvsDict = {'ts': ts[:numEvents] / 1000000,
               'x': x[:numEvents],
               'y': y[:numEvents],
               'pol': pol[:numEvents],
               'dimX': inferDim(x),
               'dimY': inferDim(y)
               }

    if kwargs.get('zeroTime', kwargs.get('zeroTimestamps', True)):
        zeroTimestampsForADataType(dvsDict)
    outDict = {
        'info': {'filePathOrName':filePathOrName,
                 'fileFormat': 'aer2'},
        'data': {
            'ch0': {
                'dvs': dvsDict
                }
            }
        }
    print('Done.')
    return outDict
94 |
--------------------------------------------------------------------------------
/bimvee/importAerdat.py:
--------------------------------------------------------------------------------
1 | import struct
2 | import numpy as np
3 |
def importAerdat(**kwargs):
    """Import a binary .aerdat file of fixed-size 9-byte records.

    Each record packs (uchar, ushort, ushort, uint32) in native byte order
    with no padding. Timestamps are rebased to zero and divided by 1e6
    (i.e. treated as microseconds) to yield seconds.
    """
    filePathOrName = kwargs['filePathOrName']
    with open(filePathOrName, 'rb') as inFile:
        rawBytes = inFile.read()

    # pol = uchar, (x, y) = ushort, t = uint32 => 1 + 2 + 2 + 4 = 9 bytes
    record = struct.Struct('=BHHI')

    # Drop any trailing partial record
    remainder = len(rawBytes) % record.size
    if remainder:
        rawBytes = rawBytes[:-remainder]

    # One tuple per record, transposed into one column per field.
    # Column-to-name mapping follows the original implementation:
    # column 0 -> pol, column 1 -> y, column 2 -> x, column 3 -> ts.
    fieldColumns = list(zip(*record.iter_unpack(rawBytes)))
    pol = np.array(fieldColumns[0])
    y = np.array(fieldColumns[1])
    x = np.array(fieldColumns[2])
    timestamps = np.array(fieldColumns[3], dtype=float)

    tsOffset = timestamps[0]
    timestamps = (timestamps - tsOffset) / 1e6

    out_dict = {'info': {'tsOffset': tsOffset,
                         'filePathOrName': filePathOrName,
                         'fileFormat': 'aerdat'},
                'data': {
                    'ch0': {
                        'dvs': {
                            'ts': timestamps,
                            'x': x,
                            'y': y,
                            'pol': pol
                        }}}}
    return out_dict
41 |
--------------------------------------------------------------------------------
/bimvee/importBoundingBoxes.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def importBoundingBoxes(**kwargs):
    """Load bounding boxes from a whitespace-separated text file.

    Each row is expected to hold: ts minY minX maxY maxX label.
    Rows are returned sorted by timestamp, as a dict of column arrays.
    """
    boxes = np.loadtxt(kwargs.get('filePathOrName'))
    # A single-row file loads as a 1D vector; promote it to a 2D table
    if boxes.ndim == 1:
        boxes = boxes[np.newaxis, :]
    boxes = boxes[np.argsort(boxes[:, 0])]
    columnNames = ['ts', 'minY', 'minX', 'maxY', 'maxX', 'label']
    return {name: boxes[:, col] for col, name in enumerate(columnNames)}
17 |
--------------------------------------------------------------------------------
/bimvee/importEs.py:
--------------------------------------------------------------------------------
1 | import loris
2 | import numpy as np
3 |
4 |
def importEs(**kwargs):
    """Import an .es event file via the 'loris' library into a container dict.

    Timestamps are rebased to zero and divided by 1e6 (i.e. treated as
    microseconds) to yield seconds.
    """
    filePathOrName = kwargs['filePathOrName']
    parsed = loris.read_file(filePathOrName)
    tsList, xList, yList, polList = [], [], [], []
    # Each event record is (t, x, y, <unused>, p)
    for t, xVal, yVal, _, p in parsed['events']:
        tsList.append(t)
        xList.append(xVal)
        yList.append(yVal)
        polList.append(p)
    timestamps = np.array(tsList, dtype=float)
    tsOffset = timestamps[0]
    timestamps = (timestamps - tsOffset) / 1e6
    out_dict = {'info': {'tsOffset': tsOffset,
                         'filePathOrName': filePathOrName,
                         'fileFormat': 'es'},
                'data': {
                    'ch0': {
                        'dvs': {
                            'ts': timestamps,
                            'x': np.array(xList),
                            'y': np.array(yList),
                            'pol': np.array(polList)
                        }}}}
    return out_dict
36 |
--------------------------------------------------------------------------------
/bimvee/importExportBatches.py:
--------------------------------------------------------------------------------
1 | #%%
2 | #!/usr/bin/env python3
3 | # -*- coding: utf-8 -*-
4 | """
5 | @author: adinale
6 | Copyright (C) 2020 Event-driven Perception for Robotics
7 | Authors: Aiko Dinale
8 | Sim Bamford
9 | This program is free software: you can redistribute it and/or modify it under
10 | the terms of the GNU General Public License as published by the Free Software
11 | Foundation, either version 3 of the License, or (at your option) any later version.
12 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
14 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 | You should have received a copy of the GNU General Public License along with
16 | this program. If not, see .
17 |
18 | Part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
19 | importExportBatches... is for importing timestamped address-event data,
20 | and then exporting it again possibly in a different format.
21 | Intended for use with large files, it allows you to read in a certain amount
22 | of data in a batch, in order not to exceed the limits of working memory.
23 | At the time of writing there is only a function:
24 |
25 | - importExportBatchesIitYarp
26 |
27 | Ideally we will extend this to handle all formats.
28 |
29 | Use params numberBatches and importMaxBytes for batch control. the info field
30 | in the imported dict contains running info about the state of the import,
but this is not encoded in the output file.
32 | """
33 |
34 | import os
35 | from .importIitYarp import importIitYarp
36 | from .exportIitYarp import exportIitYarp
37 | from .timestamps import offsetTimestampsForAContainer
38 |
def importExportBatchesIitYarp(datasetPath, numberBatches, importMaxBytes = 1000000, tsBits = 30):
    """
    Import a binary dataset into batches, then export each single batch into YARP format.

    The input file is read in chunks of at most importMaxBytes; the chunks
    belonging to a batch are appended to the same output folder
    (datasetPath + "_convertedBatch<idx>"), with timestamps re-offset so that
    the batch's timeline stays continuous across hardware timestamp wrap-around.

    Args:
        datasetPath (string): path containing the dataset in binary format, i.e. binaryevents.log
        numberBatches (int): number of batches which the input dataset should be split into
        importMaxBytes (int, optional): maximum number of Bytes to be imported. Defaults to 1000000 (1 MB).
        tsBits (int, optional): bit width of the hardware timestamp counter,
            used to compute the wrap-around period. Defaults to 30.
    """
    dataSizeBytes = os.path.getsize(datasetPath + "/binaryevents.log")

    # Number of ~importMaxBytes-sized chunks in the file, and chunks per batch
    numMegaByte = round(dataSizeBytes/importMaxBytes)
    sizeBatchMegaByte = round(numMegaByte/numberBatches)
    # -1 so that the first chunk import starts from byte 0
    importedToByte = -1

    for idx in range(0, numberBatches):
        # Accumulated wrap-around compensation (seconds) for this batch
        wrapOffset = 0
        for megaByteCounter in range(0, sizeBatchMegaByte):

            importedBatch = importIitYarp(filePathOrName = datasetPath,
                                          tsBits = tsBits,
                                          convertSamplesToImu = False,
                                          importFromByte = importedToByte + 1,
                                          importMaxBytes = importMaxBytes)

            if megaByteCounter == 0:
                # The first batch is treated differently: it creates the output
                # folder ('w' mode) and establishes the reference ts offset
                exportIitYarp(importedBatch,
                              exportFilePath = os.path.join(datasetPath + "_convertedBatch" + str(idx)),
                              pathForPlayback = os.path.join(datasetPath + "_convertedBatch" + str(idx)),
                              dataTypes = ['sample', 'dvs'],
                              protectedWrite = False,
                              writeMode = 'w')
                first_tsOffset = importedBatch['info']['tsOffsetFromData']
                previous_tsOffset = first_tsOffset
            else:
                # Offset timestamps in the second batch
                imported_tsOffset = importedBatch['info']['tsOffsetFromData']
                if imported_tsOffset > previous_tsOffset:
                    # Counter wrapped: add one full wrap period in seconds
                    # (0.08 / 1000000 is presumably the hardware clock tick
                    # of 0.08 us converted to seconds - TODO confirm)
                    wrapOffset += 2**tsBits * 0.08 / 1000000

                offsetToApplyToImportedBatch = first_tsOffset - imported_tsOffset + wrapOffset
                offsetTimestampsForAContainer(importedBatch, offsetToApplyToImportedBatch)
                importedBatch['info']['tsOffsetFromData'] += offsetToApplyToImportedBatch

                # Append ('a' mode) to the folder created by the first chunk
                exportIitYarp(importedBatch,
                              exportFilePath = os.path.join(datasetPath + "_convertedBatch" + str(idx)),
                              pathForPlayback = os.path.join(datasetPath + "_convertedBatch" + str(idx)),
                              dataTypes = ['sample', 'dvs'],
                              protectedWrite = False,
                              writeMode = 'a')

                previous_tsOffset = imported_tsOffset

            # NOTE(review): assumes a 'right' dvs channel is always present
            # in the imported data - confirm for other datasets
            time = importedBatch['data']['right']['dvs']['ts'][-1]
            print("Time " + str(time))

            # Resume the next chunk where this one ended
            importedToByte = importedBatch['info']['importedToByte']
            print(str(megaByteCounter) + ": imported to Byte " + str(importedToByte) + "\n")
98 |
--------------------------------------------------------------------------------
/bimvee/importEyeTracking.py:
--------------------------------------------------------------------------------
1 | import json
2 | import numpy as np
3 |
def importEyeTracking(**kwargs):
    """Load eye-tracking ground truth from a JSON file.

    The file is expected to hold a list of records (dicts); the keys of the
    first record define the fields. Returns a dict mapping each field name
    to a numpy array of the per-record values.
    """
    with open(kwargs.get('filePathOrName')) as jsonFile:
        records = json.load(jsonFile)
    outDict = {}
    for field in records[0]:
        outDict[field] = np.array([record[field] for record in records])
    return outDict
8 |
--------------------------------------------------------------------------------
/bimvee/importFrames.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2020 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see .
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 |
16 | importFrames imports a folder full of (timestamped) frames saved as image files.
17 | It might make sense to generalise this using e.g. https://github.com/soft-matter/pims
18 | but on day one this imports frames in the format exported by:
19 | https://github.com/uzh-rpg/rpg_e2vid
20 | i.e. a folder full of png, also containing a 'timestamps.txt' file, with a timestamp
21 | in seconds written on each line.
22 |
23 | Returns a dict:
24 | {'info': {'filePathOrName': str},
25 | 'data': {
26 | channel0: {
27 | frame: {
28 | "ts": numpy array of float - seconds
29 | "frames": a list of numpy arrays where dim 0 = y (increasing downwards)
30 |
31 | """
32 |
33 | #%%
34 |
35 | import numpy as np
36 | import os
37 | from tqdm import tqdm
38 | import imageio
39 |
40 | # local imports
41 | from .timestamps import zeroTimestampsForAChannel, rezeroTimestampsForAnImportedDict
42 | from .importEyeTracking import importEyeTracking
43 |
def getOrInsertDefault(inDict, arg, default):
    """Return inDict[arg], inserting and returning default if arg is absent.

    dict.setdefault implements exactly this get-or-insert contract in a
    single lookup, replacing the manual get/compare/assign sequence.
    """
    return inDict.setdefault(arg, default)
52 |
def importFrames(**kwargs):
    """Import a folder of timestamped image frames.

    The folder must contain a 'timestamps.txt' file (one float value per
    line) alongside the image files (.png/.jpg/.jpeg). A 'gt.json' file,
    if present, is imported as eye-tracking ground truth.

    Args (via kwargs):
        filePathOrName (str): the folder to import; defaults to '.'.
        zeroTime / zeroTimestamps (bool, optional): rezero timestamps;
            defaults to True.

    Returns:
        dict in the container format, with a 'frame' datatype under 'ch0'.

    Raises:
        FileNotFoundError: if the path is missing, is not a directory,
            or does not contain a timestamps.txt file.
    """
    path = getOrInsertDefault(kwargs, 'filePathOrName', '.')
    print('importFrames trying path: ' + path)
    if not os.path.exists(path):
        raise FileNotFoundError("path not found.")
    if not os.path.isdir(path):
        raise FileNotFoundError("path is not a directory.")
    files = sorted(os.listdir(path))
    # TODO: trusting the os to sort the files may not work
    # Fix: ts was previously left undefined when timestamps.txt was absent,
    # leading to an opaque NameError below
    ts = None
    frames = []
    gt_available = False
    for file in tqdm(files):
        filePathAndName = os.path.join(path, file)
        if file == 'timestamps.txt': # todo: is there a more general form?
            ts = np.loadtxt(filePathAndName)
        elif file == 'gt.json':
            eyes = importEyeTracking(filePathOrName=filePathAndName)
            gt_available = True
        elif os.path.splitext(filePathAndName)[-1] in ['.png', '.jpeg', '.jpg']:
            frames.append(imageio.imread(filePathAndName))
    if ts is None:
        raise FileNotFoundError("timestamps.txt not found in " + path)

    channelDict = {'frame':
                   {'ts': ts,
                    'frames': frames}}
    if gt_available:
        channelDict['eyeTracking'] = eyes
    if kwargs.get('zeroTime', kwargs.get('zeroTimestamps', True)):
        zeroTimestampsForAChannel(channelDict)
    importedDict = {
        'info': kwargs,  # NOTE: the kwargs dict itself is stored (and mutated) as info
        'data': {'ch0': channelDict}
    }
    importedDict['info']['fileFormat'] = 'imagefolder'
    if kwargs.get('zeroTime', kwargs.get('zeroTimestamps', True)):
        rezeroTimestampsForAnImportedDict(importedDict)
    return importedDict
89 |
90 |
91 |
--------------------------------------------------------------------------------
/bimvee/importHdf5.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2020 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see .
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 |
16 | Using hickle to add hierarchical lists and dicts to hdf5 automatically
17 | https://github.com/telegraphic/hickle
18 | In fact, these are just thin wrappers around hickle.dump/load,
19 | to offer a similar export function to other export calls.
20 | """
21 | #%%
22 |
23 | import hickle
24 |
def importHdf5(filePathOrName='./temp.hdf5', **kwargs):
    """Load a container from an hdf5 file (written via hickle).

    Args:
        filePathOrName (str): target file; '.hdf5' is appended if missing.
        **kwargs: accepted for interface compatibility with the other
            import functions; unused here.

    Returns:
        The de-serialised hierarchical container (dicts/lists/arrays).
    """
    # TODO: Handle path with no filename
    # endswith is clearer and safer than slicing for short names
    if not filePathOrName.endswith('.hdf5'):
        filePathOrName = filePathOrName + '.hdf5'
    print('importHdf5 called, targeting file path and name: ' + filePathOrName)
    return hickle.load(filePathOrName)
--------------------------------------------------------------------------------
/bimvee/importIitNumpy.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Massimiliano Iacono
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see .
13 |
14 | Part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | importIitNumpy imports timestamped address-event data, given a path to a .npy file.
16 | It serves the needs of a specific data format developed at IIT involving events,
17 | frames and bounding boxes.
18 | If available at the predetermined locations, relative to the npy file additional
19 | data may also be imported, such as frames and bounding boxes used to label the
20 | data. At the moment frames and boxes are stored in a data.log file using the
21 | YARP format (see importIitYarp as an example). TODO: generalize data structure
22 |
23 | The output is a dictionary containing:
24 | - info
25 | - data
26 | The exact contents varies according to the file type import but in general:
27 | info: this is a dict which starts life as the kwargs passed in, and is
28 | augmented and modified by the actual contents of the file. It may include
29 | any informational fields in file headers. Minimally, it will contain:
30 | - filePathAndName
31 | - fileFormat
32 | data: this is a list of dicts, one for each sensor or "channel" which has
33 | been imported. Bear in mind that sub-functions may optionally split or join
34 | channels. Within each dict, there is a field for each type of data contained.
35 | A file for example may contain data from a several sensors, but a single sensor
36 | may produce polarity events ("pol"), aps samples ("aps"), imu samples etc.
37 | Within each of these fields, there is a dict, generally containing fields for
38 | each data column, so in the case of pol events, there are 4-5 fields:
39 | - ts
40 | - x
41 | - y
42 | - pol
43 | - optionally ch (channel)
44 | each containing a numpy array of the appropriate type for the data
45 | contained, where all these arrays will be of the same length.
46 | Similarly bounding boxes are saved in the channel in a separate dictionary containing the following fields:
47 | - ts
48 | - minY
49 | - minX
50 | - maxY
51 | - maxX
52 | """
53 |
54 | import numpy as np
55 | import os
56 |
def importIitNumpy(filePathOrName, **kwargs):
    """Import IIT .npy event data, with optional bounding boxes and frames.

    The .npy file is expected to hold an N x 4 array of (ts, x, y, pol).
    If present at predetermined relative locations, 'boxes.npy' (bounding
    boxes for the events) and YARP-format frame/ground-truth folders are
    imported too. All timestamp arrays are rebased, in place, to the
    earliest timestamp found across all imported data.

    Args:
        filePathOrName (str): path to the events .npy file.
        **kwargs: stored in the output's 'info' dict.

    Returns:
        A container dict with 'labelledEvents' (and possibly
        'labelledFrames') channels under 'data'.
    """
    outDict = {
        'info': kwargs,
        'data': {}
    }
    outDict['info']['filePathOrName'] = filePathOrName

    # Importing events
    events = np.load(filePathOrName)

    outDict['data']['labelledEvents'] = {}
    outDict['data']['labelledEvents']['dvs'] = {}

    # Track the earliest timestamp and every ts array, for the final rebase
    tsOffset = events[0, 0]
    ts_to_sync = [events[:, 0]]

    outDict['data']['labelledEvents']['dvs']['ts'] = events[:, 0]
    outDict['data']['labelledEvents']['dvs']['x'] = events[:, 1].astype(int)
    outDict['data']['labelledEvents']['dvs']['y'] = events[:, 2].astype(int)
    outDict['data']['labelledEvents']['dvs']['pol'] = events[:, 3].astype(bool)
    # Hard-coded sensor dimensions - presumably an ATIS 304x240; TODO confirm
    outDict['data']['labelledEvents']['dvs']['dimX'] = 304
    outDict['data']['labelledEvents']['dvs']['dimY'] = 240


    # Importing bounding boxes for events
    gt_filename = os.path.join(os.path.dirname(filePathOrName), 'boxes.npy')

    if os.path.exists(gt_filename):
        b_boxes = np.load(gt_filename)
        outDict['data']['labelledEvents']['boundingBoxes'] = {}

        tsOffset = min(tsOffset, b_boxes[0, 0])
        ts_to_sync.append(b_boxes[:, 0])
        outDict['data']['labelledEvents']['boundingBoxes']['ts'] = b_boxes[:, 0]
        outDict['data']['labelledEvents']['boundingBoxes']['minY'] = b_boxes[:, 1]
        outDict['data']['labelledEvents']['boundingBoxes']['minX'] = b_boxes[:, 2]
        outDict['data']['labelledEvents']['boundingBoxes']['maxY'] = b_boxes[:, 3]
        outDict['data']['labelledEvents']['boundingBoxes']['maxX'] = b_boxes[:, 4]
        outDict['data']['labelledEvents']['boundingBoxes']['label'] = b_boxes[:, 5]

    # Importing frames
    framesPath = (os.path.join(os.path.dirname(filePathOrName), '../processed/frames_left')) # TODO make path argument
    if os.path.exists(framesPath):
        import re
        from imageio import imread
        # Raw string avoids invalid-escape warnings on modern Python
        pattern = re.compile(r'\d+ (\d+\.\d+) (.+\.\w+) \[rgb\]')
        with open(os.path.join(framesPath, 'data.log')) as f:
            content = f.read()
        found = np.array(pattern.findall(content))

        outDict['data']['labelledFrames'] = {}
        outDict['data']['labelledFrames']['frame'] = {}

        frames_ts = found[:, 0].astype(float)
        tsOffset = min(tsOffset, frames_ts[0])
        ts_to_sync.append(frames_ts)

        outDict['data']['labelledFrames']['frame']['ts'] = frames_ts
        outDict['data']['labelledFrames']['frame']['frames'] = [imread(os.path.join(framesPath, x)) for x in found[:, 1]]


    # Importing Bounding Boxes for frames
    framesPath = (os.path.join(os.path.dirname(filePathOrName), '../processed/gt_left')) # TODO make path argument
    if os.path.exists(framesPath):
        import re
        pattern = re.compile(r'\d+ (\d+\.\d+) (\d+\.\w+) (.*)') # TODO specific to yarp data format
        pattern2 = re.compile(r'\( (\d+ \d+ \d+ \d+) \) (\d+) (\d+\.\d+)')
        with open(os.path.join(framesPath, 'data.log')) as f:
            content = f.read()
        found = np.array(pattern.findall(content))

        boxes = []
        boxes_ts = []
        labels = []

        for ts, _, bbs in found:
            for box, label, _ in pattern2.findall(bbs): # TODO third element is confidence score. not used at the moment
                boxes.append(box.split(' '))
                labels.append(label)
                boxes_ts.append(ts)

        boxes_ts = np.array(boxes_ts).astype(float)
        labels = np.array(labels).astype(int)
        boxes = np.array(boxes).astype(int)

        tsOffset = min(tsOffset, boxes_ts[0])
        ts_to_sync.append(boxes_ts)
        outDict['data']['labelledFrames']['boundingBoxes'] = {}
        outDict['data']['labelledFrames']['boundingBoxes']['ts'] = boxes_ts
        outDict['data']['labelledFrames']['boundingBoxes']['minY'] = boxes[:, 0]
        outDict['data']['labelledFrames']['boundingBoxes']['minX'] = boxes[:, 1]
        outDict['data']['labelledFrames']['boundingBoxes']['maxY'] = boxes[:, 2]
        outDict['data']['labelledFrames']['boundingBoxes']['maxX'] = boxes[:, 3]
        outDict['data']['labelledFrames']['boundingBoxes']['label'] = labels

    # Rebase every timestamp array, in place, to the global earliest timestamp
    for x in ts_to_sync:
        x -= tsOffset

    return outDict
--------------------------------------------------------------------------------
/bimvee/importIitVicon.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Author: Suman Ghosh
5 | Sim Bamford
6 | This program is free software: you can redistribute it and/or modify it under
7 | the terms of the GNU General Public License as published by the Free Software
8 | Foundation, either version 3 of the License, or (at your option) any later version.
9 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
10 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
11 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 | You should have received a copy of the GNU General Public License along with
13 | this program. If not, see .
14 |
15 | Input:
16 | vicon data log recorded through yarp
17 |
18 | There are two output formats. If 'parameter' separateBodiesAsChannels is present
19 | and True then the format is:
20 | outDict = {
21 | 'info': {
22 | 'filePathOrName':
23 | 'uniqueIds': np.array of strings, one for each unique bodyId
24 | },
25 | 'data': {
26 | : {
27 | 'pose6q': {
28 | 'ts' : <1D array of timestamps>,
29 | 'point' : <2D array where each row has 3d position of a body at a time instant>,
30 | 'rotation' : <2D array where each row has rotation of a body at a time instant expressed as a quaternion (4d)>} } } }
31 | Otherwise:
32 | outDict = {
33 | 'info': {
34 | 'filePathOrName':
35 | },
36 | 'data': {
37 | 'vicon': {
38 | 'pose6q': {
39 | 'ts' : <1D array of timestamps>,
40 | 'point' : <2D array where each row has 3d position of a body at a time instant>,
41 | 'rotation' : <2D array where each row has rotation of a body at a time instant expressed as a quaternion (4d)>,
42 | 'bodyId' : <1D array where each row has the bodyId of the corresponding marker>,
43 | } } } }
44 |
45 | A bodyID is the name assigned by vicon to a marker (labeled / unlableled) or rigid body
46 | The pose consists of a point in the form [x, y, z]
47 | and a rotation as a quaternion [r_w, r_x, r_y, r_z] (Caution with alternative orderings here)
48 | The datatype is called 'pose6q', referring to the 6dof with rotation in quaternion form.
49 |
50 | Additionally, if separateBodiesAsChannels is not present or false,
51 | and the separateMarkersAndSegments parameter is present and True,
52 | then the data in the vicon channel is broken into two datatypes:
53 | ...
54 | 'vicon': {
55 | 'pose6q': {
56 | 'ts' : <1D array of timestamps>,
57 | 'point' : <2D array where each row has 3d position of a body at a time instant>,
58 | 'rotation' : <2D array where each row has the rotation of a body at a time instant expressed as a quaternion (4d)>,
59 | 'bodyId' : <1D array where each row has the bodyId of the corresponding marker>,
60 | }
61 | 'point3': {
62 | 'ts' : <1D array of timestamps>,
63 | 'point' : <2D array where each row has 3d position of a body at a time instant>,
64 | 'bodyId' : <1D array where each row has the bodyId of the corresponding marker>,
65 | } ...
66 |
67 | Known Issue: timestamps may not be monotonic in the raw data,
68 | and this function doesn't attempt to correct it.
69 | Timestamps are monotonic for a single segment, but not necessarily for multiple
70 | segments imported in the same container.
71 | """
72 |
73 | import os
74 | import re
75 | import numpy as np
76 |
77 | # local imports
78 | from .timestamps import zeroTimestampsForADataType
79 | from .split import splitByLabel, selectByBool
80 |
def getOrInsertDefault(inDict, arg, default):
    """Return inDict[arg], inserting and returning default if arg is absent.

    dict.setdefault implements exactly this get-or-insert contract in a
    single lookup, replacing the manual get/compare/assign sequence.
    """
    return inDict.setdefault(arg, default)
89 |
# accepts a pose6q datatype dict; returns a channel dict containing pose6q and point3 datatypes
def separateMarkersFromSegments(poseDict):
    # A bodyId containing 'Marker' denotes a point marker; everything else
    # is treated as a segment carrying a full 6dof pose.
    bodyIdColumn = poseDict['bodyId'][..., np.newaxis]
    markerMask = np.apply_along_axis(lambda row: 'Marker' in str(row[0]), 1, bodyIdColumn)
    markersDict = selectByBool(poseDict, markerMask)
    segmentsDict = selectByBool(poseDict, ~markerMask)
    return {
        'pose6q': segmentsDict,
        'point3': markersDict}
98 |
def importIitVicon(**kwargs):
    """Import Vicon pose data from a yarp-recorded data.log file.

    Args (via kwargs):
        filePathOrName (str, optional): the data.log to read; if omitted,
            the current directory is searched for a file named 'data.log'.
        zeroTime / zeroTimestamps (bool, optional): rezero timestamps;
            defaults to True.
        separateBodiesAsChannels (bool, optional): one channel per bodyId.
        separateMarkersFromSegments (bool, optional): split the 'vicon'
            channel into 'pose6q' (segments) and 'point3' (markers).

    Returns:
        A container dict as described in the module docstring, or None if
        no file could be found.
    """
    filePathOrName = kwargs.get('filePathOrName')
    # handle the case in which filename is not specified - iterate through files
    # in the current directory looking for data.log
    if filePathOrName is None:
        files = [file for file in os.listdir('.') if os.path.isfile(file)]
        for filename in files:
            if filename == 'data.log':
                kwargs['filePathOrName'] = filename
                return importIitVicon(**kwargs)
        print('No suitable file found')
        return None
    # Raw string avoids invalid-escape warnings on modern Python
    pattern = re.compile(r'(\d+) (\d+\.\d+) \((.*)\)')
    # yarpBottleTimes = []
    outDict = {'info': {'filePathOrName': filePathOrName}, 'data': {}}
    poseDict = {'ts': [], 'point': [], 'rotation': [], 'bodyId': []}
    with open(filePathOrName, 'r') as file:
        print('Found file to read')
        line = file.readline()
        while line:
            found = pattern.search(line.strip())
            if found is None:
                # Fix: skip blank or malformed lines instead of crashing
                # with AttributeError on found.group below
                line = file.readline()
                continue
            # yarpBottleTimes.append(float(found.group(2)))
            viconData = found.group(3)
            bodies = viconData.split(') (')
            for body in bodies:
                elements = body.split(" ")
                bodyId = elements[1].strip('\"')
                ts = elements[2]
                point = elements[3:6]
                # Note: quaternion order is [w,x,y,z] - this is defined by yarp
                # IFrameTransform component, so ignore vicon documentation
                rotation = elements[6:]
                poseDict['ts'].append(ts)
                poseDict['point'].append(point)
                poseDict['rotation'].append(rotation)
                poseDict['bodyId'].append(bodyId)
            line = file.readline()

    # converting lists of strings to numpy arrays of the appropriate types
    poseDict['ts'] = np.array(poseDict['ts'], dtype=float)
    poseDict['point'] = np.array(poseDict['point'], dtype=float)
    poseDict['rotation'] = np.array(poseDict['rotation'], dtype=float)
    poseDict['bodyId'] = np.array(poseDict['bodyId'], dtype=object)
    if kwargs.get('zeroTime', kwargs.get('zeroTimestamps', True)):
        zeroTimestampsForADataType(poseDict)
    if kwargs.get('separateBodiesAsChannels', False):
        outDict['info']['uniqueIds'] = np.unique(poseDict['bodyId'])
        separatedBodies = splitByLabel(poseDict, 'bodyId')
        # The next line inserts the missing 'pose6q' dataType level into the hierarchy
        outDict['data'] = {bodyName: {'pose6q': bodyDict}
                           for bodyName, bodyDict in separatedBodies.items()}
    elif kwargs.get('separateMarkersFromSegments', False):
        outDict['data']['vicon'] = separateMarkersFromSegments(poseDict)
    else:
        outDict['data']['vicon'] = {'pose6q': poseDict}
    return outDict
157 |
158 |
159 |
160 |
--------------------------------------------------------------------------------
/bimvee/importInivationNumpy.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Massimiliano Iacono
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see <http://www.gnu.org/licenses/>.
13 |
14 | Part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | importInivationNumpy imports timestamped address-event data, given a path to a .npy file.
16 | It serves the needs of a specific data format developed at Inivation using the dv library
17 |
18 | The output is a dictionary containing:
19 | - info
20 | - data
21 | The exact contents varies according to the file type import but in general:
22 | info: this is a dict which starts life as the kwargs passed in, and is
23 | augmented and modified by the actual contents of the file. It may include
24 | any informational fields in file headers. Minimally, it will contain:
25 | - filePathAndName
26 | - fileFormat
27 | data: this is a list of dicts, one for each sensor or "channel" which has
28 | been imported. Bear in mind that sub-functions may optionally split or join
29 | channels. Within each dict, there is a field for each type of data contained.
30 | A file for example may contain data from a several sensors, but a single sensor
31 | may produce polarity events ("pol"), aps samples ("aps"), imu samples etc.
32 | Within each of these fields, there is a dict, generally containing fields for
33 | each data column, so in the case of pol events, there are 4-5 fields:
34 | - ts
35 | - x
36 | - y
37 | - pol
38 | - optionally ch (channel)
39 | each containing a numpy array of the appropriate type for the data
40 | contained, where all these arrays will be of the same length.
41 | Similarly bounding boxes are saved in the channel in a separate dictionary containing the following fields:
42 | - ts
43 | - minY
44 | - minX
45 | - maxY
46 | - maxX
47 | """
48 |
49 | import numpy as np
50 | import os
51 | import dv
52 |
def importInivationNumpy(filePathOrName, **kwargs):
    """Import dvs events from a .npy file in the Inivation 'dv' format.

    The file is expected to contain a (possibly nested) pickled array of
    event objects, each exposing .timestamp (microseconds), .x, .y and
    .polarity attributes.

    Returns a bimvee container dict with a single 'events' channel holding
    a 'dvs' dataType; timestamps are rebased to the first event and
    converted to seconds. Empty input yields empty arrays rather than an
    exception.
    """
    outDict = {
        'info': kwargs,
        'data': {}
    }
    outDict['info']['filePathOrName'] = filePathOrName

    # Importing events
    events = np.load(filePathOrName, allow_pickle=True)
    if len(events.shape) > 1:
        events = events[0]

    # Single pass over the Python event objects, instead of one pass per field
    tsList = []
    xList = []
    yList = []
    polList = []
    for e in events:
        tsList.append(e.timestamp)
        xList.append(e.x)
        yList.append(e.y)
        polList.append(e.polarity)
    ts = np.array(tsList, dtype=float)
    if ts.size:
        # Rebase to the first timestamp and convert microseconds to seconds
        ts = (ts - ts[0]) * 1e-6
    x = np.array(xList)
    y = np.array(yList)
    outDict['data']['events'] = {'dvs': {
        'ts': ts,
        'x': x,
        'y': y,
        'pol': np.array(polList),
        # Sensor dimensions inferred from the data; 0 if there are no events
        'dimX': int(x.max()) + 1 if x.size else 0,
        'dimY': int(y.max()) + 1 if y.size else 0,
    }}
    return outDict
--------------------------------------------------------------------------------
/bimvee/importRpgDvsRos.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """https://github.com/event-driven-robotics/importRosbag
4 | Copyright (C) 2019 Event-driven Perception for Robotics
5 | Authors: Sim Bamford
6 | This program is free software: you can redistribute it and/or modify it under
7 | the terms of the GNU General Public License as published by the Free Software
8 | Foundation, either version 3 of the License, or (at your option) any later version.
9 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
10 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
11 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 | You should have received a copy of the GNU General Public License along with
13 | this program. If not, see <http://www.gnu.org/licenses/>.
14 |
15 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
16 | importRpgDvsRos uses the importRosbag submodule
17 | (original located at https://github.com/event-driven-robotics/importRosbag)
18 | to import a rosbag, containing a selected set of ros msg types.
19 | In particular, this supports the dvs_msgs/EventArray messages defined at:
20 | http://rpg.ifi.uzh.ch/davis_data.html
21 | It also supports some standard ros types:
22 | geometry_msgs/PoseStamped
23 | geometry_msgs/Transform
24 | geometry_msgs/TransformStamped
25 | geometry_msgs/TwistStamped
26 | sensor_msgs/CameraInfo, (calibration)
27 | sensor_msgs/Image
28 | sensor_msgs/Imu
29 | sensor_msgs/PointCloud2
30 | tf/tfMessage
31 | Furthermore there is support for:
32 | esim_msgs/OpticFlow
33 |
34 | It returns nested dicts of the form:
35 | {
36 | info
37 | data
38 | channelName
39 | dvs
40 | "pol": numpy array of bool
41 | "x": numpy array of uint16
42 | "y": numpy array of uint16
43 | "ts": numpy array of float - seconds (basic format is int with unit increments of 80 ns)
44 | frame ...
45 | imu ...
46 | etc ...
47 |
48 | It optionally reorders the resulting data to support legacy templates (kwarg=template)
49 | Here follow 2 example templates:
50 |
51 | template = {
52 | 'left': {
53 | 'dvs': '/davis/left/events',
54 | }, 'right': {
55 | 'dvs': '/davis/right/events',
56 | }
57 | }
58 |
59 | template = {
60 | 'ch0': {
61 | 'dvs': '/dvs/events',
62 | 'frame': '/dvs/image_raw',
63 | 'pose6q': '/optitrack/davis',
64 | 'cam': '/dvs/camera_info',
65 | 'imu': '/dvs/imu'
66 | }
67 | }
68 |
69 | If a template is supplied, any connections which are not named in the template are not imported but simply listed.
70 | If an empty template is supplied, then the contained types are printed out.
71 | Use this to inspect a new .bag file before defining the import template.
72 | Without any template, the default behaviour is to put any imported data into
73 | its own channel, named after the topic in the bag.
74 | """
75 |
76 | #%%
77 |
78 | import string
79 |
80 | # Local imports
81 | from .timestamps import zeroTimestampsForAChannel, rezeroTimestampsForImportedDicts
82 | from .importRosbag.importRosbag.importRosbag import importRosbag
83 |
def bimveeTypeForRosbagType(rosbagType):
    # Map a rosbag message type to the corresponding bimvee dataType name;
    # returns None for unrecognised types.
    typeMapping = {
        'dvs_msgs_EventArray': 'dvs',
        'esim_msgs_OpticFlow': 'flowMap',
        'geometry_msgs_PoseStamped': 'pose6q',
        'geometry_msgs_Transform': 'pose6q',
        'geometry_msgs_TransformStamped': 'pose6q',
        'geometry_msgs_TwistStamped': 'twist',
        'sensor_msgs_CameraInfo': 'cam',
        'sensor_msgs_Image': 'frame',
        'sensor_msgs_Imu': 'imu',
        'sensor_msgs_PointCloud2': 'point3',
        'sensor_msgs_Illuminance': 'illuminance',
        'nav_msgs_Odometry': 'pose6q',
        'tf_tfMessage': 'pose6q',
        'vicon_Subject': 'pose6q',
    }
    return typeMapping.get(rosbagType.replace('/', '_'))
101 |
def importRpgDvsRos(filePathOrName, **kwargs):
    """Import a rosbag via the importRosbag submodule (see module docstring).

    kwargs:
        template: optional mapping {channelName: {dataType: topicName}}.
            An empty dict just lists the bag's contents and returns None;
            None (default) imports every recognised topic as its own channel.
        zeroTime / zeroTimestamps (default True): jointly rebase timestamps
            across all imported channels.

    Returns a bimvee container dict {'info': ..., 'data': ...}.
    """
    template = kwargs.get('template')
    if template == {}:  # Just list contents of bag without importing
        importRosbag(filePathOrName=filePathOrName, listTopics=True, **kwargs)
        return
    topics = importRosbag(filePathOrName=filePathOrName, **kwargs)
    outDict = {
        'info': kwargs,
        'data': {}
    }
    outDict['info']['filePathOrName'] = filePathOrName
    if template is None:
        # No template: each recognised topic becomes its own channel
        for topicLabel in topics.keys():
            rosbagType = topics[topicLabel].pop('rosbagType')
            bimveeType = bimveeTypeForRosbagType(rosbagType)
            if bimveeType is None:
                print('Actually, ' + topicLabel + ' has not been imported, because the rosbag message type ' + rosbagType + ' has not been recognised.')
            else:
                outDict['data'][topicLabel] = {bimveeType: topics[topicLabel]}
    else:
        # If we get to here then there is a template to parse
        # The template becomes the data branch of the importedDict
        for channelKey in template.keys():
            channelKeyStripped = str(channelKey).translate(str.maketrans('', '', string.punctuation))
            outDict['data'][channelKeyStripped] = {}
            for dataType in template[channelKey]:
                topicLabel = template[channelKey][dataType]
                # Report a topic named in the template but absent from the
                # bag, rather than raising a bare KeyError
                topic = topics.pop(topicLabel, None)
                if topic is None:
                    print('Topic "', topicLabel, '" not found in the bag')
                    continue
                rosbagType = topic.pop('rosbagType')
                bimveeType = bimveeTypeForRosbagType(rosbagType)
                if bimveeType != dataType:
                    print('dataType "', dataType, '" not correctly defined for topic: "', topicLabel, '"')
                else:
                    outDict['data'][channelKeyStripped][dataType] = topic
    # Post processing
    if kwargs.get('zeroTime', kwargs.get('zeroTimestamps', True)):
        # Optional: start the timestamps at zero for the first event
        # This is done collectively for all the concurrent imports
        for channelKey in outDict['data'].keys():
            zeroTimestampsForAChannel(outDict['data'][channelKey])
        # jointly rezero for all channels
        rezeroTimestampsForImportedDicts(outDict)
    return outDict
145 |
--------------------------------------------------------------------------------
/bimvee/importSecDvs.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | Copyright (C) 2020 Event-driven Perception for Robotics
5 | Authors: Sim Bamford
6 | Ander Arriandiaga Laresgoiti
7 |
8 | This program is free software: you can redistribute it and/or modify it under
9 | the terms of the GNU General Public License as published by the Free Software
10 | Foundation, either version 3 of the License, or (at your option) any later version.
11 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
13 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
14 | You should have received a copy of the GNU General Public License along with
15 | this program. If not, see <http://www.gnu.org/licenses/>.
16 |
17 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
18 | importSecDvs opens a .bin file, assumed to contain encoded data from SecDvs gen3.
19 | Returns a dict in this format:
20 | {'info': {},
21 | 'data': {
22 | ch0: {
23 | dvs: {
24 | 'ts': np.array of float in seconds
25 | 'x': np.array of np.uint16 in pixels
26 | 'y': np.array of np.uint16 in pixels
27 | 'pol': np.array of bool -- 1 = ON event
28 | }}}}
29 | """
30 |
31 | import numpy as np
32 | from tqdm import tqdm
33 |
34 | # Local imports
35 | from .timestamps import unwrapTimestamps, zeroTimestampsForADataType
36 |
def importSecDvs(**kwargs):
    """Import a SecDvs gen3 .bin file as a bimvee container.

    kwargs:
        filePathOrName (required): path to the .bin file.
        zeroTime / zeroTimestamps (default True): rebase timestamps so the
            first event is at time zero.

    Returns {'info': ..., 'data': {'ch0': {'dvs': {...}}}} where the dvs
    dict holds 'ts' (float seconds), 'x', 'y' (uint16) and 'pol' (bool)
    arrays, sorted by timestamp.

    The file is a stream of big-endian 32-bit words of three kinds
    (reference-timestamp, column, row), distinguished by the flag bits
    masked below; the bit layout is assumed to follow the SecDvs gen3
    format - TODO confirm against the sensor documentation.
    """
    filePathOrName = kwargs['filePathOrName']
    print('Attempting to import ' + filePathOrName + ' as secdvs')
    with open(filePathOrName, 'rb') as file:
        data = np.fromfile(file, dtype='>u4')

    print('Clipping any rows without column info...')
    # Hunt for the first col iteratively and cut data before this
    for idx, word in enumerate(data):
        if word & 0x4000000:
            break
    data = data[idx:]

    print('Building indices for word types ...')
    isTs = (data & 0x08000000).astype(bool) # Reference timestamp, i.e. tsMsb
    isCol = (data & 0x4000000).astype(bool) # X and tsLsb
    isRow = (data & 0x80000000).astype(bool) # y and pol

    print('Handling col data ...')
    # create an index which points data back to col
    # (i.e. colIdx[i] = index of the most recent col word at or before i)
    numCol = np.count_nonzero(isCol)
    colIdsRange = np.arange(numCol)
    colIdsSparse = np.where(isCol)[0]
    colIdsNext = np.append(colIdsSparse[1:], len(data))
    colIdx = np.zeros_like(data)
    for rangeIdx, firstIdx, nextIdx in zip(colIdsRange, colIdsSparse, colIdsNext):
        colIdx[firstIdx:nextIdx] = rangeIdx
    # now we can isolate col
    col = data[isCol]
    # convert col data
    xByCol = (col & 0x000003FF).astype(np.uint16) # column Address
    tsSmallByCol = (col & 0x1FF800) >> 11
    # create col data vectors which match data
    x = xByCol[colIdx]
    tsSmall = tsSmallByCol[colIdx]

    print('Handling timestamp data ...')

    # from data extract the "start col" flag
    isStartCol = (data & 0x200000).astype(bool) & isCol
    # now, for each startCol, we want to search backwards through data for a ts
    # To do this, we find the "data" idx of start col, we create a set of tsIds
    # and then we search tsIds for each dataIdx
    tsIdsSparse = np.where(isTs)[0]
    # Actually find the tsMsb at this point, to avoid more indexing
    tsMsbSparse = (data[isTs] & 0x003FFFFF) << 10
    # Prepend an extrapolated earlier tsMsb so that startCols occurring
    # before the first ts word still get a (previous-epoch) reference
    tsMsbSparse = np.insert(tsMsbSparse, 0, tsMsbSparse[0] - (1 << 10))
    # create an index which points data back to startCols
    startColIdsSparse = np.where(isStartCol)[0]
    # Search tsIds for each dataIdx
    tsForStartCol = np.zeros_like(startColIdsSparse)
    for idx, startColIdx in enumerate(startColIdsSparse):
        # searchsorted + the prepended element selects the latest ts word
        # at or before this startCol
        tsForStartCol[idx] = tsMsbSparse[np.searchsorted(tsIdsSparse, startColIdx)]

    # Now we have the ts(Large) for each startCol event. Now create a tsLarge
    # array which matches data; do this in just the same way as above for
    # tsSmall given col and colIdx
    # create an index which points data back to col
    numStartCol = np.count_nonzero(isStartCol)
    startColIdsRange = np.arange(numStartCol)
    startColIdsSparse = np.where(isStartCol)[0]
    startColIdsNext = np.append(startColIdsSparse[1:], len(data))
    startColIdx = np.zeros_like(data)
    for rangeIdx, firstIdx, nextIdx in zip(startColIdsRange, startColIdsSparse, startColIdsNext):
        startColIdx[firstIdx:nextIdx] = rangeIdx
    tsLarge = tsForStartCol[startColIdx]

    # Now we have tsLarge and tsSmall aligned by data,
    # we can sum these to give the full ts
    ts = tsLarge + tsSmall

    print('Handling row data ...')

    # Now we have x and ts for each row in data;
    # now select these just for group/row data
    x = x[isRow]
    ts = ts[isRow]
    data = data[isRow]

    # A major timewrap is possible, so handle unwrapping before the following
    # processing, which will mix up the timestamps
    # Noticing that for whatever reason, there can be discontinuities in the
    # minor timestamps, there is a first call to unwrap timestamps which
    # uses a wrapTime of 1024 us. This would introduce about 1 ms of delay
    # in the case of an actual major timewrap
    ts = unwrapTimestamps(ts, wrapTime=1024)
    ts = unwrapTimestamps(ts) / 1000000 # Convert to seconds in the same step

    # Break out addr and pol for each of the two groups
    # (each row word encodes up to two 8-row groups of pixels)
    pol1 = ((data & 0x00010000) >> 16).astype(bool)
    # NOTE(review): shifting by 15 rather than 18 scales the group address
    # by 8 rows per group - presumably intentional; confirm against datasheet
    yLarge1 = ((data & 0x00FC0000) >> 15).astype(np.uint16) # grp1Address
    pol2 = ((data & 0x00020000) >> 17).astype(bool)
    grp2Offset = (data & 0x7C000000) >> 23
    yLarge2 = (grp2Offset + yLarge1).astype(np.uint16)

    # for each of the single bit indices, select events for each of the two groups
    grp1Events = data & 0xFF
    grp2Events = data & 0xFF00
    tsToConcatenate = []
    xToConcatenate = []
    yToConcatenate = []
    polToConcatenate = []
    for idx in tqdm(range(8)):
        #group 1
        grp1Bool = (grp1Events & (2 ** idx)).astype(bool)
        tsToConcatenate.append(ts[grp1Bool])
        xToConcatenate.append(x[grp1Bool])
        yToConcatenate.append(yLarge1[grp1Bool] + idx)
        polToConcatenate.append(pol1[grp1Bool])
        # group 2
        grp2Bool = (grp2Events & (2 ** (idx + 8))).astype(bool)
        tsToConcatenate.append(ts[grp2Bool])
        xToConcatenate.append(x[grp2Bool])
        yToConcatenate.append(yLarge2[grp2Bool] + idx)
        polToConcatenate.append(pol2[grp2Bool])

    print('Post-processing steps ...')

    # Concatenate the resulting arrays
    ts = np.concatenate(tsToConcatenate)
    x = np.concatenate(xToConcatenate)
    y = np.concatenate(yToConcatenate)
    pol = np.concatenate(polToConcatenate)

    # The above selection strategy mixed up the timestamps, so sort the events by ts

    ids = np.argsort(ts)

    ts = ts[ids]
    x = x[ids]
    y = y[ids]
    pol = pol[ids]

    # Invert polarity, in line with library-wide definition
    pol = ~pol

    dvsDict = {'ts': ts,
               'x': x,
               'y': y,
               'pol': pol,
               }

    if kwargs.get('zeroTime', kwargs.get('zeroTimestamps', True)):
        zeroTimestampsForADataType(dvsDict)
    outDict = {
        'info': {'filePathOrName':filePathOrName,
                 'fileFormat': 'secdvs'},
        'data': {
            'ch0': {
                'dvs': dvsDict
            }
        }
    }
    print('Done.')

    return outDict
193 |
--------------------------------------------------------------------------------
/bimvee/importSkeleton.py:
--------------------------------------------------------------------------------
1 | import json
2 | import numpy as np
3 | import re
4 | import os
5 |
def importSkeletonDataLog(filePath, keys):
    """Import skeleton joint coordinates from a yarp-style data.log file.

    Each line is expected to look like:
        <bottle num> <timestamp> SKLT (<x0> <y0> <x1> <y1> ...)
    where consecutive (x, y) pairs are assigned to `keys` in order.

    Returns a dict with one (N, 2) int array per key plus a float 'ts' array.
    """
    # Raw string: '\d' is an invalid escape sequence in a plain string literal
    pattern = re.compile(r'\d* (\d*.\d*) SKLT \((.*)\)')
    data_dict = {k: [] for k in keys}
    timestamps = []
    with open(filePath) as f:
        for line in f:
            matches = pattern.findall(line)
            if not matches:
                continue  # skip blank or malformed lines instead of raising
            ts, data = matches[0]
            coords = np.array(data.split(' ')).astype(int).reshape(-1, 2)
            for coord, label in zip(coords, data_dict):
                data_dict[label].append(coord)
            timestamps.append(ts)
    data_dict['ts'] = np.array(timestamps).astype(float)
    for label in data_dict:
        data_dict[label] = np.array(data_dict[label])
    return data_dict
22 |
def importSkeleton(**kwargs):
    # Read the json index file, which names the data.log file (relative to
    # the json's own directory) and the ordered list of joint labels.
    jsonPath = kwargs.get('filePathOrName')
    with open(jsonPath, 'r') as jsonFile:
        index = json.load(jsonFile)
    logPath = os.path.join(os.path.dirname(jsonPath), index['file'])
    return importSkeletonDataLog(logPath, index['labels'])
27 |
--------------------------------------------------------------------------------
/bimvee/importUdsAedat.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2020 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | following https://github.com/jpdominguez/pyNAVIS/blob/master/src/pyNAVIS/loaders.py
6 | by Juan Pedro Dominguez
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
14 | this program. If not, see <http://www.gnu.org/licenses/>.
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
17 | importUdsAedat is a function for importing timestamped address-event data, given
18 | a path or a file containing an aedat file in the format used by PyNavis.
19 | The output is a file-level container in bimvee format.
20 | In general this is a dict containing:
21 | - info
22 | - data
23 | info: will contain minimally:
24 | - filePathAndName
25 | - fileFormat
26 | data: this is a list of dicts, one for each sensor or "channel" which has
27 | been imported. Bear in mind that sub-functions may optionally split or join
28 | channels. Within each dict, there is a field for each type of data contained.
29 | Each of these fields is a dict containing a numpy array for timestamps 'ts'
30 | (one row per event), and a np array for each type of data, with a row for
31 | each of those timestamps.
32 | In particular there will be at least an 'ear' field, containing at least:
33 | ts (timestamp)
34 |     freq (frequency channel, increasing addr -> decreasing frequency)
35 | and possibly:
36 | pol (polarity 0 -> positive; 1 -> negative)
37 | ch (channel: 0 -> left; 1 -> right)
38 | xso (Olive model: 0 -> MSO; 1 -> LSO)
39 | auditoryModel (auditory model: 0 -> cochlea; 1 -> superior olivary complex)
40 | itdNeuron (Addresses of Interaural-time-difference neurons)
41 |
42 | There are basic formats corresponding to Ini Aedat v1 and 2.
43 | Files don't necessarily contain headers to disambiguate these, therefore the
44 | user must specify in kwargs any deviations from the following assumptions:
45 | (loosely following https://github.com/jpdominguez/pyNAVIS/blob/master/src/pyNAVIS/main_settings.py)
46 | codec: 'Addr2Bytes' i.e. [address(2-bytes) timestamp(4-bytes)] default
47 | 'Addr4Bytes' i.e. [address(4-bytes) timestamp(4-bytes)]
48 | stereo = True
49 | splitByChannel = False (i.e. left vs right into separate channel dicts)
50 | numChannelBits: 5 (number of bits used for frequency channel addresses per sensor)
51 | tsTick: 1e-6 (i.e. an integer time increment corresponds to 1 microsecond)
52 | polarised: True (i.e. decode polarity of spikes)
53 | zeroTime: True (whether to offset timestamps so that they start with zero)
54 |
55 | The address is interpreted as: 0...0cf...fp where:
56 | p is the polarity bit and may or may not be present
57 | f are the frequency bits - there are ceil(log_2(numChannels))of these
58 | c is the channel bit - left vs right, and may or may not be present.
59 |
60 | """
61 |
62 | #%%
63 |
64 | import os
65 | import numpy as np
66 |
67 | # local imports
68 | from .timestamps import zeroTimestampsForADataType
69 | from .split import splitByLabel
70 |
def importUdsAedat(**kwargs):
    """Import a UDS (pyNAVIS-style) .aedat file of auditory spike events.

    kwargs (see module docstring for the address bit layout):
        filePathOrName (required): path to the file.
        codec: 'addr4Bytes' -> [address(4 bytes) timestamp(4 bytes)];
            any other value -> [address(2 bytes) timestamp(4 bytes)].
            Note: the default here is 'addr4Bytes'.
        importFromByte (default 0) / importMaxBytes: byte range to import.
        polarised / polarized (default True): decode the polarity bit.
        numChannelBits (default 5): bits used for the frequency address.
        stereo (default True): decode the left/right channel bit.
        splitByChannel (default True): put left/right in separate channels.
        zeroTimestamps (default True): rebase timestamps to zero.
        tsTick (default 1e-6): seconds per timestamp increment.

    Returns a bimvee file-level container {'info': ..., 'data': ...}.
    """
    filePathOrName = kwargs.get('filePathOrName')
    if not os.path.exists(filePathOrName):
        raise FileNotFoundError("File or folder not found.")
    # Move forward assuming that it's a single file
    # TODO: implement hierarchical descent through folders
    codec = kwargs.get('codec', 'addr4Bytes')
    addrSize = 4 if codec == 'addr4Bytes' else 2
    importFromByte = kwargs.get('importFromByte', 0)
    importMaxBytes = kwargs.get('importMaxBytes')
    # The file is opened in binary mode, so header lines accumulate as bytes
    # (previously '' + bytes raised TypeError whenever a header was present)
    headers = b''
    with open(filePathOrName, 'rb') as file:
        file.seek(importFromByte)

        ## Check header ##
        lt = file.readline()
        while lt and lt[0] == ord("#"):
            headers = headers + lt
            importFromByte += len(lt)
            lt = file.readline()
        file.seek(0, 2)
        eof = file.tell()

        # Establish how far ahead to read
        if importMaxBytes is None:
            importMaxBytes = eof - importFromByte
        else:
            # Import a whole number of (addr, timestamp) words
            importMaxBytes -= np.mod(importMaxBytes, addrSize + 4)
            # Clamp the requested range to the file size
            # (the previous comparison was inverted, which expanded the
            # requested range to EOF instead of clamping it)
            if importFromByte + importMaxBytes > eof:
                importMaxBytes = eof - importFromByte

        # Now read from the start point
        file.seek(importFromByte)
        events = file.read(importMaxBytes)

        # Pass out where we got to; read() returns b'' (not None) at EOF
        if file.read(1) == b'':
            kwargs['importedToByte'] = 'EOF'
        else:
            kwargs['importedToByte'] = file.tell() - 2
            # - 2 compensates for the small read-ahead just performed to test EOF

    # The data is big-endian: recombine the bytes manually below
    events = np.frombuffer(events, np.uint8)
    events = events.reshape((-1, addrSize + 4)).astype(np.uint32)
    # The last 4 bytes of each word are the timestamp, most significant first
    tsBytes = events[:, -4:]
    ts = tsBytes[:, -1]
    for byteIdx in range(3):
        # Weight is 2^(8*(3-byteIdx)): byte 0 is the most significant.
        # (The previous weight 2^(8*(4-byteIdx)) was off by one byte.)
        ts = ts + tsBytes[:, byteIdx] * 2 ** (8 * (3 - byteIdx))
    earDict = {'ts': ts}
    # Recombine the big-endian address bytes
    if addrSize == 4:
        addr = (events[:, 0] * 2**24 +
                events[:, 1] * 2**16 +
                events[:, 2] * 2**8 +
                events[:, 3])
    else:
        addr = (events[:, 0] * 2**8 +
                events[:, 1])
    # Address layout is 0...0cf...fp: polarity, frequency, channel bits
    if kwargs.get('polarised', kwargs.get('polarized', True)):
        earDict['pol'] = (addr & 0x01).astype(bool)
        addr = addr >> 1
    numChannelBits = kwargs.get('numChannelBits', 5)
    earDict['freq'] = np.uint16(addr & (2**numChannelBits - 1))
    addr >>= numChannelBits
    if kwargs.get('stereo', True):
        earDict['ch'] = np.uint8(addr & 0x01)
    # 'itdNeuronIds': itdNeuronIds,
    # 'auditoryModel': auditoryModel,
    # 'xsoType': xsoType,
    # xsoType = np.uint8(addr & 0x01)
    # addr >>= 1
    # auditoryModel = np.uint8(addr & 0x01)
    # addr >>= 3
    # itdNeuronIds = np.uint8(addr & 0x7F)
    # addr >>= 10
    if kwargs.get('zeroTimestamps', True):
        zeroTimestampsForADataType(earDict)  # TODO: This should return a new dict
    tsTick = kwargs.get('tsTick', 1e-6)
    earDict['ts'] = earDict['ts'].astype(float) * tsTick
    if kwargs.get('stereo', True) and kwargs.get('splitByChannel', True):
        channels = splitByLabel(earDict, 'ch')
        if 0 in channels.keys():
            channels['left'] = {'ear': channels.pop(0)}
        if 1 in channels.keys():
            channels['right'] = {'ear': channels.pop(1)}
    else:
        channels = {'general': {'ear': earDict}}

    # Store any header lines found (previously the condition was inverted,
    # so headers were only recorded when there were none)
    if headers != b'':
        kwargs['headers'] = headers.decode(errors='replace')
    outDict = {
        'info': kwargs,
        'data': channels
    }
    # NOTE(review): 'iityarp' looks like a copy-paste from importIitYarp;
    # kept as-is to avoid breaking downstream checks - confirm intended value
    outDict['info']['fileFormat'] = 'iityarp'
    return outDict
176 |
--------------------------------------------------------------------------------
/bimvee/info.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see <http://www.gnu.org/licenses/>.
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | Contains general functions for text summaries of the contents of dicts which result
16 | from imports using importAe function
17 | """
18 |
19 | from math import log10, floor
20 | import numpy as np
21 | import pprint
22 | pp = pprint.PrettyPrinter(indent=12)
23 |
# Round to 3 s.f.
def sf3(x):
    """Round a non-zero number to 3 significant figures; anything else
    (zero, bools, non-numerics) is returned unchanged."""
    if x and isinstance(x, (int, float, complex)) and not isinstance(x, bool):
        return round(x, -int(floor(log10(abs(x)))) + 2)
    else:
        return x

def fieldMinMax(dataTypeDict, fieldName):
    """Print the value range of dataTypeDict[fieldName] as 'min <= name <= max'.

    2D arrays (e.g. x, y, z in columns) are reported column by column.
    Missing fields, non-ndarray fields and arrays with fewer than 2 rows
    are silently skipped.
    """
    if fieldName not in dataTypeDict:
        return
    field = dataTypeDict[fieldName]
    if type(field) != np.ndarray or field.shape[0] <= 1:
        return
    try:
        if len(field.shape) > 1:
            # Handle 2D arrays, e.g. an array containing x, y, z in columns
            for dim1Idx in range(field.shape[1]):
                print('          ',
                      sf3(np.min(field[:, dim1Idx])),
                      ' <= ', fieldName, ' - col ', dim1Idx,
                      ' <= ', sf3(np.max(field[:, dim1Idx])))
        else:
            # The bounds now read 'min <= field <= max'; the previous
            # output used '>=' in both places, which inverted the meaning
            print('          ',
                  sf3(np.min(field)),
                  ' <= ', fieldName,
                  ' <= ', sf3(np.max(field)))
    except ValueError:
        print('          ', fieldName, ' contains data error!')
51 |
def info(containers, **kwargs):
    """Pretty-print a summary of one or more imported containers:
    info dict, then per channel and dataType the event count, timestamp
    offset and the value range of every field."""
    if not isinstance(containers, list):
        containers = [containers]
    for container in containers:
        pp.pprint(container['info'])
        for channelName, channel in container['data'].items():
            print('    Channel: ' + channelName)
            for dataType, dataTypeDict in channel.items():
                print('        DataType: ' + dataType)
                if 'ts' not in dataTypeDict:
                    # No timestamps: just dump the dict as-is
                    pp.pprint(dataTypeDict)
                    continue
                print('          Num events: ', len(dataTypeDict['ts']))
                fieldMinMax(dataTypeDict, 'ts')
                if 'tsOffset' in dataTypeDict:
                    print('          Ts offset: ', dataTypeDict['tsOffset'])
                for fieldName in dataTypeDict.keys():
                    if fieldName not in ['ts', 'tsOffset']:
                        fieldMinMax(dataTypeDict, fieldName)
        print()
        print()
def infoTs(containers, **kwargs):
    """Print a summary restricted to timestamp information (range and
    offset) for each channel and dataType of the given container(s)."""
    containerList = containers if isinstance(containers, list) else [containers]
    for container in containerList:
        print(container['info'])
        for channelName, channel in container['data'].items():
            print('    Channel: ' + channelName)
            for dataType, dataTypeDict in channel.items():
                print('        DataType: ' + dataType)
                if 'ts' in dataTypeDict:
                    fieldMinMax(dataTypeDict, 'ts')
                if 'tsOffset' in dataTypeDict:
                    print('          Ts offset: ', dataTypeDict['tsOffset'])
88 |
89 | #%% Legacy function names
90 |
def dict_keys_print(d, indent):
    """Recursively print the keys of a nested dict, one per line, wrapped
    in braces and indented according to nesting level."""
    pad = ' ' * 4 * indent
    print(' ' * (4 * indent - 2) + '{ ', end='')
    for keyIdx, (key, value) in enumerate(d.items()):
        # The first key continues the opening-brace line; later keys
        # start on their own padded line
        if keyIdx == 0:
            print(str(key), end='')
        else:
            print(pad + str(key), end='')
        if isinstance(value, dict):
            print(':')
            dict_keys_print(value, indent + 1)
        else:
            print(',')
    print(' ' * (4 * indent - 2) + '}')
107 |
def infoForImportedDicts(container, **kwargs):
    """Legacy name - thin alias for info()."""
    info(container, **kwargs)
110 |
def infoTsForImportedDicts(container, **kwargs):
    """Legacy name - thin alias for infoTs()."""
    infoTs(container, **kwargs)
--------------------------------------------------------------------------------
/bimvee/player.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed Jul 3 13:39:02 2024
4 |
5 | @author: sbamford
6 |
7 |
8 | RETARD: Recorded Event Time-synchronization Agnostic Representation Dataplayer (Thanks, ChatGpt)
9 |
10 | Given a container, create a player with one subwindow for each channel / data type
11 | """
12 |
13 | import numpy as np
14 | import matplotlib.pyplot as plt
15 | import matplotlib.animation as animation
16 | from matplotlib.widgets import Slider, Button
17 | from bimvee.plotDvsContrast import getEventImageForTimeRange
18 | import math
19 | import time
20 |
class ViewerDvs():
    """Renders one stream of DVS events as a grey-scale image on a
    matplotlib axis, integrating events over a configurable time window."""

    def __init__(self, events, ax):
        # Assume 'events' is directly a single dvs event container
        # (a dict with at least 'x', 'y', 'ts' arrays)
        self.events = events
        self.ax = ax
        # Sensor extent inferred from the data (coordinates are zero-based)
        self.dimX = np.max(self.events['x']) + 1
        self.dimY = np.max(self.events['y']) + 1
        self.contrast = 1
        self.time_window = 0.03  # seconds of events integrated per frame
        self.label = events.get('label', '')

    def update(self, target_time):
        """Clear the axis and redraw the event image centred on target_time."""
        self.ax.clear()
        # This function should be pushed to a visualiser
        half_window = self.time_window / 2
        event_image = getEventImageForTimeRange(
            self.events,
            startTime=target_time - half_window,
            endTime=target_time + half_window,
            contrast=self.contrast,
            dimX=self.dimX,
            dimY=self.dimY,
            image_type='not_polarized')
        # Shift so that 'no events' maps to mid-grey of the display range
        event_image += self.contrast
        self.ax.imshow(event_image, cmap='gray', vmin=0, vmax=self.contrast * 2)
        # Strip both major and minor ticks on both axes
        for minor in (False, True):
            self.ax.set_xticks([], minor=minor)
            self.ax.set_yticks([], minor=minor)
        self.ax.set_title(self.label)
55 |
# TODO: This functionality, or some of it, may belong in the container class
def get_dvs_data(container, label=None):
    """Recursively collect all dvs event dicts from a (possibly nested)
    container of dicts.

    A dict is treated as a dvs container when it has 'ts', 'pol', 'x' and
    'y' keys; in that case a 'label' key is added to it (side effect on the
    input) built by joining the path of traversed keys with underscores.

    Arguments:
        container {dict} -- a dvs data dict, or nested dicts containing them
    Keyword Arguments:
        label {list of str} -- key path accumulated so far (default: [])
    Returns:
        list of dict -- every dvs data dict found
    """
    # Fixed: previously 'label=[]' - a mutable default argument
    if label is None:
        label = []
    keys = container.keys()
    if ('ts' in keys
        and 'pol' in keys
        and 'x' in keys
        and 'y' in keys):
        # TODO: put stricter check here for data type
        container['label'] = '_'.join(label)
        return [container]
    dvs_dicts = []
    for key, value in container.items():
        # isinstance rather than type() == dict, so dict subclasses descend too
        if isinstance(value, dict):
            dvs_dicts = dvs_dicts + get_dvs_data(value, label + [str(key)])
    return dvs_dicts
72 |
73 |
from math import log10, floor
def round_to_1_sf(x):
    """Round x to one significant figure.

    Returns 0 for x == 0, which has no defined order of magnitude
    (log10(0) would otherwise raise ValueError).
    """
    if x == 0:
        return 0
    return round(x, -int(floor(log10(abs(x)))))
77 |
class Player():
    """Data player: one subwindow per dvs channel found in the container,
    with shared sliders for time, playback speed and integration window,
    plus a play/pause button."""

    # TODO: - there might be a global override for local controls like contrast, TBD
    interval_ms = 100  # time between animation frames

    def __init__(self, container):
        # Fixed: is_playing and viewers used to be class attributes; a
        # class-level mutable list is shared between ALL Player instances,
        # so each new player would also drive the previous players' viewers.
        # They are per-instance state and belong here.
        self.is_playing = True
        self.viewers = []
        self.fig = plt.figure()
        dvs_containers = get_dvs_data(container)
        num_containers = len(dvs_containers)
        # Arrange the viewers in a near-square grid
        num_cols = math.ceil(math.sqrt(num_containers))
        num_rows = math.ceil(num_containers / num_cols)
        x_min = 0
        y_min = 0.1
        x_extent = 1
        y_extent = 0.7
        x_spacing = 0.15 / (num_cols + 1)
        y_spacing = 0.15 / (num_rows + 1)
        x_extent_per_col = (x_extent - x_spacing) / num_cols
        y_extent_per_row = (y_extent - y_spacing) / num_rows
        self.last_time = time.time()
        self.speed = 1

        for idx, events in enumerate(dvs_containers):
            row_idx = math.floor(idx / num_cols)
            col_idx = idx % num_cols
            ax = self.fig.add_axes([
                x_min + x_spacing / 2 + col_idx / max((num_cols - 1), 1) * x_extent_per_col,
                y_min + y_spacing / 2 + row_idx / max((num_rows - 1), 1) * y_extent_per_row,
                x_extent_per_col - x_spacing / 2,
                y_extent_per_row - y_spacing / 2])  # xmin ymin x-extent y-extent
            viewer = ViewerDvs(events, ax)
            self.viewers.append(viewer)

        # Playback range is bounded by the latest timestamp of any channel
        max_times = [events['ts'][-1] for events in dvs_containers]
        self.max_time = max(max_times)

        # Add controls
        self.ax_time_slider = self.fig.add_axes([0.2, 0.825, 0.7, 0.05])  # xmin ymin x-extent y-extent
        self.slider_time = Slider(
            ax=self.ax_time_slider,
            label='time',
            valmin=0,
            valmax=self.max_time,
            valinit=0)

        # Speed slider is log10-scaled: -3 -> 0.001x ... 1 -> 10x
        self.ax_speed_slider = self.fig.add_axes([0.2, 0.875, 0.7, 0.05])  # xmin ymin x-extent y-extent
        self.slider_speed = Slider(
            ax=self.ax_speed_slider,
            label='speed',
            valmin=-3,
            valmax=1,
            valinit=0)

        # Window slider is log10-scaled, in ms (converted to s downstream)
        self.ax_window_slider = self.fig.add_axes([0.2, 0.925, 0.7, 0.05])  # xmin ymin x-extent y-extent
        self.slider_window = Slider(
            ax=self.ax_window_slider,
            label='time_window',
            valmin=-3,
            valmax=3,
            valinit=1.5)

        self.ax_button_play = self.fig.add_axes([0.05, 0.85, 0.05, 0.05])  # xmin ymin x-extent y-extent
        self.button_play = Button(self.ax_button_play, 'Pause')  #, color="blue")
        self.button_play.on_clicked(self.toggle_play)

        self.slider_time.on_changed(self.update_viewers)
        self.slider_speed.on_changed(self.slider_speed_manual_control)
        self.slider_speed_manual_control(self.slider_speed.valinit)
        self.slider_window.on_changed(self.slider_window_manual_control)
        self.slider_window_manual_control(self.slider_window.valinit)

        #self.fig.canvas.mpl_connect('button_press_event', self.on_click)

        self.ani = animation.FuncAnimation(
            self.fig,
            self.slider_time_autoplay,
            interval=self.interval_ms,
            cache_frame_data=False)  # TODO - could caching help?

        self.update_viewers(0)

    def toggle_play(self, val):
        """Toggle between playing and paused, updating the button label."""
        if self.is_playing:
            self.is_playing = False
            self.button_play.label.set_text('Play')
        else:
            # Reset the wall-clock reference so playback resumes smoothly
            self.last_time = time.time()
            self.is_playing = True
            self.button_play.label.set_text('Pause')

    # update a single plot pane - this should be pushed to a visualiser subclass
    def update_viewers(self, val):
        """Redraw every viewer at the current time-slider position."""
        target_time = self.slider_time.val
        for viewer in self.viewers:
            viewer.update(target_time)

    def slider_speed_manual_control(self, val):
        """Map the log-scaled slider value to a playback speed factor."""
        self.speed = round_to_1_sf(10 ** val)
        self.slider_speed.valtext.set_text(self.speed)

    def slider_window_manual_control(self, val):
        """Map the log-scaled slider value (ms) to a window in seconds and
        propagate it to every viewer."""
        self.time_window = round_to_1_sf(10 ** val) / 1000
        for viewer in self.viewers:
            viewer.time_window = self.time_window
        self.slider_window.valtext.set_text(self.time_window)

    def slider_time_autoplay(self, _):
        """Animation callback: advance the time slider by the elapsed
        wall-clock time scaled by speed, wrapping at the end of the data."""
        if self.is_playing:
            interval = (time.time() - self.last_time) * self.speed
            self.last_time = time.time()
            new_time = (self.slider_time.val + interval) % self.slider_time.valmax
            self.slider_time.set_val(new_time)
194 |
195 |
--------------------------------------------------------------------------------
/bimvee/plot.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | Takes a dict as imported by importAe, and for each channel,
16 | tries to run the appropriate general visualisation function
17 | """
18 |
19 | # local imports
20 | from .plotDvsContrast import plotDvsContrast
21 | from .plotEventRate import plotEventRate
22 | from .plotFrame import plotFrame
23 | from .plotPose import plotPose
24 | from .plotImu import plotImu
25 | from .plotFlow import plotFlow
26 | from .container import Container
27 |
def plot(inDict, **kwargs):
    """Hierarchically descend a container (or list of containers) and call
    the appropriate plot function for each recognised data-type dict.

    Known keys: 'dvs' (contrast + event-rate plots), 'frame', 'imu',
    'pose6q' and 'point3' (pose plots), 'flow'. Any other dict value is
    descended into recursively, accumulating a 'title' kwarg from the
    traversed keys.
    """
    if isinstance(inDict, Container):
        inDict = inDict.container

    if isinstance(inDict, list):
        for inDictInst in inDict:
            plot(inDictInst, **kwargs)
        return
    if isinstance(inDict, dict):
        if 'info' in inDict:  # Special handling for a file-level container
            kwargs['title'] = inDict['info'].get('filePathOrName', '')
            plot(inDict['data'], **kwargs)
        else:
            title = kwargs.get('title', '')
            for keyName in inDict.keys():
                if isinstance(inDict[keyName], dict):
                    if keyName == 'dvs':
                        plotDvsContrast(inDict[keyName], **kwargs)
                        plotEventRate(inDict[keyName], **kwargs)
                    elif keyName == 'frame':
                        plotFrame(inDict[keyName], **kwargs)
                    elif keyName == 'imu':
                        plotImu(inDict[keyName], **kwargs)
                    elif keyName == 'pose6q':
                        plotPose(inDict[keyName], **kwargs)
                    # Fixed: the next two branches read
                    # "keyName == 'point3' in channel" - a chained comparison
                    # against an undefined name 'channel', which raised
                    # NameError whenever the key actually matched.
                    elif keyName == 'point3':
                        plotPose(inDict[keyName], **kwargs)
                    elif keyName == 'flow':
                        plotFlow(inDict[keyName], **kwargs)
                    else:
                        kwargs['title'] = (title + '-' + keyName).lstrip('-')
                        plot(inDict[keyName], **kwargs)
60 |
--------------------------------------------------------------------------------
/bimvee/plotDvsLastTs.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | plotDvsLastTs takes a dict containing address-event data.
16 | Minimally, there must be x, y, and ts fields.
17 | For a given "time" (or a list of times, in which case subplots),
18 | creates a plot in which pixels are ordered according to their
19 | last time stamp, and the order is represented by colour from red (recent)
20 | to blue (oldest) through the spectrum.
21 |
22 | Parameters which can be used:
23 | - time
24 | - minX
25 | - minY
26 | - maxX
27 | - maxY
28 | - flipVertical, flipHorizontal, transpose (applied in this order)
29 | - title
30 | - axis (to plot on; if not passed in, a new figure is created)
31 | - cmap
32 | '''
33 |
34 | import numpy as np
35 | import matplotlib.pyplot as plt
36 | from math import log10, floor
37 | import matplotlib.colors as colors
38 |
# Optional import of rankdata method from scipy
try:
    from scipy.stats import rankdata
except ModuleNotFoundError:
    # Stripped down version of implementation from scipy, supporting the
    # 'average', 'ordinal' and 'dense' methods (this file calls it with
    # method='dense' in plotDvsLastTsSingle).
    def rankdata(a, method='average'):
        arr = np.ravel(np.asarray(a))
        algo = 'mergesort' if method == 'ordinal' else 'quicksort'
        sorter = np.argsort(arr, kind=algo)
        # Fixed: 'intp' was referenced bare (NameError); it lives in numpy
        inv = np.empty(sorter.size, dtype=np.intp)
        inv[sorter] = np.arange(sorter.size, dtype=np.intp)
        if method == 'ordinal':
            return inv + 1
        arr = arr[sorter]
        obs = np.r_[True, arr[1:] != arr[:-1]]
        dense = obs.cumsum()[inv]
        # Fixed: the fallback previously had no 'dense' branch and returned
        # average ranks even when method='dense' was requested
        if method == 'dense':
            return dense
        # cumulative counts of each unique value
        count = np.r_[np.nonzero(obs)[0], len(obs)]
        # average method
        return .5 * (count[dense] + count[dense - 1] + 1)
59 |
def roundToSf(x, sig=3):  # https://stackoverflow.com/questions/3410976/how-to-round-a-number-to-significant-figures-in-python
    """Round x to sig significant figures (default 3); returns 0 when the
    magnitude is undefined (i.e. x == 0)."""
    try:
        magnitude = floor(log10(abs(x)))
    except ValueError:  # log of zero
        return 0
    return round(x, sig - int(magnitude) - 1)
65 |
def plotDvsLastTsSingle(inDict, **kwargs):
    """Render one last-timestamp image: every pixel is coloured by the rank
    (if kwargs 'ordinal') or raw timestamp of the last event it received in
    [minTime, time]; pixels with no events are shown white ('under' colour).

    Returns the AxesImage created by imshow.
    """
    time = kwargs.get('time', kwargs.get('maxTime', kwargs.get('lastTime',
                    kwargs.get('ts', kwargs.get('maxTs', kwargs.get('lastTs',
                    np.max(inDict['ts'])))))))
    minTime = kwargs.get('minTime', kwargs.get('firstTime',
                    kwargs.get('minTs', kwargs.get('firstTs',
                    np.min(inDict['ts'])))))

    # TODO: if the actual sensor size is known, use this instead of the following
    minY = kwargs.get('minY', inDict['y'].min())
    maxY = kwargs.get('maxY', inDict['y'].max())
    minX = kwargs.get('minX', inDict['x'].min())
    maxX = kwargs.get('maxX', inDict['x'].max())
    sizeX = maxX - minX + 1
    sizeY = maxY - minY + 1
    # -1 marks pixels which never received an event in the window
    tsArray = np.ones((sizeY, sizeX), dtype=float) * -1

    # populate the array by running time forward to 'time'
    chosenLogical = inDict['ts'] <= time
    chosenLogical &= inDict['ts'] >= minTime
    chosenLogical &= inDict['x'] >= minX
    chosenLogical &= inDict['x'] <= maxX
    chosenLogical &= inDict['y'] >= minY
    chosenLogical &= inDict['y'] <= maxY
    xChosen = inDict['x'][chosenLogical] - minX
    yChosen = inDict['y'][chosenLogical] - minY
    tsChosen = inDict['ts'][chosenLogical]
    # Later events overwrite earlier ones, leaving the last ts per pixel
    for x, y, ts in zip(xChosen, yChosen, tsChosen):
        tsArray[y, x] = ts

    ordinal = kwargs.get('ordinal', False)
    if ordinal:
        tsArrayFlattened = tsArray.flatten()
        tsOrdinal = rankdata(tsArrayFlattened, method='dense') - 1  # min rank is 1
        if np.any(tsArrayFlattened == -1):
            tsOrdinal -= 1  # If there are unset timestamps, they will have rank 0 - push these to -1
        tsArray = tsOrdinal.reshape(tsArray.shape)
    else:
        pass  # TODO: cardinal
    axes = kwargs.get('axes')
    if axes is None:
        fig, axes = plt.subplots()
        kwargs['axes'] = axes

    transpose = kwargs.get('transpose', False)
    if transpose:
        tsArray = np.transpose(tsArray)
    cmap = plt.get_cmap(kwargs.get('cmap', 'jet'))
    # NOTE(review): set_under mutates a possibly shared colormap object on
    # some matplotlib versions; consider cmap.copy() if that matters here.
    cmap.set_under(color='white')
    image = axes.imshow(tsArray, cmap=cmap, norm=colors.Normalize(vmin=0, vmax=np.max(tsArray)))
    axes.set_aspect('equal', adjustable='box')
    # Fixed: was grid(b=False) - the 'b' keyword was removed in matplotlib
    # 3.6; passing False positionally works across versions.
    axes.grid(False)
    if kwargs.get('flipVertical', False):
        axes.invert_yaxis()
    if kwargs.get('flipHorizontal', False):
        axes.invert_xaxis()
    title = kwargs.get('title')
    if title is not None:
        axes.set_title(title)
    # NOTE(review): this unconditionally overwrites any title set just above
    axes.set_title(str(roundToSf(minTime)) + ' - ' + str(roundToSf(time)) + ' s')

    callback = kwargs.get('callback')
    if callback is not None:
        callback(tsArray=tsArray, **kwargs)

    return image
133 |
def plotDvsLastTs(inDict, **kwargs):
    """Plot last-timestamp images for dvs data: descends lists and
    file-level containers, then creates one subplot per requested time
    point (scalar or list 'time'/'minTime' kwargs) via plotDvsLastTsSingle."""
    # Boilerplate for descending higher level containers
    if isinstance(inDict, list):
        for inDictInst in inDict:
            plotDvsLastTs(inDictInst, **kwargs)
        return
    if 'info' in inDict:  # Top level container
        fileName = inDict['info'].get('filePathOrName', '')
        # Fixed: this message previously said 'plotDvsContrast was called'
        # (copy-paste error from another plot module)
        print('plotDvsLastTs was called for file ' + fileName)
        if not inDict['data']:
            print('The import contains no data.')
            return
        for channelName in inDict['data']:
            channelData = inDict['data'][channelName]
            if 'dvs' in channelData and len(channelData['dvs']['ts']) > 0:
                kwargs['title'] = ' '.join([fileName, str(channelName)])
                plotDvsLastTs(channelData['dvs'], **kwargs)
            else:
                print('Channel ' + channelName + ' skipped because it contains no polarity data')
        return
    # From this point onwards, it's a dvs data-type container
    times = kwargs.get('time', kwargs.get('maxTime', kwargs.get('lastTime',
                    kwargs.get('ts', kwargs.get('maxTs', kwargs.get('lastTs',
                    np.max(inDict['ts'])))))))
    minTimes = kwargs.get('minTime', kwargs.get('firstTime',
                    kwargs.get('minTs', kwargs.get('firstTs',
                    np.min(inDict['ts'])))))
    if np.isscalar(times):
        times = [times]
        minTimes = [minTimes]
    numPlots = len(times)
    # Distribute plots in a raster with an approximately 4:3 aspect
    numPlotsX = int(round(np.sqrt(numPlots / 3 * 4)))
    numPlotsY = int(np.ceil(numPlots / numPlotsX))
    fig, allAxes = plt.subplots(numPlotsY, numPlotsX)
    if numPlots == 1:
        allAxes = [allAxes]
    else:
        allAxes = allAxes.flatten()
    fig.suptitle(kwargs.get('title', ''))
    for time, minTime, axes in zip(times, minTimes, allAxes):
        kwargs['time'] = time
        kwargs['minTime'] = minTime
        kwargs['axes'] = axes
        plotDvsLastTsSingle(inDict, **kwargs)
177 |
--------------------------------------------------------------------------------
/bimvee/plotDvsSpaceTime.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | plotDvsSpaceTime takes 'inDict' - a dict containing an imported ae file,
16 | as created by importAe, crops it spatially and temporally according to any kwargs
such as maxTime etc
18 | and creates a point cloud viewer containing
19 |
20 | ...
21 | '''
22 |
23 | import numpy as np
24 | import pptk
25 |
26 | from .split import cropSpaceTime
27 |
def plotDvsSpaceTime(inDicts, **kwargs):
    """Descend a container hierarchy and, for each dvs data dict found,
    open a pptk point-cloud viewer of its events in (x, y, ts) space,
    with x and y scaled to match the time range.

    Returns the pptk viewer for the dvs container plotted (when called
    directly on a dvs data-type dict).
    """
    if isinstance(inDicts, list):
        for inDict in inDicts:
            plotDvsSpaceTime(inDict, **kwargs)
        return
    else:
        inDict = inDicts
    if not isinstance(inDict, dict):
        return
    if 'ts' not in inDict:
        #title = kwargs.pop('title', '')
        if 'info' in inDict and isinstance(inDict, dict):
            fileName = inDict['info'].get('filePathOrName')
            if fileName is not None:
                # Fixed: this message previously said 'plotDvsContrast was
                # called' (copy-paste error from another plot module)
                print('plotDvsSpaceTime was called for file ' + fileName)
                #title = (title + ' ' + fileName).lstrip()
        for key in inDict.keys():
            # kwargs['title'] = (title + ' ' + key).lstrip()
            plotDvsSpaceTime(inDict[key], **kwargs)
        return
    # From this point onwards, it's a data-type container
    if 'pol' not in inDict:
        return
    # From this point onwards, it's a dvs container

    inDict = cropSpaceTime(inDict, **kwargs)

    # scale x and y to match time range
    timeRange = inDict['ts'][-1] - inDict['ts'][0]
    spatialDim = max(max(inDict['x']), max(inDict['y']))
    scalingFactor = timeRange / spatialDim
    events = np.concatenate((inDict['x'][:, np.newaxis] * scalingFactor,
                             inDict['y'][:, np.newaxis] * scalingFactor,
                             inDict['ts'][:, np.newaxis]), axis=1)
    pptkViewer = pptk.viewer(events)
    return pptkViewer
64 |
--------------------------------------------------------------------------------
/bimvee/plotEventRate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | plotEventRate takes 'inDict' - a dict containing an imported ae file,
16 | as created by importAe, and creates a series of plots of event rates.
It creates one plot for each dataType dict containing a 'ts' field.
18 |
19 | Parameters which can be used:
20 | - min/maxTime
21 | - min/maxX/Y
22 | - flipVertical/Horizontal
23 | - transpose
24 | '''
25 |
26 | import numpy as np
27 | import matplotlib.pyplot as plt
28 | from math import log10, floor
29 |
def roundToSf(x, sig=3):
    """Round x to sig significant figures (default 3); returns 0 when the
    magnitude is undefined (i.e. x == 0)."""
    try:
        magnitude = floor(log10(abs(x)))
    except ValueError:  # log of zero
        return 0
    return round(x, sig - int(magnitude) - 1)
35 |
36 | '''
37 | Descend a container hierarchically, looking for 'ts' fields to work on
38 | '''
39 |
def plotEventRate(inDicts, **kwargs):
    """Descend a container hierarchy and, for each timestamped data dict,
    plot event rate against time - one curve per integration period
    (kwargs 'periods', or 'freqs' as reciprocals; default 1 ms to 1 s).

    With kwarg perPixel=True, rates are normalised by the pixel count
    derived from the x/y extents.
    """
    if isinstance(inDicts, list):
        for inDict in inDicts:
            plotEventRate(inDict, **kwargs)
        return
    else:
        inDict = inDicts
    if not isinstance(inDict, dict):
        return
    if 'ts' not in inDict:
        title = kwargs.pop('title', '')
        if 'info' in inDict and isinstance(inDict, dict):
            fileName = inDict['info'].get('filePathOrName')
            if fileName is not None:
                print('plotEventRate was called for file ' + fileName)
                title = (title + ' ' + fileName).lstrip()
        for key in inDict.keys():
            kwargs['title'] = (title + ' ' + key).lstrip()
            plotEventRate(inDict[key], **kwargs)
    else:  # It's a data-type container
        # Break out data array for cleaner code
        ts = inDict['ts']
        if ts.shape[0] == 0:
            return
        if ts.shape[0] == 1:
            title = kwargs.get('title', 'this container')
            print('Only one event in ' + title)
            return
        startTime = kwargs.get('startTime', kwargs.get('minTime', kwargs.get('firstTime', np.min(ts))))
        endTime = kwargs.get('endTime', kwargs.get('maxTime', kwargs.get('lastTime', np.max(ts))))
        freqs = kwargs.get('freqs')
        if freqs is None:
            periods = kwargs.get('periods', [0.001, 0.01, 0.1, 1])
        else:
            periods = [1/f for f in freqs]
        axes = kwargs.get('axes')
        if axes is None:
            fig, axes = plt.subplots()
            kwargs['axes'] = axes
        legend = kwargs.get('legend', [])
        for period in periods:
            # Count events in consecutive bins of width 'period' via
            # searchsorted on the (sorted) timestamp array
            endTimes = [t + period for t in np.arange(startTime, endTime, period)]
            midTimes = [t - period / 2 for t in endTimes]
            endIds = [np.searchsorted(ts, t) for t in endTimes]
            counts = [end - start for start, end in zip(endIds[:-1], endIds[1:])]
            counts.insert(0, endIds[0])
            rates = [count / period for count in counts]
            if kwargs.get('perPixel', False):
                minX = inDict.get('minX', kwargs.get('minX', min(inDict['x'])))
                maxX = inDict.get('maxX', kwargs.get('maxX', max(inDict['x'])))
                minY = inDict.get('minY', kwargs.get('minY', min(inDict['y'])))
                maxY = inDict.get('maxY', kwargs.get('maxY', max(inDict['y'])))
                numPixels = (maxX - minX + 1) * (maxY - minY + 1)
                # Fixed: was 'rates / numPixels', which raises TypeError
                # because rates is a plain Python list
                rates = [rate / numPixels for rate in rates]
                # (removed a dead reassignment of endTimes that followed here)
            axes.plot(midTimes, rates)
            plt.xlabel('Time (s)')
            plt.ylabel('Rate (events/s)')
            legend.append('period: ' + str(period) + ' s')
        axes.legend(legend)
        axes.set_ylim(bottom=0)
        kwargs['legend'] = legend
        if kwargs.get('title') is not None:
            axes.set_title(kwargs.get('title'))
        callback = kwargs.get('callback')
        if callback is not None:
            kwargs = callback(**kwargs)
108 |
109 |
110 |
--------------------------------------------------------------------------------
/bimvee/plotFlow.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Copyright (C) 2020 Event-driven Perception for Robotics
5 | Authors: Sim Bamford
6 | Aiko Dinale
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
plotFlow takes 'flowDict' - a dictionary containing imported FLOW (optical
flow) events as created by importAe, and plots against time the vx and vy
components of the flow events contained.
21 | """
22 | import os
23 | import matplotlib.pyplot as plt
24 |
25 | #-----------------------------------------------------------------------------------------------------
def plotFlow(flowDict, fig_path=None, fig_name=None, fig_subtitle=None):
    """
    Plot the FLOW events in flowDict against the time. If specified, save the
    generated figure as fig_name.png at the location defined by fig_path.

    Arguments:
        flowDict {dict} -- dictionary of FLOW events as formatted by bimvee from event-driven library

    Keyword Arguments:
        fig_path {string} -- save path for the generated figure (default: {None})
        fig_name {string} -- name of the generated figure (default: {None})
        fig_subtitle {string} -- figure sub-title (default: {None})
    """
    # Color palette for color blindness. Fixed: this name was previously
    # only defined inside plotFlowDistribution, so referencing it below
    # raised NameError at runtime.
    retro_palette = ['#601A4A', '#EE442F', '#63ACBE', '#F9F4EC']

    fig = plt.figure(figsize=(16.0, 10.0))
    if isinstance(fig_subtitle, str):
        fig.suptitle("FLOW Events\n" + fig_subtitle, fontsize=20, fontweight='bold')
    else:
        fig.suptitle("FLOW Events", fontsize=20, fontweight='bold')

    # vx against time, and vy sharing the same time axis below it
    ax11 = plt.subplot(2, 1, 1)
    plt.plot(flowDict['ts'], flowDict['vx'], color=retro_palette[0], marker='.', linewidth=0.0)
    plt.xlabel('Time [s]', fontsize=10, fontweight='bold')
    plt.ylabel('Vx [px/s]', fontsize=10, fontweight='bold')
    plt.grid(True)

    plt.subplot(2, 1, 2, sharex=ax11)
    plt.plot(flowDict['ts'], flowDict['vy'], color=retro_palette[1], marker='.', linewidth=0.0)
    plt.xlabel('Time [s]', fontsize=10, fontweight='bold')
    plt.ylabel('Vy [px/s]', fontsize=10, fontweight='bold')
    plt.grid(True)

    fig.tight_layout()
    if isinstance(fig_subtitle, str):
        fig.subplots_adjust(top=0.9)
    else:
        fig.subplots_adjust(top=0.95)

    fig.align_ylabels()

    if isinstance(fig_path, str) and isinstance(fig_name, str):
        plt.savefig(os.path.join(fig_path, fig_name + ".png"), dpi=300, bbox_inches='tight')
        print("Saving " + fig_name + ".png")
        plt.close()
    else:
        plt.show()
72 |
73 | #-----------------------------------------------------------------------------------------------------
def plotFlowDistribution(flowDict, fig_path=None, fig_name=None, fig_subtitle=None):
    """
    Plot the distribution of the FLOW events in flowDict and save the generated
    figure as fig_name.png at the location defined by fig_path.

    Arguments:
        flowDict {dict} -- dictionary of FLOW events as formatted by bimvee from event-driven library

    Keyword Arguments:
        fig_path {string} -- save path for the generated figure (default: {None})
        fig_name {string} -- name of the generated figure (default: {None})
        fig_subtitle {string} -- figure sub-title (default: {None})
    """
    import seaborn as sns

    sns.set(palette="colorblind")

    # Color Palette for Color Blindness
    # (unused alternatives, kept for reference:
    #  zesty ['#F5793A', '#A95AA1', '#85C0F9', '#0F2080'],
    #  corporate ['#8DB8AD', '#EBE7E0', '#C6D4E1', '#44749D'])
    retro_palette = ['#601A4A', '#EE442F', '#63ACBE', '#F9F4EC']
    fig = plt.figure(figsize=(14.0, 10.0))
    if isinstance(fig_subtitle, str):
        fig.suptitle("FLOW Events Distribution\n" + fig_subtitle, fontsize=20, fontweight='bold')
    else:
        fig.suptitle("FLOW Events Distribution", fontsize=20, fontweight='bold')

    # NOTE(review): sns.distplot is deprecated (removed in recent seaborn);
    # histplot/displot are the modern equivalents - TODO migrate.
    plt.subplot(2, 1, 1)
    sns.distplot(flowDict['vx'], bins=100, color=retro_palette[0])
    plt.xlabel('Vx [px/s]', fontsize=10, fontweight='bold')

    plt.subplot(2, 1, 2)
    sns.distplot(flowDict['vy'], bins=100, color=retro_palette[1])
    plt.xlabel('Vy [px/s]', fontsize=10, fontweight='bold')

    fig.tight_layout()
    if isinstance(fig_subtitle, str):
        fig.subplots_adjust(top=0.9)
    else:
        fig.subplots_adjust(top=0.95)

    if isinstance(fig_path, str) and isinstance(fig_name, str):
        plt.savefig(os.path.join(fig_path, fig_name + '.png'), dpi=300, bbox_inches='tight')
        print("Saving " + fig_name + ".png")
        plt.close()
    else:
        plt.show()
121 |
--------------------------------------------------------------------------------
/bimvee/plotFrame.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | '''
4 | Copyright (C) 2019 Event-driven Perception for Robotics
5 | Authors: Sim Bamford
6 | This program is free software: you can redistribute it and/or modify it under
7 | the terms of the GNU General Public License as published by the Free Software
8 | Foundation, either version 3 of the License, or (at your option) any later version.
9 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
10 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
11 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 | You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
14 |
15 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
16 | plotFrame takes 'inDict' - a dictionary containing imported frame data
17 | (or a higher level container, in which attempts to descend and call itself)
18 | as created by importAe, and creates a series of images from selected
19 | frames.
20 | The number of subplots is given by the numPlots parameter.
21 | 'distributeBy' can either be 'time' or 'events', to decide how the points
22 | around which data is rendered are chosen.
23 | The frame events are then chosen as those nearest to the time points.
24 | If the 'distributeBy' is 'time' then if the further parameters 'minTime'
25 | and 'maxTime' are used then the time window used is only between
26 | those limits.
27 | Params include:
28 | numPlots, distributeBy, minTime, maxTime, flipVertical, flipHorizontal, transpose
29 | '''
30 |
31 | import numpy as np
32 | import matplotlib.pyplot as plt
33 | from math import log10, floor
34 |
def roundToSf(x, sig=3):
    """Round x to sig significant figures (default 3); returns 0 when the
    magnitude is undefined (i.e. x == 0)."""
    try:
        magnitude = floor(log10(abs(x)))
    except ValueError:  # log of zero
        return 0
    return round(x, sig - int(magnitude) - 1)
40 |
def plotFrame(inDicts, **kwargs):
    '''
    Plot a selection of frames from imported frame data.

    Accepts a list of imports, a higher-level container (recursed into,
    accumulating a title), or a frame data-type dict with 'ts' and 'frames'.
    kwargs: numPlots, distributeBy ('time'|'events'), minTime/maxTime (and
    synonyms), flipVertical, flipHorizontal, transpose, title.
    '''
    if isinstance(inDicts, list):
        for inDict in inDicts:
            plotFrame(inDict, **kwargs)
        return
    inDict = inDicts
    if not isinstance(inDict, dict):
        return
    if 'ts' not in inDict:
        # Higher-level container: recurse into each child, accumulating a title
        title = kwargs.pop('title', '')
        if 'info' in inDict:
            fileName = inDict['info'].get('filePathOrName')
            if fileName is not None:
                print('plotFrame was called for file ' + fileName)
                title = (title + ' ' + fileName).lstrip()
        for key in inDict.keys():
            kwargs['title'] = (title + ' ' + key).lstrip()
            plotFrame(inDict[key], **kwargs)
        return
    # From this point onwards, it's a data-type container
    if 'frames' not in inDict:
        return
    # From this point onwards, it's a frame data-type container
    distributeBy = kwargs.get('distributeBy', 'time').lower()
    numPlots = kwargs.get('numPlots', 6)

    ts = inDict['ts']
    frames = inDict['frames']
    numFrames = len(ts)
    if numFrames == 0:
        return  # nothing to plot; also avoids division by zero below
    if numFrames < numPlots:
        numPlots = numFrames

    if numFrames == numPlots:
        distributeBy = 'events'

    # Distribute plots in a raster with a 3:4 ratio
    numPlotsX = int(np.round(np.sqrt(numPlots / 3 * 4)))
    numPlotsY = int(np.ceil(numPlots / numPlotsX))

    minTime = kwargs.get('startTime', kwargs.get('minTime', kwargs.get('beginTime', ts[0])))
    maxTime = kwargs.get('stopTime', kwargs.get('maxTime', kwargs.get('endTime', ts[-1])))

    if distributeBy == 'time':
        totalTime = maxTime - minTime
        timeStep = totalTime / numPlots
        timePoints = np.arange(minTime + timeStep * 0.5, maxTime, timeStep)
    else:  # distribute by event number
        framesPerStep = numFrames / numPlots
        # BUGFIX: ts is an array and must be indexed, not called; the previous
        # code `ts(int(np.ceil(...)))` raised TypeError for this branch.
        frameIdxs = np.ceil(np.arange(framesPerStep * 0.5, numFrames, framesPerStep)).astype(int)
        frameIdxs = np.clip(frameIdxs, 0, numFrames - 1)
        timePoints = ts[frameIdxs]

    fig, axes = plt.subplots(numPlotsY, numPlotsX)
    fig.suptitle(kwargs.get('title', ''))

    # atleast_1d handles the single-subplot case, where subplots returns a bare Axes
    axes = np.atleast_1d(axes).flatten().tolist()
    for ax, timePoint in zip(axes, timePoints):

        # Find the frame index nearest to timePoint (clipped: searchsorted may
        # return numFrames when timePoint exceeds the last timestamp)
        frameIdx = min(np.searchsorted(ts, timePoint), numFrames - 1)
        frame = frames[frameIdx]
        if kwargs.get('transpose', False):
            frame = np.transpose(frame)
        if kwargs.get('flipVertical', False):
            frame = np.flip(frame, axis=0)
        if kwargs.get('flipHorizontal', False):
            frame = np.flip(frame, axis=1)
        ax.imshow(frame, cmap='gray')
        # Positional arg works on all matplotlib versions; the 'b=' keyword
        # was deprecated in 3.5 and removed in 3.6
        ax.grid(False)
        ax.set_title('Time: ' + str(roundToSf(timePoint)) + ' s; frame number: ' + str(frameIdx))
110 |
111 | #%%
112 | '''
113 | Optional extra - not including it by default because I don't want this
114 | extra dependency, but this is a quick way to see frame data as a video
115 | '''
116 |
117 | '''
118 | from imageio import mimwrite
119 | def framesToGif(framesDict, **kwargs):
120 | ts = framesDict['ts']
121 | frames = framesDict['frames']
122 | outputFilePathAndName = kwargs.get('outputFilePathAndName', 'framesAsMovie.mp4')
123 | frameRate = len(ts) / (ts[-1] - ts[0])
124 | framesExpanded = [np.expand_dims(f, 0) for f in frames]
125 | framesAllArray = np.concatenate(framesExpanded, 0)
126 | mimwrite(outputFilePathAndName, framesAllArray , fps = int(frameRate))
127 | '''
128 |
129 |
--------------------------------------------------------------------------------
/bimvee/plotImu.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | This program is free software: you can redistribute it and/or modify it under
6 | the terms of the GNU General Public License as published by the Free Software
7 | Foundation, either version 3 of the License, or (at your option) any later version.
8 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
9 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
10 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
11 | You should have received a copy of the GNU General Public License along with
12 | this program. If not, see .
13 |
14 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
15 | plotImu takes 'inDict' - a dictionary containing imported IMU data
16 | (or a higher level container, in which attempts to descend and call itself)
17 | as created by importAe, and plots against time the various dimensions of the
18 | IMU samples contained.
19 | """
20 | import os, sys
21 | import matplotlib.pyplot as plt
22 |
23 | #-----------------------------------------------------------------------------------------------------
def plotImu(inDict, **kwargs):
    '''
    Plot IMU samples against time.

    Accepts a list of imports (each plotted recursively), a higher-level
    container with 'info'/'data' keys (each channel's 'imu' dict is plotted),
    or an imu data-type dict with 'ts' plus any of 'acc', 'angV', 'mag'
    (Nx3 arrays) and optionally 'temp'.
    kwargs: 'title' is used as the figure suptitle.
    '''
    if isinstance(inDict, list):
        for inDictInst in inDict:
            plotImu(inDictInst, **kwargs)
        return
    if 'info' in inDict:
        # Higher-level container - descend into each channel which has imu data
        fileName = inDict['info'].get('filePathOrName', '')
        print('plotImu was called for file ' + fileName)
        if not inDict['data']:
            print('The import contains no data.')
            return
        for channelName in inDict['data']:
            channelData = inDict['data'][channelName]
            if 'imu' in channelData and len(channelData['imu']['ts']) > 0:
                kwargs['title'] = ' '.join([fileName, str(channelName)])
                plotImu(channelData['imu'], **kwargs)
            else:
                # BUGFIX: this message previously said 'polarity data' (copy-paste
                # from a dvs plotting function)
                print('Channel ' + channelName + ' skipped because it contains no imu data')
        return
    # From this point onwards, it's an imu data-type dict.
    # Only plot the sensor triplets actually present, so imports without e.g.
    # a magnetometer no longer raise a KeyError.
    triplets = [(key, title) for key, title in
                [('acc', 'Acceleration (m/s^2)'),  # BUGFIX: was mislabelled m/s
                 ('angV', 'Angular velocity (rad/s)'),
                 ('mag', 'Mag (uT)')]
                if key in inDict]
    numSubplots = len(triplets) + ('temp' in inDict)
    if numSubplots == 0:
        print('The imu dict contains no plottable fields.')
        return
    # squeeze=False keeps allAxes 2-d even when there is a single subplot
    fig, allAxes = plt.subplots(numSubplots, 1, squeeze=False)
    fig.suptitle(kwargs.get('title', ''))
    for axesIdx, (key, title) in enumerate(triplets):
        axes = allAxes[axesIdx, 0]
        axes.plot(inDict['ts'], inDict[key][:, 0], 'r')
        axes.plot(inDict['ts'], inDict[key][:, 1], 'g')
        axes.plot(inDict['ts'], inDict[key][:, 2], 'b')
        axes.set_title(title)
        axes.legend(['x', 'y', 'z'])

    if 'temp' in inDict:
        axesTemp = allAxes[-1, 0]
        axesTemp.plot(inDict['ts'], inDict['temp'])
        axesTemp.set_title('Temp (K)')
74 |
75 |
76 | #-----------------------------------------------------------------------------------------------------
def plotImuDistribution(imuDict, unitIMU='FPGA', fig_path=None, fig_name=None, fig_subtitle=None):
    """
    Plot the distribution of the IMU data in imuDict. If specified, save the
    generated figure as fig_name.png at the location defined by fig_path.

    Arguments:
        imuDict {dict} -- dictionary of IMU data (as formatted by bimvee);
            expects parallel sequences under 'value' and 'sensor', where
            sensor codes 0-2 are acc x/y/z and 3-5 are gyro x/y/z
            (as used by the per-panel filters below)

    Keyword Arguments:
        unitIMU {str} -- either 'FPGA' or 'SI' (default: {'FPGA'})
        fig_path {string} -- save path for the generated figure (default: {None})
        fig_name {string} -- name of the generated figure (default: {None})
        fig_subtitle {string} -- figure sub-title (default: {None})
    """
    # BUGFIX: the docstring above previously sat after the first statements,
    # where it was a no-op string expression invisible to help().
    import seaborn as sns

    sns.set(palette="colorblind")

    # Color palette chosen for color-blind accessibility
    zesty_palette = ['#F5793A', '#A95AA1', '#85C0F9', '#0F2080']

    fig = plt.figure(figsize=(14.0, 10.0))
    if isinstance(fig_subtitle, str):
        fig.suptitle("IMU Samples Distribution\n" + fig_subtitle, fontsize=20, fontweight='bold')
    else:
        fig.suptitle("IMU Samples Distribution", fontsize=20, fontweight='bold')

    # (subplot position, sensor code, optional panel title, label stem, SI unit, colour)
    panels = [(1, 0, "Accelerometer", 'accX', 'm/s', zesty_palette[0]),
              (2, 3, "Gyroscope", 'gyroX', 'rad/s', zesty_palette[0]),
              (3, 1, None, 'accY', 'm/s', zesty_palette[1]),
              (4, 4, None, 'gyroY', 'rad/s', zesty_palette[1]),
              (5, 2, None, 'accZ', 'm/s', zesty_palette[3]),
              (6, 5, None, 'gyroZ', 'rad/s', zesty_palette[3])]
    for pos, sensor, title, stem, unit, colour in panels:
        plt.subplot(3, 2, pos)
        samples = [v for v, s in zip(imuDict['value'], imuDict['sensor']) if s == sensor]
        # NOTE: sns.distplot is deprecated (removed in seaborn >= 0.14);
        # consider migrating to sns.histplot / sns.displot
        sns.distplot(samples, bins=100, color=colour)
        if title is not None:
            plt.title(title, fontsize=16, fontweight='bold')
        if unitIMU == 'FPGA':
            plt.xlabel(stem + ' [fpga]', fontsize=10, fontweight='bold')
        elif unitIMU == 'SI':
            plt.xlabel(stem + ' [' + unit + ']', fontsize=10, fontweight='bold')

    fig.tight_layout()
    # Leave room for the (possibly two-line) suptitle
    fig.subplots_adjust(top=0.85 if isinstance(fig_subtitle, str) else 0.9)

    if isinstance(fig_path, str) and isinstance(fig_name, str):
        plt.savefig(os.path.join(fig_path, fig_name + '.png'), dpi=300, bbox_inches='tight')
        print("Saving " + fig_name + ".png")
        plt.close()
    else:
        plt.show()
161 |
--------------------------------------------------------------------------------
/bimvee/visualiser.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | Massimiliano Iacono
6 |
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
14 | this program. If not, see .
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
17 | Functionality for serving images which represent the data at a certain time
18 | (or given a certain time window).
19 | The intended use case is to support video-like playback of data
20 | There is a generic Visualiser class - it contains a dataType dict.
21 | This is subclassed for each supported dataType (e.g. dvs, frame, pose etc)
22 | Each subclass should implement basic methods: get_dims, get_frame, get_colorfmt
23 | Implementing set_data allows the visualiser to do some preparation for visualisation
24 | when it receives new data.
25 |
26 | In general get_frame takes two args: time, and time_window.
27 | In general one could think about representing data in an interpolated way or not.
28 | For example, with poses, imu etc, one could interpolate between samples,
29 | or one could simply choose the sample which is nearest in time.
30 | Likewise for frames.
31 | The time_window parameter says how much data to take around the sample point for event-type data.
32 | It might be possible to develop visualisations for other types of data that make use of the concept of a time window.
33 |
34 | colorfmt is a choice between luminance and rgb.
35 | If luminance, then the frame returned should have dim 2 = 3.
36 | Nothing stops the calling function from applying a color mask to an output in luminance format.
37 | """
38 |
39 | import numpy as np
40 |
41 | # Local imports
42 |
43 | # Importing child classes lets this one module be referenced for all imports
44 |
45 |
46 |
47 | try:
48 | from .visualisers.visualiserDvs import VisualiserDvs
49 | from .visualisers.visualiserFrame import VisualiserFrame
50 | from .visualisers.visualiserPoint3 import VisualiserPoint3
51 | from .visualisers.visualiserPose6q import VisualiserPose6q
52 | from .visualisers.visualiserBoundingBoxes import VisualiserBoundingBoxes
53 | from .visualisers.visualiserEyeTracking import VisualiserEyeTracking
54 | from .visualisers.visualiserOpticFlow import VisualiserOpticFlow
55 | from .visualisers.visualiserImu import VisualiserImu
56 | from .visualisers.visualiserSkeleton import VisualiserSkeleton
57 | except ImportError:
58 | from bimvee.visualisers.visualiserDvs import VisualiserDvs
59 | from bimvee.visualisers.visualiserFrame import VisualiserFrame
60 | from bimvee.visualisers.visualiserPoint3 import VisualiserPoint3
61 | from bimvee.visualisers.visualiserPose6q import VisualiserPose6q
62 | from bimvee.visualisers.visualiserBoundingBoxes import VisualiserBoundingBoxes
63 | from bimvee.visualisers.visualiserEyeTracking import VisualiserEyeTracking
64 | from bimvee.visualisers.visualiserOpticFlow import VisualiserOpticFlow
65 | from bimvee.visualisers.visualiserImu import VisualiserImu
66 | from bimvee.visualisers.visualiserSkeleton import VisualiserSkeleton
67 |
68 |
--------------------------------------------------------------------------------
/bimvee/visualisers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/event-driven-robotics/bimvee/c92c5a2504c5578c349072a44814a3b5aacd78f6/bimvee/visualisers/__init__.py
--------------------------------------------------------------------------------
/bimvee/visualisers/visualiserBase.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | Massimiliano Iacono
6 |
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
14 | this program. If not, see .
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
17 | Functionality for serving images which represent the data at a certain time
18 | (or given a certain time window).
19 | The intended use case is to support video-like playback of data
20 | There is a generic Visualiser class - it contains a dataType dict.
21 | This is subclassed for each supported dataType (e.g. dvs, frame, pose etc)
22 | Each subclass should implement basic methods: get_dims, get_frame, get_colorfmt
23 | Implementing set_data allows the visualiser to do some preparation for visualisation
24 | when it receives new data.
25 |
26 | In general get_frame takes two args: time, and time_window.
27 | In general one could think about representing data in an interpolated way or not.
28 | For example, with poses, imu etc, one could interpolate between samples,
29 | or one could simply choose the sample which is nearest in time.
30 | Likewise for frames.
31 | The time_window parameter says how much data to take around the sample point for event-type data.
32 | It might be possible to develop visualisations for other types of data that make use of the concept of a time window.
33 |
34 | colorfmt is a choice between luminance and rgb.
35 | If luminance, then the frame returned should have dim 2 = 3.
36 | Nothing stops the calling function from applying a color mask to an output in luminance format.
37 | """
38 |
39 | import numpy as np
40 |
41 |
class Visualiser:
    """
    Generic base class for data visualisers.

    Holds a data dict and defines the interface that the player expects:
    get_frame(time, timeWindow) produces a representation of the data around
    `time`; get_colorfmt and get_settings describe how to display it.
    Subclasses set data_type and override the methods below as needed.
    """

    # Class-level defaults; instances replace _data via set_data()
    _data = None
    data_type = None

    def __init__(self, data):
        self.set_data(data)

    def set_data(self, data):
        # Keep an internal shallow copy so the caller's dict is not shared
        self._data = dict(data)

    def get_data(self):
        return self._data

    def get_frame(self, time, timeWindow, **kwargs):
        # Subclasses must provide their own rendering
        raise NotImplementedError

    def get_colorfmt(self):
        return 'luminance'

    def get_settings(self):
        return {}
65 |
--------------------------------------------------------------------------------
/bimvee/visualisers/visualiserBoundingBoxes.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | Massimiliano Iacono
6 |
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
14 | this program. If not, see .
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
17 | Functionality for serving images which represent the data at a certain time
18 | (or given a certain time window).
19 | The intended use case is to support video-like playback of data
20 | There is a generic Visualiser class - it contains a dataType dict.
21 | This is subclassed for each supported dataType (e.g. dvs, frame, pose etc)
22 | Each subclass should implement basic methods: get_dims, get_frame, get_colorfmt
23 | Implementing set_data allows the visualiser to do some preparation for visualisation
24 | when it receives new data.
25 |
26 | VisualiserBoundingBoxes doesn't actually create a visualisation but rather
27 | queries the data for the bounding boxes within the time window and passes
28 | these out for visualisation as an overlay.
29 | """
30 |
31 | import numpy as np
32 |
33 | # Local imports
34 | from .visualiserBase import Visualiser
35 |
36 |
class VisualiserBoundingBoxes(Visualiser):
    """
    Serves bounding-box ground truth for overlay rather than rendering an image.

    get_frame returns an (N, 4) or (N, 5) int array with rows of
    (minY, minX, maxY, maxX[, label]), or None when there is nothing to show.
    """

    data_type = 'boundingBoxes'

    def get_frame(self, time, timeWindow, **kwargs):
        """
        Return the boxes within timeWindow of `time`.

        kwargs:
            show_bounding_boxes (bool): master switch; None is returned when False.
            interpolate (bool): when True, one box per label is linearly
                interpolated between the samples bracketing `time`.
            with_labels (bool): append the label as a fifth column when the
                data carries a 'label' field.
        """
        if self._data is None or not kwargs.get('show_bounding_boxes', True):
            return None
        gt_bb = self._data
        # Boolean mask of the samples falling inside the window around `time`
        indices = abs(gt_bb['ts'] - time) < timeWindow
        if not any(indices):
            if not kwargs.get('interpolate'):
                return None
        if kwargs.get('interpolate'):
            # NOTE(review): this branch indexes gt_bb['label'] unconditionally,
            # so interpolation presumably requires labelled data - confirm.
            boxes = []
            for label in np.unique(gt_bb['label']):
                # Restrict to the (time-ordered) samples of this label
                label_mask = gt_bb['label'] == label
                ts = gt_bb['ts'][label_mask]
                minY = gt_bb['minY'][label_mask]
                minX = gt_bb['minX'][label_mask]
                maxY = gt_bb['maxY'][label_mask]
                maxX = gt_bb['maxX'][label_mask]

                # i0 and i1 bracket `time` within this label's samples
                i1 = np.searchsorted(ts, time)
                i0 = i1 - 1
                if i0 < 0:
                    # `time` precedes the first sample: use it verbatim if close enough
                    if abs(ts[0] - time) < timeWindow:
                        if kwargs.get('with_labels', True) and 'label' in gt_bb.keys():
                            boxes.append((minY[0], minX[0], maxY[0], maxX[0], label))
                        else:
                            boxes.append((minY[0], minX[0], maxY[0], maxX[0]))
                        continue
                    else:
                        continue
                if i1 >= len(ts):
                    # `time` follows the last sample: use it verbatim if close enough
                    if abs(ts[-1] - time) < timeWindow:
                        if kwargs.get('with_labels', True) and 'label' in gt_bb.keys():
                            boxes.append((minY[-1], minX[-1], maxY[-1], maxX[-1], label))
                        else:
                            boxes.append((minY[-1], minX[-1], maxY[-1], maxX[-1]))
                        continue
                    else:
                        continue

                # Linear interpolation of each corner between samples i0 and i1
                minY_interp = minY[i0] + ((minY[i1] - minY[i0]) / abs(ts[i1] - ts[i0])) * (time - ts[i0])
                minX_interp = minX[i0] + ((minX[i1] - minX[i0]) / abs(ts[i1] - ts[i0])) * (time - ts[i0])
                maxY_interp = maxY[i0] + ((maxY[i1] - maxY[i0]) / abs(ts[i1] - ts[i0])) * (time - ts[i0])
                maxX_interp = maxX[i0] + ((maxX[i1] - maxX[i0]) / abs(ts[i1] - ts[i0])) * (time - ts[i0])
                if kwargs.get('with_labels', True) and 'label' in gt_bb.keys():
                    boxes.append((minY_interp, minX_interp, maxY_interp, maxX_interp, label))
                else:
                    boxes.append((minY_interp, minX_interp, maxY_interp, maxX_interp))
            boxes = np.array(boxes).astype(int)
        else:
            # No interpolation: take every sample inside the window
            boxes = np.column_stack((gt_bb['minY'][indices], gt_bb['minX'][indices],
                                     gt_bb['maxY'][indices], gt_bb['maxX'][indices])).astype(int)
            if kwargs.get('with_labels', True) and 'label' in gt_bb.keys():
                labels = gt_bb['label'][indices].astype(int)
                boxes = np.column_stack([boxes, labels])
        # De-duplicate identical rows before returning
        boxes = np.unique(boxes, axis=0)
        return boxes

    def get_settings(self):
        """Describe the user-tunable settings for the bounding-box overlay."""
        settings = {'with_labels': {'type': 'boolean',
                                    'default': True
                                    },
                    'show_bounding_boxes': {'type': 'boolean',
                                            'default': True
                                            },
                    'interpolate': {'type': 'boolean',
                                    'default': False
                                    }
                    }
        return settings
110 |
--------------------------------------------------------------------------------
/bimvee/visualisers/visualiserDvs.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | Massimiliano Iacono
6 |
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
14 | this program. If not, see .
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
17 | Functionality for serving images which represent the data at a certain time
18 | (or given a certain time window).
19 | The intended use case is to support video-like playback of data
20 | There is a generic Visualiser class - it contains a dataType dict.
21 | This is subclassed for each supported dataType (e.g. dvs, frame, pose etc)
22 | Each subclass should implement basic methods: get_dims, get_frame, get_colorfmt
23 | Implementing set_data allows the visualiser to do some preparation for visualisation
24 | when it receives new data.
25 |
26 | VisualiseDvs creates an event frame centred at time and of size timeWindow.
27 | The user may set the amount of contrast (concurrent events in a pixel to reach
28 | full colour) and whether to render polarity (in which case ON and OFF events
29 | are counted against each other).
30 | """
31 |
32 | import numpy as np
33 |
34 | # Local imports
35 | from ..plotDvsContrast import getEventImageForTimeRange
36 | from .visualiserBase import Visualiser
37 |
38 |
class VisualiserDvs(Visualiser):
    """
    Serves event-frame images built from dvs data.

    get_frame renders the events inside a window of width timeWindow centred
    on `time` (via getEventImageForTimeRange) and rescales the result to
    uint8 according to the requested image_type and contrast.
    """

    data_type = 'dvs'
    coloured = False  # updated per frame; consulted by get_colorfmt()

    # TODO: There can be methods which better choose the best frame, or which
    # create a visualisation which respects the time_window parameter

    def get_frame(self, time, timeWindow, **kwargs):
        """Return the event image for `time` as a uint8 array (rgb when coloured)."""
        image_type = kwargs.get('image_type')
        self.coloured = image_type == 'coloured'
        data = self._data
        half_window = timeWindow / 2
        kwargs['startTime'] = time - half_window
        kwargs['stopTime'] = time + half_window
        kwargs['dimX'] = data['dimX']
        kwargs['dimY'] = data['dimY']
        image = getEventImageForTimeRange(data, **kwargs)
        # Post-processing: bring the image into uint8 at the correct scale
        contrast = kwargs.get('contrast', 3)
        if image_type == 'coloured':
            pass  # already a colour image - leave untouched
        elif image_type in ('count', 'binary'):
            # Signed counts in [-contrast, contrast]: shift, then scale to 0-255
            image = ((image + contrast) / contrast / 2 * 255).astype(np.uint8)
        else:
            image = (image / contrast * 255).astype(np.uint8)
        # Allow for arbitrary post-production on image with a callback
        callback = kwargs.get('callback', None)
        if callback is not None:
            kwargs['image'] = image
            image = callback(**kwargs)
        return image

    def get_dims(self):
        """Return (dimX, dimY), deriving and caching them from the events if absent."""
        try:
            data = self._data
        except AttributeError:  # data hasn't been set yet
            return 1, 1
        if 'dimX' not in data:
            data['dimX'] = np.max(data['x']) + 1
        if 'dimY' not in data:
            data['dimY'] = np.max(data['y']) + 1
        return data['dimX'], data['dimY']

    def get_settings(self):
        """Describe the user-tunable settings for dvs visualisation."""
        return {
            'image_type': {
                'type': 'value_list',
                'default': 'binary',
                'values': ['count', 'binary', 'not_polarized', 'time_image', 'coloured'],
            },
            'contrast': {
                'type': 'range',
                'default': 3,
                'min': 1,
                'max': 20,
                'step': 1,
            },
            'pol_to_show': {
                'type': 'value_list',
                'default': 'Both',
                'values': ['Pos', 'Neg', 'Both'],
            },
        }

    def get_colorfmt(self):
        """'rgb' when the last rendered frame was coloured, else 'luminance'."""
        if self.coloured:
            return 'rgb'
        return 'luminance'
105 |
--------------------------------------------------------------------------------
/bimvee/visualisers/visualiserEyeTracking.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | Massimiliano Iacono
6 |
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
14 | this program. If not, see .
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
17 | Functionality for serving images which represent the data at a certain time
18 | (or given a certain time window).
19 | The intended use case is to support video-like playback of data
20 | There is a generic Visualiser class - it contains a dataType dict.
21 | This is subclassed for each supported dataType (e.g. dvs, frame, pose etc)
22 | Each subclass should implement basic methods: get_dims, get_frame, get_colorfmt
23 | Implementing set_data allows the visualiser to do some preparation for visualisation
24 | when it receives new data.
25 |
VisualiserEyeTracking doesn't actually create a visualisation but rather
queries the data for the eye-tracking samples around the current time and passes
these out for visualisation as an overlay.
29 | """
30 |
31 | import numpy as np
32 | from scipy.interpolate import interp1d
33 |
34 | # Local imports
35 | from .visualiserBase import Visualiser
36 |
def radian_difference(rad1, rad2):
    """Return rad1 - rad2 wrapped into the interval [-pi, pi)."""
    two_pi = 2 * np.pi
    return (rad1 - rad2 + np.pi) % two_pi - np.pi
41 |
class VisualiserEyeTracking(Visualiser):
    """
    Serves eye-tracking ground truth for overlay rather than rendering an image.

    get_frame returns a dict of the per-field values of the sample nearest to
    `time` (or a linearly-interpolated dict when requested), or None.
    """

    data_type = 'eyeTracking'

    def get_frame(self, time, timeWindow, **kwargs):
        """
        Return the eye-tracking sample for `time` as a dict of field values.

        kwargs:
            show_eyes_gt (bool): master switch; None is returned when False.
            interpolate (bool): when the nearest sample is further than
                timeWindow, synthesise one by linear interpolation instead
                of returning None; the result carries 'interpolated': True.
        """
        if self._data is None or not kwargs.get('show_eyes_gt', True):
            return None
        # Index of the first sample with ts >= time
        idx = np.searchsorted(self._data['ts'], time)
        try:
            if np.abs(self._data['ts'][idx] - time) > timeWindow:
                if not kwargs.get('interpolate'):
                    return None
                # Slice a small neighbourhood around idx over every sequence field.
                # NOTE(review): max(0, idx-2):idx + 2 takes up to four samples;
                # presumably intended to bracket `time` - confirm the offsets.
                data_to_interpolate = {k: self.get_data()[k][max(0, idx-2):idx + 2] for k in self.get_data().keys() if hasattr(self.get_data()[k], '__len__')}
                if not(data_to_interpolate['ts'][0] < time < data_to_interpolate['ts'][-1]):
                    return None
                out_dict = {}
                for x in data_to_interpolate:
                    val = data_to_interpolate[x]
                    if x == 'eye_closed':
                        # Boolean field - not linearly interpolable.
                        # NOTE(review): uses the first two samples of the slice;
                        # presumably the pair bracketing `time` - verify.
                        out_dict[x] = val[0] and val[1]
                        continue
                    linear_interp = interp1d(data_to_interpolate['ts'], val, kind='linear')
                    out_dict[x] = linear_interp(time)
                # Mark the result as synthesised rather than an actual sample
                out_dict['interpolated'] = True
                return out_dict
            # Nearest sample is within the window: return its fields verbatim
            return {k: self.get_data()[k][idx] for k in self.get_data().keys() if hasattr(self.get_data()[k], '__len__')}
        except IndexError:
            # idx == len(ts): `time` lies beyond the last sample
            return None

    def get_settings(self):
        """Describe the user-tunable settings for the eye-tracking overlay."""
        settings = {'show_eyes_gt': {'type': 'boolean',
                                     'default': True
                                     },
                    'show_xy_pointcloud': {'type': 'boolean',
                                           'default': False
                                           },
                    'fixed_radius': {'type': 'boolean',
                                     'default': True
                                     },
                    'fixed_uv': {'type': 'boolean',
                                 'default': True
                                 },
                    'interpolate': {'type': 'boolean',
                                    'default': False
                                    }
                    }
        return settings
89 |
--------------------------------------------------------------------------------
/bimvee/visualisers/visualiserFrame.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | Massimiliano Iacono
6 |
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
14 | this program. If not, see .
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
17 | Functionality for serving images which represent the data at a certain time
18 | (or given a certain time window).
19 | The intended use case is to support video-like playback of data
20 | There is a generic Visualiser class - it contains a dataType dict.
21 | This is subclassed for each supported dataType (e.g. dvs, frame, pose etc)
22 | Each subclass should implement basic methods: get_dims, get_frame, get_colorfmt
23 | Implementing set_data allows the visualiser to do some preparation for visualisation
24 | when it receives new data.
25 |
26 | VisualiserFrame visualises frame data, using the frame in the dict nearest
27 | to the current time.
28 |
29 | There is an optional mode: you can add a 'tsEnd' field to the dict, an np array
30 | of length corresponding to 'ts'. Then, the selected frame will be the last
31 | one prior to (or exactly equal to) the current time, and will only be shown
32 | until 'tsEnd'. Otherwise, timeWindow dictates whether the selected (nearest)
33 | frame is shown or not.
34 | """
35 |
36 | import numpy as np
37 | import math
38 |
39 | # Local imports
40 | from .visualiserBase import Visualiser
41 |
42 | # A function intended to find the nearest timestamp
43 | # adapted from https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
44 |
45 |
def findNearest(array, value):
    """Return the index of the element of sorted `array` nearest to `value`.

    Adapted from https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    """
    insertionPoint = np.searchsorted(array, value)  # default side="left"
    if insertionPoint == 0:
        return 0
    if insertionPoint == len(array):
        return insertionPoint - 1
    distBelow = math.fabs(value - array[insertionPoint - 1])
    distAbove = math.fabs(value - array[insertionPoint])
    return insertionPoint - 1 if distBelow < distAbove else insertionPoint
54 |
55 |
class VisualiserFrame(Visualiser):
    """Visualiser for 'frame' data.

    Serves the stored frame nearest to the current playback time. If the data
    dict contains a 'tsEnd' array, instead serves the last frame starting at
    or before the current time, and only until that frame's 'tsEnd'.
    """

    data_type = 'frame'

    def set_data(self, data):
        """Receive a new data dict.

        If the frames are not already uint8, rescale the whole dataset to
        full-scale uint8 using the global min/max across all frames.
        """
        super().set_data(data)

        if self._data['frames'][0].dtype != np.uint8:
            # Convert to uint8, converting to fullscale across the whole dataset
            minValue = min([frame.min() for frame in self._data['frames']])
            maxValue = max([frame.max() for frame in self._data['frames']])
            # Guard against division by zero when every frame holds a single
            # constant value (previously produced nan / a divide error)
            valueRange = maxValue - minValue
            if valueRange == 0:
                valueRange = 1
            # TODO: assuming that it starts scaled in 0-1 - could have more general approach?
            self._data['frames'] = [
                ((frame - minValue) / valueRange * 255).astype(np.uint8)
                for frame in self._data['frames']]

    def get_colorfmt(self):
        """Return 'rgb' for 3-channel frames, otherwise 'luminance'."""
        try:
            if len(self._data['frames'][0].shape) == 3:
                return 'rgb'
            else:
                return 'luminance'
        except (AttributeError, KeyError, IndexError, TypeError):
            # Data not set yet, or frames missing/malformed - default to
            # greyscale. (Was a bare except; narrowed to the failure modes
            # this lookup can actually produce.)
            return 'luminance'

    def get_default_image(self):
        """Return a mid-grey placeholder image for when no frame applies."""
        x, y = self.get_dims()
        # Return an x,y,3 by default i.e. rgb, for safety, since in the absence
        # of data we may not know how the texture's colorfmt is set
        # NOTE(review): dims are used as (x, y, 3) here; numpy images are
        # conventionally (rows=y, cols=x) - confirm against the texture consumer
        return np.ones((x, y, 3), dtype=np.uint8) * 128  # TODO: Hardcoded midway (grey) value

    # TODO: There can be methods which better choose the best frame, or which
    # create a visualisation which respects the time_window parameter
    def get_frame(self, time, timeWindow, **kwargs):
        """Return the image to show at `time`, or a default grey image.

        Supports an optional 'callback' kwarg for arbitrary post-production.
        """
        data = self._data
        if 'tsEnd' in data:
            # Optional mode in which frames are only displayed
            # between corresponding ts and tsEnd
            frameIdx = np.searchsorted(data['ts'], time, side='right') - 1
            if frameIdx < 0:
                # Before the first frame starts
                image = self.get_default_image()
            elif time > data['tsEnd'][frameIdx]:
                # The selected frame has already expired
                image = self.get_default_image()
            else:
                image = data['frames'][frameIdx]
        elif time < data['ts'][0] - timeWindow / 2 or time > data['ts'][-1] + timeWindow / 2:
            # Gone off the end of the frame data
            image = self.get_default_image()
        else:
            image = data['frames'][findNearest(data['ts'], time)]
        # Allow for arbitrary post-production on image with a callback
        # TODO: as this is boilerplate, it could be pushed into pie syntax ...
        if kwargs.get('callback', None) is not None:
            kwargs['image'] = image
            image = kwargs['callback'](**kwargs)
        return image

    def get_dims(self):
        """Return (x, y) dims, preferring explicit 'dimX'/'dimY' over frame shape."""
        try:
            data = self._data
        except AttributeError:  # data hasn't been set yet
            return 1, 1
        x = data['dimX'] if 'dimX' in data else data['frames'][0].shape[1]
        y = data['dimY'] if 'dimY' in data else data['frames'][0].shape[0]
        return x, y
120 |
--------------------------------------------------------------------------------
/bimvee/visualisers/visualiserOpticFlow.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | Massimiliano Iacono
6 |
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
14 | this program. If not, see .
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
17 | Functionality for serving images which represent the data at a certain time
18 | (or given a certain time window).
19 | The intended use case is to support video-like playback of data
20 | There is a generic Visualiser class - it contains a dataType dict.
21 | This is subclassed for each supported dataType (e.g. dvs, frame, pose etc)
22 | Each subclass should implement basic methods: get_dims, get_frame, get_colorfmt
23 | Implementing set_data allows the visualiser to do some preparation for visualisation
24 | when it receives new data.
25 |
26 | VisualiserOpticFlow - for dense optic flow maps ...
27 | """
28 |
29 | import numpy as np
30 | import math
31 |
32 | # Local imports
33 | from .visualiserBase import Visualiser
34 |
35 | # A function intended to find the nearest timestamp
36 | # adapted from https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
37 |
38 |
def findNearest(array, value):
    """Return the index of the element of sorted `array` nearest to `value`.

    Adapted from https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    """
    pos = np.searchsorted(array, value)  # default side="left"
    if pos == 0:
        return 0
    if pos == len(array):
        return pos - 1
    # Interior insertion point: pick whichever neighbour is closer
    if math.fabs(value - array[pos - 1]) < math.fabs(value - array[pos]):
        return pos - 1
    return pos
47 |
48 |
class VisualiserOpticFlow(Visualiser):
    """Visualiser for dense optic flow maps ('flowMap' data type).

    Serves an RGB rendering of the flow map nearest in time to the current
    playback time, encoded with the Middlebury colour wheel (direction ->
    hue, magnitude -> saturation).
    """

    data_type = 'flowMap'

    def __init__(self, data):
        super().__init__(data)
        self.colorwheel = self.make_colorwheel()  # shape [55x3]

    def get_colorfmt(self):
        # Flow visualisation is always rendered as 3-channel RGB
        return 'rgb'

    def get_default_image(self):
        """Return a mid-grey placeholder image for when no flow map applies."""
        x, y = self.get_dims()
        # Return an x,y,3 by default i.e. rgb, for safety, since in the absence of data we may not know how the texture's colorfmt is set
        # NOTE(review): dims are used as (x, y, 3) here; numpy images are
        # conventionally (rows=y, cols=x) - confirm against the texture consumer
        return np.ones((x, y, 3), dtype=np.uint8) * 128  # TODO: Hardcoded midway (grey) value

    # TODO: There can be methods which better choose the best frame, or which create a visualisation which
    # respects the time_window parameter
    def get_frame(self, time, timeWindow, **kwargs):
        """Return the colour-coded flow map nearest to `time`, or a grey default
        when `time` is outside the data range (padded by half a timeWindow)."""
        data = self._data
        if time < data['ts'][0] - timeWindow / 2 or time > data['ts'][-1] + timeWindow / 2:
            # Gone off the end of the frame data
            image = self.get_default_image()
        else:
            frameIdx = findNearest(data['ts'], time)
            image = self.flow_to_color(data['flowMaps'][frameIdx])

        return image

    def get_dims(self):
        """Return (x, y) dims, preferring explicit 'dimX'/'dimY' over map shape."""
        try:
            data = self._data
        except AttributeError:  # data hasn't been set yet
            return 1, 1
        x = data['dimX'] if 'dimX' in data else data['flowMaps'][0].shape[1]
        y = data['dimY'] if 'dimY' in data else data['flowMaps'][0].shape[0]
        return x, y

    ''' The methods make_colorwheel, flow_to_color and flow_uv_to_colors are taken from
    https://github.com/tomrunia/OpticalFlow_Visualization/blob/master/flow_vis/flow_vis.py'''
    @staticmethod
    def make_colorwheel():
        """
        Generates a color wheel for optical flow visualization as presented in:
        Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
        URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
        Code follows the original C++ source code of Daniel Scharstein.
        Code follows the the Matlab source code of Deqing Sun.
        Returns:
            np.ndarray: Color wheel of shape [55, 3] (RY+YG+GC+CB+BM+MR rows)
        """

        # Number of wheel entries in each colour transition segment
        RY = 15
        YG = 6
        GC = 4
        CB = 11
        BM = 13
        MR = 6

        ncols = RY + YG + GC + CB + BM + MR
        colorwheel = np.zeros((ncols, 3))
        col = 0

        # Each segment ramps one RGB channel while holding another at 255
        # RY
        colorwheel[0:RY, 0] = 255
        colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY)
        col = col + RY
        # YG
        colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG)
        colorwheel[col:col + YG, 1] = 255
        col = col + YG
        # GC
        colorwheel[col:col + GC, 1] = 255
        colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC)
        col = col + GC
        # CB
        colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB)
        colorwheel[col:col + CB, 2] = 255
        col = col + CB
        # BM
        colorwheel[col:col + BM, 2] = 255
        colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM)
        col = col + BM
        # MR
        colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR)
        colorwheel[col:col + MR, 0] = 255
        return colorwheel

    def flow_uv_to_colors(self, u, v, convert_to_bgr=False):
        """
        Applies the flow color wheel to (possibly clipped) flow components u and v.
        According to the C++ source code of Daniel Scharstein
        According to the Matlab source code of Deqing Sun
        Args:
            u (np.ndarray): Input horizontal flow of shape [H,W]
            v (np.ndarray): Input vertical flow of shape [H,W]
            convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
        Returns:
            np.ndarray: Flow visualization image of shape [H,W,3]
        """
        flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
        ncols = self.colorwheel.shape[0]
        rad = np.sqrt(np.square(u) + np.square(v))  # flow magnitude per pixel
        a = np.arctan2(-v, -u) / np.pi  # flow direction, mapped to [-1, 1]
        fk = (a + 1) / 2 * (ncols - 1)  # fractional index into the colour wheel
        k0 = np.floor(fk).astype(int)
        k1 = k0 + 1
        k1[k1 == ncols] = 0  # wrap around the wheel
        f = fk - k0  # interpolation weight between wheel entries k0 and k1
        for i in range(self.colorwheel.shape[1]):
            tmp = self.colorwheel[:, i]
            col0 = tmp[k0] / 255.0
            col1 = tmp[k1] / 255.0
            col = (1 - f) * col0 + f * col1
            idx = (rad <= 1)
            # In-range pixels: desaturate towards white as magnitude shrinks
            col[idx] = 1 - rad[idx] * (1 - col[idx])
            col[~idx] = col[~idx] * 0.75  # out of range
            # Note the 2-i => BGR instead of RGB
            ch_idx = 2 - i if convert_to_bgr else i
            flow_image[:, :, ch_idx] = np.floor(255 * col)
        return flow_image

    def flow_to_color(self, flow_uv, clip_flow=None, convert_to_bgr=False):
        """
        Expects a two dimensional flow image of shape.
        Args:
            flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
            clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
            convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
        Returns:
            np.ndarray: Flow visualization image of shape [H,W,3]
        """
        assert flow_uv.ndim == 3, 'input flow must have three dimensions'
        assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
        if clip_flow is not None:
            # NOTE(review): this clips to [0, clip_flow], which also discards
            # negative flow components - inherited from flow_vis; confirm intended
            flow_uv = np.clip(flow_uv, 0, clip_flow)
        u = flow_uv[:, :, 0]
        v = flow_uv[:, :, 1]
        # Normalise by the maximum magnitude so the full colour range is used
        rad = np.sqrt(np.square(u) + np.square(v))
        rad_max = np.max(rad)
        epsilon = 1e-5  # avoids division by zero for an all-zero flow map
        u = u / (rad_max + epsilon)
        v = v / (rad_max + epsilon)
        return self.flow_uv_to_colors(u, v, convert_to_bgr)
193 |
--------------------------------------------------------------------------------
/bimvee/visualisers/visualiserPoint3.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2019 Event-driven Perception for Robotics
4 | Authors: Sim Bamford
5 | Massimiliano Iacono
6 |
7 | This program is free software: you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free Software
9 | Foundation, either version 3 of the License, or (at your option) any later version.
10 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
12 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 | You should have received a copy of the GNU General Public License along with
14 | this program. If not, see .
15 |
16 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
17 | Functionality for serving images which represent the data at a certain time
18 | (or given a certain time window).
19 | The intended use case is to support video-like playback of data
20 | There is a generic Visualiser class - it contains a dataType dict.
21 | This is subclassed for each supported dataType (e.g. dvs, frame, pose etc)
22 | Each subclass should implement basic methods: get_dims, get_frame, get_colorfmt
23 | Implementing set_data allows the visualiser to do some preparation for visualisation
24 | when it receives new data.
25 |
26 | VisualisePoint3 shows a basic point cloud containing every point in the dict
27 | with a timestamp within the time window.
28 | The world (everywhere the points reach in the whole data) is compressed into
29 | a rectangle which is then viewed from the outside as if the world reference
30 | frame were aligned with a standard camera reference frame,
31 | so z increases away from the viewer, x increases to the right,
32 | and y in a downwards direction.
33 | The user has a choice to apply perspective (scale decreases away from the
34 | viewer) or not.
35 | There are also pitch and yaw controls - this alters the basic viewpoint of
36 | the viewer described above.
37 | """
38 |
39 | import numpy as np
40 |
41 | # Local imports
42 | from .visualiserBase import Visualiser
43 |
44 |
class VisualiserPoint3(Visualiser):
    """Visualiser for 'point3' data: renders a simple point cloud.

    The whole dataset is scaled and offset to fit a unit cube; points whose
    timestamps fall within the time window are projected to a 2D render,
    optionally with perspective, and with yaw/pitch viewpoint controls.
    """
    renderX = 300  # TODO Hardcoded
    renderY = 300
    labels = None
    data_type = 'point3'

    def __init__(self, data):
        self.set_data(data)
        self.smallestRenderDim = min(self.renderX, self.renderY)

    def set_data(self, data):
        """Receive a new data dict, rescaling the point cloud.

        Offset and scale the point translations so that they remain
        proportional but all fit into the range 0-1 in every dimension.
        """
        # NOTE(review): np.empty_like preserves dtype - integer 'point' input
        # would truncate the scaled values; presumably input is float - confirm
        pointX = data['point'][:, 0]
        pointY = data['point'][:, 1]
        pointZ = data['point'][:, 2]
        minX = np.min(pointX)
        maxX = np.max(pointX)
        minY = np.min(pointY)
        maxY = np.max(pointY)
        minZ = np.min(pointZ)
        maxZ = np.max(pointZ)
        centreX = (minX + maxX) / 2
        centreY = (minY + maxY) / 2
        centreZ = (minZ + maxZ) / 2
        largestDim = max(maxX - minX, maxY - minY, maxZ - minZ)
        if largestDim == 0:
            # All points coincide - avoid division by zero below
            largestDim = 1

        pointScaled = np.empty_like(data['point'])
        pointScaled[:, 0] = pointX - centreX
        pointScaled[:, 1] = pointY - centreY
        pointScaled[:, 2] = pointZ - centreZ
        pointScaled = pointScaled / largestDim + 0.5
        internalData = {'ts': data['ts'],
                        'point': pointScaled
                        }
        self._data = internalData

    def project3dTo2d(self, x=0, y=0, z=0, **kwargs):
        """Project normalised 3d coordinates (range 0-1) to integer pixel
        coordinates, optionally applying perspective.
        """
        smallestRenderDim = kwargs.get('smallestRenderDim', 1)
        windowFill = kwargs.get('windowFill', 0.9)
        if kwargs.get('perspective', True):
            # Move z out by 1, so that the data is between 1 and 2 distant in z
            # x and y are in range 0-1, so they get shifted to be centred
            # around 0 during this operation
            x = (x - 0.5) / (z + 1) + 0.5
            y = (y - 0.5) / (z + 1) + 0.5
        # Shrink towards the centre by windowFill, then scale to pixels
        x = (x * windowFill + (1 - windowFill) / 2) * smallestRenderDim
        y = (y * windowFill + (1 - windowFill) / 2) * smallestRenderDim
        x = x.astype(int)
        y = y.astype(int)
        return x, y

    def point_to_image(self, point, image, **kwargs):
        """Paint a single (white) projected point onto image; returns image."""
        if point is None:
            return image
        # Unpack
        pointX = point[0]
        pointY = point[1]
        pointZ = point[2]
        # Project the location
        projX, projY = self.project3dTo2d(x=pointX, y=pointY, z=pointZ,
                                          smallestRenderDim=self.smallestRenderDim, **kwargs)
        try:
            image[projY, projX, :] = 255
        except IndexError:  # perspective or other projection issues cause out of bounds? ignore
            # NOTE(review): negative indices wrap silently rather than raising
            pass
        return image

    def get_frame(self, time, timeWindow, **kwargs):
        """Render all points within +/- timeWindow of time, after applying
        yaw/pitch rotation from kwargs; supports a 'callback' kwarg for
        arbitrary post-production.
        """
        data = self._data
        if data is None:
            print('Warning: data is not set')
            return np.zeros((1, 1), dtype=np.uint8)  # This should not happen
        image = np.zeros((self.renderY, self.renderX, 3), dtype=np.uint8)
        # Put a grey box around the edge of the image
        image[0, :, :] = 128
        image[-1, :, :] = 128
        image[:, 0, :] = 128
        image[:, -1, :] = 128
        # Put a grey crosshair in the centre of the image
        rY = self.renderY
        rX = self.renderX  # bugfix: was self.renderY; masked only while renderX == renderY
        chp = 20  # Cross Hair Proportion for following expression
        image[int(rY / 2 - rY / chp): int(rY / 2 + rY / chp), int(rX / 2), :] = 128
        image[int(rY / 2), int(rX / 2 - rX / chp): int(rX / 2 + rX / chp), :] = 128
        firstIdx = np.searchsorted(data['ts'], time - timeWindow)
        lastIdx = np.searchsorted(data['ts'], time + timeWindow)
        points = data['point'][firstIdx:lastIdx, :]
        # Use yaw and pitch sliders to transform points
        yaw = -kwargs.get('yaw', 0) / 180 * np.pi
        pitch = kwargs.get('pitch', 0) / 180 * np.pi
        roll = 0
        cosA = np.cos(roll)
        cosB = np.cos(yaw)
        cosC = np.cos(pitch)
        sinA = np.sin(roll)
        sinB = np.sin(yaw)
        sinC = np.sin(pitch)
        # Standard combined roll-yaw-pitch rotation matrix (roll fixed at 0)
        rotMat = np.array([[cosA * cosB, cosA * sinB * sinC - sinA * cosC, cosA * sinB * cosC + sinA * sinC],
                           [sinA * cosB, sinA * sinB * sinC + cosA * cosC, sinA * sinB * cosC - cosA * sinC],
                           [-sinB, cosB * sinC, cosB * cosC]],
                          dtype=float)
        # Rotate about the centre of the unit cube
        points = points - 0.5
        points = np.matmul(rotMat, points.transpose()).transpose()
        points = points + 0.5

        for row in points:
            image = self.point_to_image(row, image, **kwargs)

        # Allow for arbitrary post-production on image with a callback
        # TODO: as this is boilerplate, it could be pushed into pie syntax ...
        if kwargs.get('callback', None) is not None:
            kwargs['image'] = image
            image = kwargs['callback'](**kwargs)
        return image

    def get_dims(self):
        """Return the fixed render dimensions (x, y)."""
        return self.renderX, self.renderY

    def get_colorfmt(self):
        return 'rgb'

    def get_settings(self):
        """Declare the user-tunable settings: perspective toggle plus
        yaw/pitch viewpoint sliders in degrees."""
        settings = {'perspective': {'type': 'boolean',
                                    'default': True
                                    },
                    'yaw': {'type': 'range',
                            'default': 0,
                            'min': -90,
                            'max': 90,
                            'step': 1
                            },
                    'pitch': {'type': 'range',
                              'default': 0,
                              'min': -90,
                              'max': 90,
                              'step': 1
                              }}

        return settings
193 |
--------------------------------------------------------------------------------
/bimvee/visualisers/visualiserSkeleton.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright (C) 2021 Event-driven Perception for Robotics
4 | Authors: Franco Di Pietro
5 | Sim Bamford
6 | Massimiliano Iacono
7 |
8 | This program is free software: you can redistribute it and/or modify it under
9 | the terms of the GNU General Public License as published by the Free Software
10 | Foundation, either version 3 of the License, or (at your option) any later version.
11 | This program is distributed in the hope that it will be useful, but WITHOUT ANY
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
13 | PARTICULAR PURPOSE. See the GNU General Public License for more details.
14 | You should have received a copy of the GNU General Public License along with
15 | this program. If not, see .
16 |
17 | Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
18 | Functionality for serving images which represent the data at a certain time
19 | (or given a certain time window).
20 | The intended use case is to support video-like playback of data
21 | There is a generic Visualiser class - it contains a dataType dict.
22 | This is subclassed for each supported dataType (e.g. dvs, frame, pose etc)
23 | Each subclass should implement basic methods: get_dims, get_frame, get_colorfmt
24 | Implementing set_data allows the visualiser to do some preparation for visualisation
25 | when it receives new data.
26 |
27 | VisualiserSkeleton serves, for the current time, the most recent set of
28 | skeleton joint coordinates (one x,y pair per joint), for overlay on other
29 | visualisations. Settings allow the skeleton and its joint labels to be
30 | shown or hidden.
31 | """
32 |
33 | import numpy as np
34 | from scipy import ndimage
35 | import math
36 | # Local imports
37 | from ..plotDvsContrast import getEventImageForTimeRange
38 | from .visualiserBase import Visualiser
39 |
40 |
def findNearest(array, value):
    """Return the index of the element of sorted `array` nearest to `value`."""
    insertAt = np.searchsorted(array, value)  # default side="left"
    if insertAt == 0:
        return 0
    if insertAt == len(array) or math.fabs(value - array[insertAt - 1]) < math.fabs(value - array[insertAt]):
        return insertAt - 1
    return insertAt
47 |
48 |
class VisualiserSkeleton(Visualiser):
    """Visualiser for 'skeleton' data.

    get_frame returns, for the current time, a dict mapping each joint name
    to its latest [x, y] coordinates, for overlay on other visualisations.
    """
    data_type = 'skeleton'

    def point_to_image(self, xS, yS, image, sktN, **kwargs):
        """Paint a small coloured square for one joint at (xS, yS) onto image.

        Colour encodes the skeleton number: 0 red, 1 blue, others green.
        Returns the (mutated) image.
        """
        if xS is None:
            return image
        rad = 1  # radius for each joint point - hardcoded: could be a parameter
        if (sktN == 0):
            col = [255, 0, 0]
        elif (sktN == 1):
            col = [0, 0, 255]
        else:
            col = [0, 128, 0]
        try:
            # NOTE(review): range(-rad, rad) paints a 2x2 block, not centred on
            # the joint; possibly range(-rad, rad + 1) was intended - confirm
            for i in range(-rad, rad):
                for j in range(-rad, rad):
                    image[yS + i, xS + j, :] = col
        except IndexError:
            # Joint partially outside the image - skip silently
            pass
        return image

    def get_frame(self, time, timeWindow, **kwargs):
        """Return {jointName: [x, y]} for the latest sample at or before
        time + timeWindow; None if the skeleton is hidden or no sample has
        occurred yet.
        """
        if not kwargs.get('show_skeleton'):
            return None
        data = self._data
        kwargs['startTime'] = time - timeWindow / 2
        kwargs['stopTime'] = time + timeWindow / 2

        lastIdx = np.searchsorted(data['ts'], time + timeWindow) - 1
        if lastIdx < 0:
            # Before the first sample. Previously the -1 index silently
            # wrapped around to the LAST sample; return nothing instead.
            return None
        outData = {}
        for key in data:
            if key != 'ts' and key != 'tsOffset':
                outData[key] = [data[key][lastIdx, 0],
                                data[key][lastIdx, 1]]

        return outData

    def get_settings(self):
        """Declare the user-tunable settings: toggles for showing the
        skeleton and its joint labels."""
        settings = {'show_skeleton': {'type': 'boolean',
                                      'default': True},
                    'show_labels': {'type': 'boolean',
                                    'default': False
                                    }
                    }

        return settings
118 |
--------------------------------------------------------------------------------
/examples/examples.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see .

Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
This script contains a set of examples of how to use the functions of the bimvee library.
In each case, change the file paths as required to point to your own example data.
"""

#%% Preliminaries - set your paths as necessary, or change working directory

import os, sys # A system-specific prefix, for working between linux and windows
prefix = 'C:/' if os.name == 'nt' else '/home/sbamford/'
sys.path.insert(0, os.path.join(prefix, 'repos/bimvee'))

#%% Import from yarp

from importIitYarp import importIitYarp

filePathOrName = os.path.join(prefix, "data/2019_11_11_AikoImu/tripod_pitch")
# If the number of bits dedicated to the timestamp have been limited
# prior to dumping, then match this with the 'tsBits' parameter
imported = importIitYarp(filePathOrName=filePathOrName, tsBits=30)

#%% Inspection of a rosbag
'''
To just inspect the connections of a rosbag file , pass in an empty template.
This doesn't import any data but prints out which connections are present
in the file. You can then construct a template to import the file as desired.
'''

from importRpgDvsRos import importRpgDvsRos
filePathOrName = os.path.join(prefix, 'data/rpg/shapes_rotation.bag')
inspected = importRpgDvsRos(filePathOrName=filePathOrName)

#%% Import Rpg Event-Camera Dataset

# http://rpg.ifi.uzh.ch/davis_data.html
from importRpgDvsRos import importRpgDvsRos

filePathOrName = os.path.join(prefix, 'data/rpg/shapes_rotation.bag')

# Template maps channel name -> {dataType: rosbag topic}
template = {
    'ch0': {
        'dvs': '/dvs/events',
        'frame': '/dvs/image_raw',
        'pose6q': '/optitrack/davis',
        'cam': '/dvs/camera_info',
        'imu': '/dvs/imu'
        }
    }

imported = importRpgDvsRos(filePathOrName=filePathOrName, template=template)

#%% import Penn MVSEC

# NOTE(review): importPennMvsec is not part of the bimvee package in this
# repo - confirm it is available on the path before running this cell
from importPennMvsec import importPennMvsecDavis, importPennMvsecGt


filePathOrName = os.path.join(prefix, 'data/mvsec/indoor_flying1_data.bag')
'''
Optionally, override the default template ...
template = {
    'left': {
        'dvs': '/davis/left/events',
        'frame': '/davis/left/image_raw',
        'imu': '/davis/left/imu',
        'cam': '/davis/left/camera_info',
    }, 'right': {
        'dvs': '/davis/right/events',
        'frame': '/davis/right/image_raw',
        'imu': '/davis/right/imu',
        'cam': '/davis/right/camera_info',
    }
}
'''
importedDavis = importPennMvsecDavis(filePathOrName=filePathOrName)

filePathOrName = os.path.join(prefix, 'data/mvsec/indoor_flying1_gt.bag')
importedGt = importPennMvsecGt(filePathOrName=filePathOrName)

#imported = [importedDavis, importedGt]

# Refactor into single container
container = importedDavis
container['data'].update(importedGt['data'])


#%% Import realsense

# NOTE(review): importIntelRealsense is likewise not present in this repo's
# bimvee package - confirm availability
from importIntelRealsense import importIntelRealsense

filePathOrName = os.path.join(prefix, '/data/2019_10_23_static/20191023_165520.bag')

imported = importIntelRealsense(filePathOrName=filePathOrName)

#%% MANIPULATION FUNCTIONS

#%% Cropping a dataset to a desired time range

from split import cropTime

cropped = cropTime(imported, minTime=35, maxTime=38.5)

#%% Cropping a dataset to a desired spatial range
# works for dvs and derived data types 2020_01 doesn't yet work for frame datatype

from split import cropSpace

# This example takes all events with x in 9-19 inclusive, and with y in 0-9 inclusive
cropped = cropSpace(imported, minX=9, maxX=19, maxY= 9)

#%% Splitting a dataset by labels

from split import splitByLabel, selectByLabel

# select your labelled data from an import (alternatively label it using some processing)
labelledData = imported['data']['right']['dvslbl']

splitData = splitByLabel(labelledData, 'lbl') # 'lbl' in this case is the name of the field that contains the labels

# Alternatively, select a single label

selectedData = selectByLabel(labelledData, 'lbl', 3) # in this case, 3 is the label you want to isolate

#%% VISUALISATION FUNCTIONS

#%% Info

from info import info

info(imported)

#%% Timestamp info only

from info import infoTsForImportedDicts

infoTsForImportedDicts(imported)

#%% General function for plotting data present in channels according to its type

from plot import plot

plot(imported, zeroT=True, polarised=True)

#%% More specific example of taking particular datatypes from an import and
# visualising them

from plotDvsContrast import plotDvsContrast
from plotFrame import plotFrame
from plotPose import plotPose

# Unpack
cam = imported['data']['davis']['cam']
events = imported['data']['davis']['dvs']
frames = imported['data']['davis']['frame']
poses = imported['data']['extra']['pose6q']
if 'frame' in imported['data']['extra']:
    depthMaps = imported['data']['extra']['frame']
else:
    depthMaps = None

plotDvsContrast(events, numPlots=2)
plotFrame(frames, numPlots=15)
plotPose(poses)
# Using 'frame' datatype to import and visualise depth maps
if depthMaps:
    plotFrame(depthMaps, numPlots=2)


#%% Distribute the sub-plots in a visualisation of dvs event images
# by event density rather than time

from plotDvsContrast import plotDvs

plotDvs(imported, numPlots=6, distributeBy='events')

#%% This function for visualising dvs surface-of-active-events-(SAE)-like
# visualisation is not included in standard plot function.

from plotDvsLastTs import plotDvsLastTs

plotDvsLastTs(imported)

#%% EXPORT FUNCTIONS

#%% Export to yarp

from exportIitYarp import exportIitYarp

exportIitYarp(imported,
              exportFilePath= 'C:/data/mvsec/indoorFlying1Yarp',
              pathForPlayback= '/home/sbamford/data/mvsec/indoorFlying1Yarp')


#%% Choose to export only specific datatypes;
# overwrite data if the export is already there

from exportIitYarp import exportIitYarp

exportIitYarp(imported,
              exportFilePath= 'C:/data/rpg/shapes_rotation',
              pathForPlayback= '/home/sbamford/data/rpg/shapes_rotation',
              dataTypes = ['imu', 'dvs'],
              overwrite=True)
--------------------------------------------------------------------------------
/examples/examplesBatchImportExport.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Copyright (C) 2020 Event-driven Perception for Robotics
Authors: Simeon Bamford, Aiko Dinale
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.

Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
This script contains a set of examples of how to import and export data
from and to (large) files, one batch at a time.
In each case, change the file paths as required to point to your own example data.
"""

#%% Preliminaries

import os, sys  # A system-specific prefix, for working between linux and windows
prefix = 'C:/' if os.name == 'nt' else '/home/adinale/'
# A path to the location of bimvee library, if it is not installed
sys.path.append(os.path.join(prefix, 'src/event-driven/python/libraries'))

#%%

from bimvee.importIitYarp import importIitYarp
from bimvee.exportIitYarp import exportIitYarp
from bimvee.info import info
from bimvee.timestamps import offsetTimestampsForAContainer

dataset_root = "Documents/Event-Driven_Dataset/datasetIMU/roundTable/"
dataset_name = "roundTable_6DOF_004"
filePathOrName = os.path.join(prefix, dataset_root, dataset_name)

#%%
# Read a first batch - at most importMaxBytes bytes, starting from the
# beginning of the file.

container1 = importIitYarp(filePathOrName=filePathOrName,
                           tsBits=30,
                           convertSamplesToImu=False,
                           importFromByte=0,
                           importMaxBytes=1000000)
info(container1)

# Remember where this batch ended, and its timestamp offset, so that the
# next batch can be made temporally consistent with it.
importedToByte = container1['info']['importedToByte']
tsOffsetContainer1 = container1['info']['tsOffsetFromData']
#tsOffsetFromInfo = container1['info']['tsOffsetFromInfo']

# Export first batch

exportIitYarp(container1,
              exportFilePath= filePathOrName + "_batch01",
              pathForPlayback= filePathOrName + "_batch01",
              dataTypes = ['sample', 'dvs'],
              protectedWrite = False)

#%%

# Read a second batch, continuing from the byte after the last one imported

container2 = importIitYarp(filePathOrName=filePathOrName,
                           tsBits = 30,
                           convertSamplesToImu=False,
                           importFromByte=importedToByte+1,
                           importMaxBytes=1000000)

exportIitYarp(container2,
              exportFilePath= filePathOrName + "_batch02",
              pathForPlayback= filePathOrName + "_batch02",
              dataTypes = ['sample', 'dvs'],
              protectedWrite = False)

#%%

# Offset timestamps in the second batch so that both batches share the
# time base of the first one.

tsOffsetContainer2 = container2['info']['tsOffsetFromData']
offsetToApplyToContainer2 = tsOffsetContainer1 - tsOffsetContainer2
offsetTimestampsForAContainer(container2, offsetToApplyToContainer2)
container2['info']['tsOffsetFromData'] += offsetToApplyToContainer2
info(container2)

# Export first and second batches; the second export appends to the first
# (writeMode='a')

exportIitYarp(container1,
              exportFilePath= filePathOrName + "_batch12",
              pathForPlayback= filePathOrName + "_batch12",
              dataTypes = ['sample', 'dvs'],
              protectedWrite = False)

exportIitYarp(container2,
              exportFilePath= filePathOrName + "_batch12",
              pathForPlayback= filePathOrName + "_batch12",
              dataTypes = ['sample', 'dvs'],
              protectedWrite = False,
              writeMode = 'a')
101 |
--------------------------------------------------------------------------------
/examples/examplesImportThirdPartyDatasets.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Copyright (C) 2020 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.

Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
This script contains a set of examples of how to use the import functions of
the bimvee library.
In each case, change the file paths as required to point to your own data.
"""

#%% import from intel realsense rosbag dump

from bimvee.importRpgDvsRos import importRpgDvsRos

filePathOrName = '/path/to/data.bag'

# You can just import straight away like this:
container = importRpgDvsRos(filePathOrName=filePathOrName)

# Alternatively, you can define a template and get just the topics you want
template = {
    'pose': {
        'pose6q': '/device_0/sensor_0/Pose_0/pose/transform/data',
    },
    'left': {
        'frame': '/device_0/sensor_0/Fisheye_1/image/data',
        'cam': '/device_0/sensor_0/Fisheye_1/info/camera_info',
    },
    'right': {
        'frame': '/device_0/sensor_0/Fisheye_2/image/data',
        'cam': '/device_0/sensor_0/Fisheye_2/info/camera_info',
    }
}
container = importRpgDvsRos(filePathOrName=filePathOrName, template=template)

#%% Import the MVSEC dataset

from bimvee.importRpgDvsRos import importRpgDvsRos

'''
The dataset is available here:
https://daniilidis-group.github.io/mvsec/download/
First download the rosbag files.
'''
# This is for the first data file; the first assignment is a placeholder,
# the second one (which overrides it) is a concrete example - keep whichever
# points at your data.
filePathOrName = '/path/to/indoor_flying1_data.bag'
filePathOrName = 'C:/data/mvsec/indoor_flying1_data.bag'
template = {
    'davisLeft': {
        'dvs': '/davis/left/events',
        'frame': '/davis/left/image_raw',
        'imu': '/davis/left/imu',
        'cam': '/davis/left/camera_info',
    }, 'davisRight': {
        'dvs': '/davis/right/events',
        'frame': '/davis/right/image_raw',
        'imu': '/davis/right/imu',
        'cam': '/davis/right/camera_info',
    }
}
containerData = importRpgDvsRos(filePathOrName=filePathOrName, template=template)

#%%
# This is for the corresponding ground-truth file
filePathOrName = '/path/to/indoor_flying1_gt.bag'
filePathOrName = 'C:/data/external/mvsec/indoor_flying1_gt.bag'
template = {
    'poseLocal': {
        'pose6q': '/davis/left/odometry',
    }, 'poseGlobal': {
        'pose6q': '/davis/left/pose',
    }, 'depthLeft': {
        'frame': '/davis/left/depth_image_raw',
    }, 'depthRight': {
        'frame': '/davis/right/depth_image_raw',
    }
}
# Import either with the template above, or without one to get every topic:
#containerGt = importRpgDvsRos(filePathOrName=filePathOrName, template=template)
containerGt = importRpgDvsRos(filePathOrName=filePathOrName)

# If you want, you can combine these containers into a single container;
# In this case, the 'info' branch becomes inconsistent, but it doesn't really matter

container = containerData
container['data'].update(containerGt['data'])
--------------------------------------------------------------------------------
/examples/examplesPoseAndVicon.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Copyright (C) 2020 Event-driven Perception for Robotics
Authors: Simeon Bamford
         Suman Ghosh
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.

Intended as part of bimvee (Batch Import, Manipulation, Visualisation and Export of Events etc)
This script contains a set of examples of how to use the 'pose6q' data type
of the bimvee library.
In each case, change the file paths as required to point to your own example data.
"""

#%% Preliminaries

import os, sys  # A system-specific prefix, for working between linux and windows
prefix = 'C:/' if os.name == 'nt' else '/home/sbamford/'
# A path to the location of this library, if not installed as a package
sys.path.append(os.path.join(prefix, 'repos/event-driven-poseExamples/python/libraries'))
sys.path.append(os.path.join(prefix, 'repos/event-driven/python'))

#%% Import with all bodies in the same channel

from bimvee.importIitVicon import importIitVicon

# Note: no leading slash on the relative part, otherwise os.path.join
# discards the prefix on posix systems.
filePathOrName = os.path.join(prefix, 'data/2019_12_12_vicon/Trial2WithVicon/Vicon/data.log')
viconDataDict = importIitVicon(filePathOrName=filePathOrName)
if 'uniqueIds' in viconDataDict['data']['vicon']['pose6q']:
    print('The parsed body IDs are: ', viconDataDict['data']['vicon']['pose6q']['uniqueIds'])

#%% Import with all bodies in the same channel, with markers separated from segments

from bimvee.importIitVicon import importIitVicon

filePathOrName = os.path.join(prefix, 'data/2019_12_12_vicon/Trial2WithVicon/Vicon/data.log')
viconDataDict = importIitVicon(filePathOrName=filePathOrName, separateMarkersFromSegments=True)
if 'uniqueIds' in viconDataDict['data']['vicon']['pose6q']:
    print('The parsed body IDs are: ', viconDataDict['data']['vicon']['pose6q']['uniqueIds'])
if 'uniqueIds' in viconDataDict['data']['vicon']['point3']:
    print('The parsed marker IDs are: ', viconDataDict['data']['vicon']['point3']['uniqueIds'])

#%% Import with each body as separate channel

from bimvee.importIitVicon import importIitVicon

filePathOrName = os.path.join(prefix, 'data/2019_12_12_vicon/Trial2WithVicon/Vicon/data.log')
kwargs = {'filePathOrName': filePathOrName,
          'separateBodiesAsChannels': True}
viconDataDict = importIitVicon(**kwargs)
if 'uniqueIds' in viconDataDict['info']:
    print('The parsed body IDs are: ', viconDataDict['info']['uniqueIds'])

#%% Plot trajectories for various subsets of the bodies/markers

from bimvee.plotPose import plotTrajectories
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d

uniqueIds = viconDataDict['info']['uniqueIds']

# StEFI body trajectory
fig = plt.figure(1)
ax = plt.axes(projection='3d')
include = ['StEFI']
exclude = ['Marker']
plotTrajectories(viconDataDict, uniqueIds, include, exclude, ax=ax)

# StEFI labeled markers trajectory
fig = plt.figure(2)
ax = plt.axes(projection='3d')
include = ['StEFI', 'Marker']
exclude = []
plotTrajectories(viconDataDict, uniqueIds, include, exclude, ax=ax)

# Unlabeled markers trajectory
fig = plt.figure(3)
ax = plt.axes(projection='3d')
include = ['UnlMarker']
exclude = []
plotTrajectories(viconDataDict, uniqueIds, include, exclude, ax=ax)

plt.show()


#%% Import poses from Vicon and select only one body for further analysis

from bimvee.importIitVicon import importIitVicon
from bimvee.split import selectByLabel

filePathOrName = os.path.join(prefix, 'data/2019_12_12_vicon/Trial2WithVicon/Vicon/data.log')
kwargs = {'filePathOrName': filePathOrName,
          'separateBodiesAsChannels': False}
viconDataDict = importIitVicon(**kwargs)
posesForSelectedBody = selectByLabel(viconDataDict['data']['vicon']['pose6q'], 'bodyId', 'Subj_StEFI::Seg_body')
del posesForSelectedBody['uniqueIds']

dataDict = {'pose6q': posesForSelectedBody}

#%% Remove erroneous pose samples

# We observe null poses in our data, eliminate these. In practice, these are set
# to point=0,0,0 rotation=0,0.5,0,0 so one way to tell that they're
# erroneous is to test for magnitude != unity

import numpy as np

rotation = dataDict['pose6q']['rotation']
magnitude = np.sum(rotation ** 2, axis = 1)
toKeep = magnitude == 1

dataDict['pose6q']['toKeep'] = toKeep
dataDict['pose6q'] = selectByLabel(dataDict['pose6q'], 'toKeep', True)

#%% Interpolate poses - make sure there is at least one sample every 5 ms
# (maxPeriod is in seconds)

from bimvee.geometry import pose6qInterp

dataDict['pose6q'] = pose6qInterp(dataDict['pose6q'], maxPeriod=0.005)


#%% Construct a pose sequence by hand

'''
This example section makes a series of movements, with range of about 1m,
over a period of 4 seconds, which between them, cover all basic degrees of
freedom in translation and rotation; it produces a sample every 10 ms.
'''

import numpy as np

ts = np.arange(0, 4.01, 0.01, dtype=float)
point = np.zeros((401, 3), dtype=float)
rotation = np.zeros((401, 4), dtype=float)
rotation[:, 0] = 1 # Neutral quaternion pose

# First: yaw-through-pitch anticlockwise

rotation[:100, 1] = np.sin(ts[:100] * np.pi * 2) / 4
rotation[:100, 2] = (np.cos(ts[:100] * np.pi * 2) - 1) / 4
rotation[:100, 0] = np.sqrt(1 - (rotation[:100, 1] ** 2 + rotation[:100, 2] ** 2))

# Second: translation anticlockwise:

point[100:200, 0] = np.sin(ts[100:200] * np.pi * 2) / 2
point[100:200, 1] = (np.cos(ts[100:200] * np.pi * 2) - 1) / 2

# Third: Out-in along z.

point[200:250, 2] = np.arange(0, -1, -0.02)
point[250:300, 2] = np.arange(-1, 0, 0.02)

# Fourth: roll anticlockwise around z:

rotation[300:350, 3] = -np.sqrt(np.arange(0, 1, 0.02))
rotation[300:350, 0] = np.sqrt(np.arange(1, 0, -0.02))
rotation[350:400, 3] = np.sqrt(np.arange(1, 0, -0.02))
rotation[350:400, 0] = np.sqrt(np.arange(0, 1, 0.02))

# Optionally, add an arbitrary translational shift to the whole sequence

point[:, 2] = point[:, 2] - 3
point[:, 1] = point[:, 1] + 3

# Construct the result as a bimvee-style container
dataDict = {
    'pose6q': {
        'ts': ts,
        'point': point,
        'rotation': rotation
        }}

#%%
'''
The above poses are from an initial viewpoint of the camera.
We now convert to the coordinate frame of the simulated world:
The camera is at x=1, y=0, z=1.5, so it's in front of the wall where the square is,
And it's oriented with:
camera z along world -x,
camera x along y
camera y along -z
'''

from bimvee.geometry import combineTwoQuaternions
from pyquaternion import Quaternion as Q

# Use pyquaternion library to give us a quaternion starting with a rotation matrix
cameraToWorldRotMat = np.array([[0, 0, -1], [1, 0, 0], [0, -1, 0]], dtype=float)
cameraToWorldQ = Q(matrix = cameraToWorldRotMat).q

rotationNew = np.zeros_like(rotation)

for idx in range(401):
    rotationNew[idx, :] = combineTwoQuaternions(rotation[idx, :], cameraToWorldQ)

# Same business for the points

pointNew = np.zeros_like(point)
pointNew[:, 0] = -point[:, 2] + 1
pointNew[:, 1] = point[:, 0]
pointNew[:, 2] = -point[:, 1] + 1.5

dataDict['pose6q']['point'] = pointNew
dataDict['pose6q']['rotation'] = rotationNew

#%% Try to visualise that

import visualizer.ntupleviz
import threading

# Create the dualViz app and start it in a thread
visualizerApp = visualizer.ntupleviz.Ntupleviz()
thread = threading.Thread(target=visualizerApp.run)
thread.daemon = True
thread.start()

#%% Visualise

visualizerApp.root.data_controller.data_dict = dataDict

#%% Export a posedict as csv for eSim simulator

from bimvee.exportPoseRpgEsimCsv import exportPoseRpgEsimCsv

filePathAndName = os.path.join(prefix, 'data/poses.csv')
exportPoseRpgEsimCsv(dataDict['pose6q'], filePathAndName=filePathAndName)

#%% Import the resulting simulation

from bimvee.importRpgDvsRos import importRpgDvsRos

filePathOrName = os.path.join(prefix, 'data/2020_02_13 simulation square/simple_square_far.bag')

template = {
    'davis': {
        'dvs': '/cam0/events',
        'frame': '/cam0/image_raw',
        'imu': '/imu'
        },
    'extra': {
        'pose6q': '/cam0/pose',
        'frame': '/cam0/depthmap',
        }}
#        'cam': '/cam0/camera_info',

imported = importRpgDvsRos(filePathOrName=filePathOrName, template=template)

f = {'frame': imported['data']['extra']['frame']}
--------------------------------------------------------------------------------
/examples/examplesSamplesToEvents.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021 Event-driven Perception for Robotics
Authors: Sim Bamford
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.

Intended as part of bimvee (Batch Import, Manipulation, Visualisation
and Export of Events etc)
This script contains a set of examples of how to use the functions of the
bimvee library.
In each case, change the file paths as required to point to your own example data.
"""

#%% Preliminaries - set your paths as necessary, or change working directory

import os, sys  # A system-specific prefix, for working between linux and windows
prefix = 'C:/' if os.name == 'nt' else '/home/sbamford/'
sys.path.insert(0, os.path.join(prefix, 'repos/bimvee'))

#%% load some data

filePathOrName = os.path.join(prefix, 'data/2020_12_10_Ali_SkinExample/simeon_touch_static/samples')

from bimvee.importAe import importAe

container = importAe(filePathOrName=filePathOrName)

#%% Data format

'''
At this point in the script we want to isolate a dict containing a single
sample datatype. Concretely, we want a dict containing at least:
    'ts' - a numpy array of timestamps of type float, assumed to be seconds,
    monotonically increasing.
    'value' (or any other field name) - a numpy array of samples
    with one row (the zeroth dimension) for each timestamp, and additional
    dimensions sufficient to contain the address space.
    For example, if the input is from a camera,
    there may be two additional dimensions for greyscale or 3 in case of RGB data.
'''

inDict = container['data']['general']['skinSamples']

#%% convert dense samples to polarised address events

from bimvee.samplesToEvents import samplesToEvents

outDict = samplesToEvents(inDict,
                          valueKey='pressure',
                          refractoryPeriod = 0.01,
                          threshold = 40.)

#%% Visualise the result

from bimvee.plotSpikeogram import plotSpikeogram

plotSpikeogram(outDict)

#%% Try with Simon's recent data

#for m in range(5):
# Choosing material 4
m = 4

import pickle
import numpy as np

file_name = os.path.join(prefix,
                         'data',
                         '2021_03_09_Simon_Tactile',
                         'data_material_' + str(m+1)) #material_0 only for testing pipeline (no real data), material_1 - 5 different materials
infile = open(file_name,'rb')
dataDict = pickle.load(infile)

experimentDict = dataDict[19]
taxelData = experimentDict['taxel_data']
numSamples = taxelData.shape[0]

# Just choose one taxel
#taxelData = taxelData[:, 6]

# Samples arrive at a fixed period; synthesise the timestamp array
period = 0.01
ts = np.arange(0, numSamples * period, period)

#%%
from bimvee.samplesToEvents import samplesToEvents

inDict = {'ts': ts,
          'value': taxelData}
outDict = samplesToEvents(inDict,
                          refractoryPeriod = 0.01,
                          threshold = 5.)

# plotSpikeogram expects an 'addr' field; default to a single address
if 'addr' not in outDict:
    outDict['addr'] = np.zeros_like(outDict['ts'], dtype=int)

#%%
from bimvee.plotSpikeogram import plotSpikeogram

axes = plotSpikeogram(outDict)

# Overlay the (scaled) analogue samples on the spikeogram for comparison
axes.plot(ts, taxelData/10)
--------------------------------------------------------------------------------
/images/dvslastts.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/event-driven-robotics/bimvee/c92c5a2504c5578c349072a44814a3b5aacd78f6/images/dvslastts.png
--------------------------------------------------------------------------------
/images/eventrate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/event-driven-robotics/bimvee/c92c5a2504c5578c349072a44814a3b5aacd78f6/images/eventrate.png
--------------------------------------------------------------------------------
/images/events.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/event-driven-robotics/bimvee/c92c5a2504c5578c349072a44814a3b5aacd78f6/images/events.png
--------------------------------------------------------------------------------
/images/frames.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/event-driven-robotics/bimvee/c92c5a2504c5578c349072a44814a3b5aacd78f6/images/frames.png
--------------------------------------------------------------------------------
/images/imu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/event-driven-robotics/bimvee/c92c5a2504c5578c349072a44814a3b5aacd78f6/images/imu.png
--------------------------------------------------------------------------------
/images/pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/event-driven-robotics/bimvee/c92c5a2504c5578c349072a44814a3b5aacd78f6/images/pose.png
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
3 | license_files = LICENSE.txt
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
  name = 'bimvee',
  packages=['bimvee', 'bimvee.importRosbag', 'bimvee.importRosbag.importRosbag', 'bimvee.importRosbag.importRosbag.messageTypes', 'bimvee.visualisers'],
  version = '1.0.21',
  license='gpl',
  description = 'Batch Import, Manipulation, Visualisation and Export of Events etc',
  long_description=long_description,
  long_description_content_type='text/markdown',
  author = 'Event-driven Perception for Robotics group at Istituto Italiano di Tecnologia: Simeon Bamford, Suman Ghosh, Aiko Dinale, Massimiliano Iacono, Ander Arriandiaga, etc',
  author_email = 'simbamford@gmail.com',
  url = 'https://github.com/event-driven-robotics/bimvee',
  download_url = 'https://github.com/event-driven-robotics/bimvee_pkg/archive/v1.0.tar.gz',
  # A comma was previously missing between 'address-event representation' and
  # 'spiking neural network', which silently concatenated them into one
  # bogus keyword via implicit string concatenation.
  keywords = ['event', 'event camera', 'event-based', 'event-driven', 'spike', 'dvs', 'dynamic vision sensor', 'neuromorphic', 'aer', 'address-event representation', 'spiking neural network', 'davis', 'atis', 'celex' ],
  install_requires=[
          'numpy',
          'tqdm',
          'setuptools',
          'matplotlib',
          'seaborn',
          'imageio',
          'hickle',
          'opencv-python',
          'dv'
      ],
  classifiers=[
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Topic :: Software Development :: Build Tools',
    'License :: OSI Approved :: GNU General Public License (GPL)',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.7',
  ],
)
--------------------------------------------------------------------------------