├── .gitignore ├── CMakeLists.txt ├── COPYING.txt ├── README.md ├── aravis_install_notes ├── aravis_install_notes.md ├── dhcpd.conf ├── interfaces └── isc-dhcp-server ├── debug_notes_20210621 ├── 20210621_01 ├── 20210621_02 ├── 20210621_03 ├── 20210621_04 └── 20210621_05 ├── examples ├── data_processing │ └── example_data_processing.ipynb ├── demo │ ├── demo_1 │ │ └── src │ │ │ ├── camera_parameters.yaml │ │ │ ├── data_association_parameters.yaml │ │ │ ├── delta_video_config.py │ │ │ ├── delta_video_parameters.yaml │ │ │ ├── home_directory.yaml │ │ │ ├── kalman_parameters.py │ │ │ ├── liveviewer_parameters.yaml │ │ │ ├── tracker_parameters.yaml │ │ │ └── tracking_launcher.launch │ └── demo_2 │ │ └── src │ │ ├── data_association_parameters.yaml │ │ ├── delta_video_config.py │ │ ├── delta_video_parameters.yaml │ │ ├── home_directory.yaml │ │ ├── kalman_parameters.py │ │ ├── raw_data_bag_config.py │ │ ├── tracker_parameters.yaml │ │ └── tracking_launcher.launch └── sample_data │ ├── .ipynb_checkpoints │ └── examples_data_processing-checkpoint.ipynb │ ├── 20151014_145627_N1_deltavideo_bgimg_20151014_1456.png │ ├── 20160412_134708_N1_delta_video.bag │ ├── 20160412_134708_N1_trackedobjects.hdf5 │ ├── config_20160412_134708_N1.py │ ├── launch_delta_video.launch │ └── launch_delta_video_parameters.yaml ├── msg ├── Contourinfo.msg ├── Contourlist.msg ├── DeltaVid.msg ├── Trackedobject.msg └── Trackedobjectlist.msg ├── multi_tracker_analysis ├── Kalman.py ├── __init__.py ├── bag2hdf5.py ├── data_slicing.py ├── dvbag_to_pandas_reader.py ├── estimate_R_and_Q.py ├── gui_trajectory_viewer.ui ├── image_analysis.py ├── plot.py ├── read_hdf5_file_to_pandas.py └── trajectory_analysis.py ├── nodes ├── data_association.py ├── delta_video_player.py ├── delta_video_simplebuffer.py ├── image_processing.py ├── liveviewer.py ├── record_user_input.py ├── republish_pref_obj_data.py ├── save_bag.py ├── save_data_to_hdf5.py ├── set_basename_and_path.py └── tracker_simplebuffer.py ├── 
package.xml ├── pointgreyusb3_notes.md ├── rules └── 99-point-grey-firefly.rules ├── setup.py ├── solidworks └── fly_arena_parts │ ├── light_panel_base_12x12.SLDDRW │ ├── light_panel_base_12x12.SLDPRT │ ├── light_panel_base_12x12_base.SLDDRW │ ├── light_panel_base_12x12_base.SLDPRT │ ├── light_panel_clamp.SLDDRW │ └── light_panel_clamp.SLDPRT └── srv ├── addImageToBackgroundService.srv └── resetBackgroundService.srv /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *~ 3 | include/* 4 | 5 | 6 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(multi_tracker) 3 | 4 | ## Find catkin macros and libraries 5 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 6 | ## is used, also find other catkin packages 7 | find_package(catkin REQUIRED COMPONENTS 8 | geometry_msgs 9 | roscpp 10 | rospy 11 | std_msgs 12 | tf 13 | message_generation 14 | ) 15 | 16 | ## System dependencies are found with CMake's conventions 17 | # find_package(Boost REQUIRED COMPONENTS system) 18 | 19 | 20 | ## Uncomment this if the package has a setup.py. This macro ensures 21 | ## modules and global scripts declared therein get installed 22 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 23 | # catkin_python_setup() 24 | 25 | ################################################ 26 | ## Declare ROS messages, services and actions ## 27 | ################################################ 28 | 29 | ## To declare and build messages, services or actions from within this 30 | ## package, follow these steps: 31 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 32 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 
33 | ## * In the file package.xml: 34 | ## * add a build_depend and a run_depend tag for each package in MSG_DEP_SET 35 | ## * If MSG_DEP_SET isn't empty the following dependencies might have been 36 | ## pulled in transitively but can be declared for certainty nonetheless: 37 | ## * add a build_depend tag for "message_generation" 38 | ## * add a run_depend tag for "message_runtime" 39 | ## * In this file (CMakeLists.txt): 40 | ## * add "message_generation" and every package in MSG_DEP_SET to 41 | ## find_package(catkin REQUIRED COMPONENTS ...) 42 | ## * add "message_runtime" and every package in MSG_DEP_SET to 43 | ## catkin_package(CATKIN_DEPENDS ...) 44 | ## * uncomment the add_*_files sections below as needed 45 | ## and list every .msg/.srv/.action file to be processed 46 | ## * uncomment the generate_messages entry below 47 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 48 | 49 | ## Generate messages in the 'msg' folder 50 | add_message_files( 51 | FILES 52 | Contourinfo.msg 53 | Contourlist.msg 54 | Trackedobject.msg 55 | Trackedobjectlist.msg 56 | DeltaVid.msg 57 | ) 58 | 59 | ## Generate services in the 'srv' folder 60 | add_service_files( 61 | FILES 62 | resetBackgroundService.srv 63 | addImageToBackgroundService.srv 64 | # Service1.srv 65 | # Service2.srv 66 | ) 67 | 68 | ## Generate actions in the 'action' folder 69 | # add_action_files( 70 | # FILES 71 | # Action1.action 72 | # Action2.action 73 | # ) 74 | 75 | ## Generate added messages and services with any dependencies listed here 76 | generate_messages( 77 | DEPENDENCIES 78 | std_msgs 79 | geometry_msgs 80 | ) 81 | 82 | ################################### 83 | ## catkin specific configuration ## 84 | ################################### 85 | ## The catkin_package macro generates cmake config files for your package 86 | ## Declare things to be passed to dependent projects 87 | ## INCLUDE_DIRS: uncomment this if you package contains header files 88 | ## LIBRARIES: 
libraries you create in this project that dependent projects also need 89 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 90 | ## DEPENDS: system dependencies of this project that dependent projects also need 91 | catkin_package( 92 | # INCLUDE_DIRS include 93 | # LIBRARIES multi_tracker 94 | # CATKIN_DEPENDS geometry_msgs roscpp rospy std_msgs 95 | # DEPENDS system_lib 96 | ) 97 | 98 | ########### 99 | ## Build ## 100 | ########### 101 | 102 | ## Specify additional locations of header files 103 | ## Your package locations should be listed before other locations 104 | # include_directories(include) 105 | include_directories( 106 | ${catkin_INCLUDE_DIRS} 107 | ) 108 | 109 | ## Declare a cpp library 110 | # add_library(multi_tracker 111 | # src/${PROJECT_NAME}/multi_tracker.cpp 112 | # ) 113 | 114 | ## Declare a cpp executable 115 | # add_executable(multi_tracker_node src/multi_tracker_node.cpp) 116 | 117 | ## Add cmake target dependencies of the executable/library 118 | ## as an example, message headers may need to be generated before nodes 119 | # add_dependencies(multi_tracker_node multi_tracker_generate_messages_cpp) 120 | 121 | ## Specify libraries to link a library or executable target against 122 | # target_link_libraries(multi_tracker_node 123 | # ${catkin_LIBRARIES} 124 | # ) 125 | 126 | ############# 127 | ## Install ## 128 | ############# 129 | 130 | # all install targets should use catkin DESTINATION variables 131 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 132 | 133 | ## Mark executable scripts (Python etc.) 
for installation 134 | ## in contrast to setup.py, you can choose the destination 135 | # install(PROGRAMS 136 | # scripts/my_python_script 137 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 138 | # ) 139 | 140 | ## Mark executables and/or libraries for installation 141 | # install(TARGETS multi_tracker multi_tracker_node 142 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 143 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 144 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 145 | # ) 146 | 147 | ## Mark cpp header files for installation 148 | # install(DIRECTORY include/${PROJECT_NAME}/ 149 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 150 | # FILES_MATCHING PATTERN "*.h" 151 | # PATTERN ".svn" EXCLUDE 152 | # ) 153 | 154 | ## Mark other files for installation (e.g. launch and bag files, etc.) 155 | # install(FILES 156 | # # myfile1 157 | # # myfile2 158 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 159 | # ) 160 | 161 | ############# 162 | ## Testing ## 163 | ############# 164 | 165 | ## Add gtest based cpp test target and link libraries 166 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_multi_tracker.cpp) 167 | # if(TARGET ${PROJECT_NAME}-test) 168 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 169 | # endif() 170 | 171 | ## Add folders to be run by python nosetests 172 | # catkin_add_nosetests(test) 173 | -------------------------------------------------------------------------------- /COPYING.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons 
to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | See the github page for detailed info: Multi Tracker Github Page 2 | -------------------------------------------------------------------------------- /aravis_install_notes/aravis_install_notes.md: -------------------------------------------------------------------------------- 1 | See: https://florisvanbreugel.com/2017/08/14/basler-gige-cameras-aravis-ros/ 2 | -------------------------------------------------------------------------------- /aravis_install_notes/dhcpd.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Sample configuration file for ISC dhcpd for Debian 3 | # 4 | # Attention: If /etc/ltsp/dhcpd.conf exists, that will be used as 5 | # configuration file instead of this file. 6 | # 7 | # 8 | 9 | # The ddns-updates-style parameter controls whether or not the server will 10 | # attempt to do a DNS update when a lease is confirmed. We default to the 11 | # behavior of the version 2 packages ('none', since DHCP v2 didn't 12 | # have support for DDNS.) 
13 | ddns-update-style none; 14 | 15 | # option definitions common to all supported networks... 16 | option domain-name "example.org"; 17 | option domain-name-servers ns1.example.org, ns2.example.org; 18 | 19 | default-lease-time 600; 20 | max-lease-time 7200; 21 | 22 | # If this DHCP server is the official DHCP server for the local 23 | # network, the authoritative directive should be uncommented. 24 | #authoritative; 25 | 26 | # Use this to send dhcp log messages to a different log file (you also 27 | # have to hack syslog.conf to complete the redirection). 28 | log-facility local7; 29 | 30 | # No service will be given on this subnet, but declaring it helps the 31 | # DHCP server to understand the network topology. 32 | 33 | #subnet 10.152.187.0 netmask 255.255.255.0 { 34 | #} 35 | 36 | # This is a very basic subnet declaration. 37 | 38 | #subnet 10.254.239.0 netmask 255.255.255.224 { 39 | # range 10.254.239.10 10.254.239.20; 40 | # option routers rtr-239-0-1.example.org, rtr-239-0-2.example.org; 41 | #} 42 | 43 | # This declaration allows BOOTP clients to get dynamic addresses, 44 | # which we don't really recommend. 45 | 46 | #subnet 10.254.239.32 netmask 255.255.255.224 { 47 | # range dynamic-bootp 10.254.239.40 10.254.239.60; 48 | # option broadcast-address 10.254.239.31; 49 | # option routers rtr-239-32-1.example.org; 50 | #} 51 | 52 | # A slightly different configuration for an internal subnet. 53 | #subnet 10.5.5.0 netmask 255.255.255.224 { 54 | # range 10.5.5.26 10.5.5.30; 55 | # option domain-name-servers ns1.internal.example.org; 56 | # option domain-name "internal.example.org"; 57 | # option routers 10.5.5.1; 58 | # option broadcast-address 10.5.5.31; 59 | # default-lease-time 600; 60 | # max-lease-time 7200; 61 | #} 62 | 63 | # Hosts which require special configuration options can be listed in 64 | # host statements. 
If no address is specified, the address will be 65 | # allocated dynamically (if possible), but the host-specific information 66 | # will still come from the host declaration. 67 | 68 | #host passacaglia { 69 | # hardware ethernet 0:0:c0:5d:bd:95; 70 | # filename "vmunix.passacaglia"; 71 | # server-name "toccata.fugue.com"; 72 | #} 73 | 74 | # Fixed IP addresses can also be specified for hosts. These addresses 75 | # should not also be listed as being available for dynamic assignment. 76 | # Hosts for which fixed IP addresses have been specified can boot using 77 | # BOOTP or DHCP. Hosts for which no fixed address is specified can only 78 | # be booted with DHCP, unless there is an address range on the subnet 79 | # to which a BOOTP client is connected which has the dynamic-bootp flag 80 | # set. 81 | #host fantasia { 82 | # hardware ethernet 08:00:07:26:c0:a5; 83 | # fixed-address fantasia.fugue.com; 84 | #} 85 | 86 | # You can declare a class of clients and then do address allocation 87 | # based on that. The example below shows a case where all clients 88 | # in a certain class get addresses on the 10.17.224/24 subnet, and all 89 | # other clients get addresses on the 10.0.29/24 subnet. 
90 | 91 | #class "foo" { 92 | # match if substring (option vendor-class-identifier, 0, 4) = "SUNW"; 93 | #} 94 | 95 | #shared-network 224-29 { 96 | # subnet 10.17.224.0 netmask 255.255.255.0 { 97 | # option routers rtr-224.example.org; 98 | # } 99 | # subnet 10.0.29.0 netmask 255.255.255.0 { 100 | # option routers rtr-29.example.org; 101 | # } 102 | # pool { 103 | # allow members of "foo"; 104 | # range 10.17.224.10 10.17.224.250; 105 | # } 106 | # pool { 107 | # deny members of "foo"; 108 | # range 10.0.29.10 10.0.29.230; 109 | # } 110 | #} 111 | 112 | 113 | subnet 192.168.93.0 netmask 255.255.255.0 { 114 | range 192.168.93.10 192.168.93.20; 115 | option broadcast-address 192.168.93.255; 116 | } 117 | 118 | subnet 192.168.94.0 netmask 255.255.255.0 { 119 | range 192.168.94.10 192.168.94.20; 120 | option broadcast-address 192.168.94.255; 121 | } 122 | -------------------------------------------------------------------------------- /aravis_install_notes/interfaces: -------------------------------------------------------------------------------- 1 | auto lo eth2 eth3 eth4 2 | iface lo inet loopback 3 | 4 | iface eth2 inet dhcp 5 | 6 | iface eth3 inet static 7 | address 192.168.93.1 8 | netmask 255.255.255.0 9 | broadcast 192.168.93.255 10 | mtu 9000 11 | 12 | iface eth4 inet static 13 | address 192.168.94.1 14 | netmask 255.255.255.0 15 | broadcast 192.168.94.255 16 | mtu 9000 17 | 18 | -------------------------------------------------------------------------------- /aravis_install_notes/isc-dhcp-server: -------------------------------------------------------------------------------- 1 | # Defaults for isc-dhcp-server initscript 2 | # sourced by /etc/init.d/isc-dhcp-server 3 | # installed at /etc/default/isc-dhcp-server by the maintainer scripts 4 | 5 | # 6 | # This is a POSIX shell fragment 7 | # 8 | 9 | # Path to dhcpd's config file (default: /etc/dhcp/dhcpd.conf). 
10 | #DHCPD_CONF=/etc/dhcp/dhcpd.conf 11 | 12 | # Path to dhcpd's PID file (default: /var/run/dhcpd.pid). 13 | #DHCPD_PID=/var/run/dhcpd.pid 14 | 15 | # Additional options to start dhcpd with. 16 | # Don't use options -cf or -pf here; use DHCPD_CONF/ DHCPD_PID instead 17 | #OPTIONS="" 18 | 19 | # On what interfaces should the DHCP server (dhcpd) serve DHCP requests? 20 | # Separate multiple interfaces with spaces, e.g. "eth0 eth1". 21 | INTERFACES="eth3 eth4" 22 | -------------------------------------------------------------------------------- /debug_notes_20210621/20210621_01: -------------------------------------------------------------------------------- 1 | drainfly@drainfly:~$ roslaunch demo/demo_1/src/tracking_launcher.launch 2 | ... logging to /home/drainfly/.ros/log/0374adf4-d3b1-11eb-94fa-3c7c3f7ddc02/roslaunch-drainfly-32102.log 3 | Checking log directory for disk usage. This may take a while. 4 | Press Ctrl-C to interrupt 5 | Done checking log file disk usage. Usage is <1GB. 6 | 7 | started roslaunch server http://drainfly:36119/ 8 | 9 | SUMMARY 10 | ======== 11 | 12 | PARAMETERS 13 | * /multi_tracker/1/data_association/kalman_parameters_py_file: kalman_parameters.py 14 | * /multi_tracker/1/data_association/max_tracked_objects: 20 15 | * /multi_tracker/1/data_association/n_covariances_to_reject_data: 3 16 | * /multi_tracker/1/data_directory: ~/demo/demo_1/data 17 | * /multi_tracker/1/delta_video/camera_encoding: mono8 18 | * /multi_tracker/1/delta_video/image_topic: /camera/image_raw 19 | * /multi_tracker/1/delta_video/max_change_in_frame: 0.2 20 | * /multi_tracker/1/delta_video/roi_b: 0 21 | * /multi_tracker/1/delta_video/roi_l: 0 22 | * /multi_tracker/1/delta_video/roi_r: -1 23 | * /multi_tracker/1/delta_video/roi_t: -1 24 | * /multi_tracker/1/delta_video/threshold: 6 25 | * /multi_tracker/1/home_directory: ~/demo/demo_1/src 26 | * /multi_tracker/1/liveviewer/camera_encoding: mono8 27 | * /multi_tracker/1/liveviewer/image_topic: /camera/image_raw 28 | 
* /multi_tracker/1/liveviewer/max_frames_to_draw: 50 29 | * /multi_tracker/1/liveviewer/min_persistence_to_draw: 10 30 | * /multi_tracker/1/liveviewer/roi_b: 0 31 | * /multi_tracker/1/liveviewer/roi_l: 0 32 | * /multi_tracker/1/liveviewer/roi_r: -1 33 | * /multi_tracker/1/liveviewer/roi_t: -1 34 | * /multi_tracker/1/tracker/backgroundupdate: 0 35 | * /multi_tracker/1/tracker/camera_encoding: mono8 36 | * /multi_tracker/1/tracker/circular_mask_r: none 37 | * /multi_tracker/1/tracker/circular_mask_x: none 38 | * /multi_tracker/1/tracker/circular_mask_y: none 39 | * /multi_tracker/1/tracker/dilate: 2 40 | * /multi_tracker/1/tracker/erode: 2 41 | * /multi_tracker/1/tracker/image_processing_module: default 42 | * /multi_tracker/1/tracker/image_processor: dark_objects_only 43 | * /multi_tracker/1/tracker/image_topic: /pylon_camera_nod... 44 | * /multi_tracker/1/tracker/max_change_in_frame: 0.2 45 | * /multi_tracker/1/tracker/max_expected_area: 90 46 | * /multi_tracker/1/tracker/max_frames_to_draw: 50 47 | * /multi_tracker/1/tracker/max_size: 30000 48 | * /multi_tracker/1/tracker/medianbgupdateinterval: 0 49 | * /multi_tracker/1/tracker/min_persistence_to_draw: 10 50 | * /multi_tracker/1/tracker/min_size: 5 51 | * /multi_tracker/1/tracker/roi_b: 0 52 | * /multi_tracker/1/tracker/roi_l: 0 53 | * /multi_tracker/1/tracker/roi_r: -1 54 | * /multi_tracker/1/tracker/roi_t: -1 55 | * /multi_tracker/1/tracker/std_dev_update: 2e-05 56 | * /multi_tracker/1/tracker/threshold: 6 57 | * /rosdistro: melodic 58 | * /rosversion: 1.14.11 59 | 60 | NODES 61 | / 62 | data_association_1 (multi_tracker/data_association.py) 63 | delta_video_1 (multi_tracker/delta_video_simplebuffer.py) 64 | liveview_1 (multi_tracker/liveviewer.py) 65 | save_delta_video (multi_tracker/save_bag.py) 66 | save_hdf5_data_1 (multi_tracker/save_data_to_hdf5.py) 67 | set_exp_basename_1 (multi_tracker/set_basename_and_path.py) 68 | tracker_1 (multi_tracker/tracker_simplebuffer.py) 69 | 70 | 
ROS_MASTER_URI=http://localhost:11311 71 | 72 | process[set_exp_basename_1-1]: started with pid [32138] 73 | process[tracker_1-2]: started with pid [32139] 74 | process[data_association_1-3]: started with pid [32140] 75 | process[save_hdf5_data_1-4]: started with pid [32141] 76 | process[liveview_1-5]: started with pid [32142] 77 | process[delta_video_1-6]: started with pid [32143] 78 | process[save_delta_video-7]: started with pid [32144] 79 | Saving hdf5 data to: /home/drainfly/demo/demo_1/data/20210623_133303_N1_trackedobjects.hdf5 80 | Traceback (most recent call last): 81 | File "/home/drainfly/catkin_ws/src/multi_tracker/nodes/save_data_to_hdf5.py", line 149, in 82 | datalistener = DataListener(options.nodenum, options.record_time_hrs) 83 | File "/home/drainfly/catkin_ws/src/multi_tracker/nodes/save_data_to_hdf5.py", line 39, in __init__ 84 | self.hdf5 = h5py.File(filename, 'w') 85 | File "/home/drainfly/.local/lib/python2.7/site-packages/h5py/_hl/files.py", line 408, in __init__ 86 | swmr=swmr) 87 | File "/home/drainfly/.local/lib/python2.7/site-packages/h5py/_hl/files.py", line 179, in make_fid 88 | fid = h5f.create(name, h5f.ACC_TRUNC, fapl=fapl, fcpl=fcpl) 89 | File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper 90 | File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper 91 | File "h5py/h5f.pyx", line 108, in h5py.h5f.create 92 | IOError: Unable to create file (unable to open file: name = '/home/drainfly/demo/demo_1/data/20210623_133303_N1_trackedobjects.hdf5', errno = 2, error message = 'No such file or directory', flags = 13, o_flags = 242) 93 | 94 | Solution: 95 | 96 | mkdir /home/drainfly/demo/demo_1/data 97 | -------------------------------------------------------------------------------- /debug_notes_20210621/20210621_02: -------------------------------------------------------------------------------- 1 | drainfly@drainfly:~$ roslaunch demo/demo_1/src/tracking_launcher.launch 2 | ... 
logging to /home/drainfly/.ros/log/0374adf4-d3b1-11eb-94fa-3c7c3f7ddc02/roslaunch-drainfly-32597.log 3 | Checking log directory for disk usage. This may take a while. 4 | Press Ctrl-C to interrupt 5 | Done checking log file disk usage. Usage is <1GB. 6 | 7 | started roslaunch server http://drainfly:40745/ 8 | 9 | SUMMARY 10 | ======== 11 | 12 | PARAMETERS 13 | * /multi_tracker/1/data_association/kalman_parameters_py_file: kalman_parameters.py 14 | * /multi_tracker/1/data_association/max_tracked_objects: 20 15 | * /multi_tracker/1/data_association/n_covariances_to_reject_data: 3 16 | * /multi_tracker/1/data_directory: ~/demo/demo_1/data 17 | * /multi_tracker/1/delta_video/camera_encoding: mono8 18 | * /multi_tracker/1/delta_video/image_topic: /camera/image_raw 19 | * /multi_tracker/1/delta_video/max_change_in_frame: 0.2 20 | * /multi_tracker/1/delta_video/roi_b: 0 21 | * /multi_tracker/1/delta_video/roi_l: 0 22 | * /multi_tracker/1/delta_video/roi_r: -1 23 | * /multi_tracker/1/delta_video/roi_t: -1 24 | * /multi_tracker/1/delta_video/threshold: 6 25 | * /multi_tracker/1/home_directory: ~/demo/demo_1/src 26 | * /multi_tracker/1/liveviewer/camera_encoding: mono8 27 | * /multi_tracker/1/liveviewer/image_topic: /camera/image_raw 28 | * /multi_tracker/1/liveviewer/max_frames_to_draw: 50 29 | * /multi_tracker/1/liveviewer/min_persistence_to_draw: 10 30 | * /multi_tracker/1/liveviewer/roi_b: 0 31 | * /multi_tracker/1/liveviewer/roi_l: 0 32 | * /multi_tracker/1/liveviewer/roi_r: -1 33 | * /multi_tracker/1/liveviewer/roi_t: -1 34 | * /multi_tracker/1/tracker/backgroundupdate: 0 35 | * /multi_tracker/1/tracker/camera_encoding: mono8 36 | * /multi_tracker/1/tracker/circular_mask_r: none 37 | * /multi_tracker/1/tracker/circular_mask_x: none 38 | * /multi_tracker/1/tracker/circular_mask_y: none 39 | * /multi_tracker/1/tracker/dilate: 2 40 | * /multi_tracker/1/tracker/erode: 2 41 | * /multi_tracker/1/tracker/image_processing_module: default 42 | * 
/multi_tracker/1/tracker/image_processor: dark_objects_only 43 | * /multi_tracker/1/tracker/image_topic: /pylon_camera_nod... 44 | * /multi_tracker/1/tracker/max_change_in_frame: 0.2 45 | * /multi_tracker/1/tracker/max_expected_area: 90 46 | * /multi_tracker/1/tracker/max_frames_to_draw: 50 47 | * /multi_tracker/1/tracker/max_size: 30000 48 | * /multi_tracker/1/tracker/medianbgupdateinterval: 0 49 | * /multi_tracker/1/tracker/min_persistence_to_draw: 10 50 | * /multi_tracker/1/tracker/min_size: 5 51 | * /multi_tracker/1/tracker/roi_b: 0 52 | * /multi_tracker/1/tracker/roi_l: 0 53 | * /multi_tracker/1/tracker/roi_r: -1 54 | * /multi_tracker/1/tracker/roi_t: -1 55 | * /multi_tracker/1/tracker/std_dev_update: 2e-05 56 | * /multi_tracker/1/tracker/threshold: 6 57 | * /rosdistro: melodic 58 | * /rosversion: 1.14.11 59 | 60 | NODES 61 | / 62 | data_association_1 (multi_tracker/data_association.py) 63 | delta_video_1 (multi_tracker/delta_video_simplebuffer.py) 64 | liveview_1 (multi_tracker/liveviewer.py) 65 | save_delta_video (multi_tracker/save_bag.py) 66 | save_hdf5_data_1 (multi_tracker/save_data_to_hdf5.py) 67 | set_exp_basename_1 (multi_tracker/set_basename_and_path.py) 68 | tracker_1 (multi_tracker/tracker_simplebuffer.py) 69 | 70 | ROS_MASTER_URI=http://localhost:11311 71 | 72 | process[set_exp_basename_1-1]: started with pid [32633] 73 | process[tracker_1-2]: started with pid [32634] 74 | process[data_association_1-3]: started with pid [32635] 75 | process[save_hdf5_data_1-4]: started with pid [32636] 76 | process[liveview_1-5]: started with pid [32637] 77 | process[delta_video_1-6]: started with pid [32638] 78 | process[save_delta_video-7]: started with pid [32639] 79 | Saving hdf5 data to: /home/drainfly/demo/demo_1/data/20210623_133413_N1_trackedobjects.hdf5 80 | Traceback (most recent call last): 81 | File "/home/drainfly/catkin_ws/src/multi_tracker/nodes/save_data_to_hdf5.py", line 149, in 82 | datalistener = DataListener(options.nodenum, 
options.record_time_hrs) 83 | File "/home/drainfly/catkin_ws/src/multi_tracker/nodes/save_data_to_hdf5.py", line 40, in __init__ 84 | self.hdf5.swmr_mode = True # helps prevent file corruption if closed improperly 85 | File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper 86 | File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper 87 | File "/home/drainfly/.local/lib/python2.7/site-packages/h5py/_hl/files.py", line 311, in swmr_mode 88 | self.id.start_swmr_write() 89 | File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper 90 | File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper 91 | File "h5py/h5f.pyx", line 541, in h5py.h5f.FileID.start_swmr_write 92 | ValueError: Unable to convert file format (file superblock version - should be at least 3) 93 | 94 | 95 | Solution: 96 | 97 | Comment this out: 98 | self.hdf5.swmr_mode = True 99 | 100 | From save_data_to_hdf5.py 101 | 102 | Note: SWMR write mode does exist, but it requires the file to have been created with the latest HDF5 file format, i.e. h5py.File(filename, 'w', libver='latest'); otherwise start_swmr_write() fails with the "superblock version - should be at least 3" error seen above. Commenting out the swmr_mode line (as done here) avoids the error at the cost of losing the corruption protection; passing libver='latest' at file creation would be the alternative fix. 
103 | 104 | 105 | 106 | 107 | 108 | Traceback (most recent call last): 109 | File "/home/drainfly/catkin_ws/src/multi_tracker/nodes/data_association.py", line 18, in 110 | import multi_tracker_analysis.Kalman as Kalman 111 | ImportError: No module named multi_tracker_analysis.Kalman 112 | 113 | Solution: 114 | 115 | drainfly@drainfly:~$ cd ~/catkin_ws/src/multi_tracker/ 116 | drainfly@drainfly:~/catkin_ws/src/multi_tracker$ ls 117 | aravis_install_notes multi_tracker_analysis rules 118 | CMakeLists.txt nodes setup.py 119 | COPYING.txt package.xml solidworks 120 | examples pointgreyusb3_notes.md srv 121 | msg README.md 122 | drainfly@drainfly:~/catkin_ws/src/multi_tracker$ python ./setup.py install --user 123 | -------------------------------------------------------------------------------- /debug_notes_20210621/20210621_03: -------------------------------------------------------------------------------- 1 | drainfly@drainfly:~$ roslaunch demo/demo_1/src/tracking_launcher.launch 2 | ... logging to /home/drainfly/.ros/log/0374adf4-d3b1-11eb-94fa-3c7c3f7ddc02/roslaunch-drainfly-31760.log 3 | Checking log directory for disk usage. This may take a while. 4 | Press Ctrl-C to interrupt 5 | Done checking log file disk usage. Usage is <1GB. 
6 | 7 | started roslaunch server http://drainfly:45443/ 8 | 9 | SUMMARY 10 | ======== 11 | 12 | PARAMETERS 13 | * /multi_tracker/1/data_association/kalman_parameters_py_file: kalman_parameters.py 14 | * /multi_tracker/1/data_association/max_tracked_objects: 20 15 | * /multi_tracker/1/data_association/n_covariances_to_reject_data: 3 16 | * /multi_tracker/1/data_directory: ~/demo/demo_1/data 17 | * /multi_tracker/1/delta_video/camera_encoding: mono8 18 | * /multi_tracker/1/delta_video/image_topic: /camera/image_raw 19 | * /multi_tracker/1/delta_video/max_change_in_frame: 0.2 20 | * /multi_tracker/1/delta_video/roi_b: 0 21 | * /multi_tracker/1/delta_video/roi_l: 0 22 | * /multi_tracker/1/delta_video/roi_r: -1 23 | * /multi_tracker/1/delta_video/roi_t: -1 24 | * /multi_tracker/1/delta_video/threshold: 6 25 | * /multi_tracker/1/home_directory: ~/demo/demo_1/src 26 | * /multi_tracker/1/liveviewer/camera_encoding: mono8 27 | * /multi_tracker/1/liveviewer/image_topic: /camera/image_raw 28 | * /multi_tracker/1/liveviewer/max_frames_to_draw: 50 29 | * /multi_tracker/1/liveviewer/min_persistence_to_draw: 10 30 | * /multi_tracker/1/liveviewer/roi_b: 0 31 | * /multi_tracker/1/liveviewer/roi_l: 0 32 | * /multi_tracker/1/liveviewer/roi_r: -1 33 | * /multi_tracker/1/liveviewer/roi_t: -1 34 | * /multi_tracker/1/tracker/backgroundupdate: 0 35 | * /multi_tracker/1/tracker/camera_encoding: mono8 36 | * /multi_tracker/1/tracker/circular_mask_r: none 37 | * /multi_tracker/1/tracker/circular_mask_x: none 38 | * /multi_tracker/1/tracker/circular_mask_y: none 39 | * /multi_tracker/1/tracker/dilate: 2 40 | * /multi_tracker/1/tracker/erode: 2 41 | * /multi_tracker/1/tracker/image_processing_module: default 42 | * /multi_tracker/1/tracker/image_processor: dark_objects_only 43 | * /multi_tracker/1/tracker/image_topic: /pylon_camera_nod... 
44 | * /multi_tracker/1/tracker/max_change_in_frame: 0.2 45 | * /multi_tracker/1/tracker/max_expected_area: 90 46 | * /multi_tracker/1/tracker/max_frames_to_draw: 50 47 | * /multi_tracker/1/tracker/max_size: 30000 48 | * /multi_tracker/1/tracker/medianbgupdateinterval: 0 49 | * /multi_tracker/1/tracker/min_persistence_to_draw: 10 50 | * /multi_tracker/1/tracker/min_size: 5 51 | * /multi_tracker/1/tracker/roi_b: 0 52 | * /multi_tracker/1/tracker/roi_l: 0 53 | * /multi_tracker/1/tracker/roi_r: -1 54 | * /multi_tracker/1/tracker/roi_t: -1 55 | * /multi_tracker/1/tracker/std_dev_update: 2e-05 56 | * /multi_tracker/1/tracker/threshold: 6 57 | * /rosdistro: melodic 58 | * /rosversion: 1.14.11 59 | 60 | NODES 61 | / 62 | data_association_1 (multi_tracker/data_association.py) 63 | delta_video_1 (multi_tracker/delta_video_simplebuffer.py) 64 | liveview_1 (multi_tracker/liveviewer.py) 65 | save_delta_video (multi_tracker/save_bag.py) 66 | save_hdf5_data_1 (multi_tracker/save_data_to_hdf5.py) 67 | set_exp_basename_1 (multi_tracker/set_basename_and_path.py) 68 | tracker_1 (multi_tracker/tracker_simplebuffer.py) 69 | 70 | ROS_MASTER_URI=http://localhost:11311 71 | 72 | process[set_exp_basename_1-1]: started with pid [31777] 73 | process[tracker_1-2]: started with pid [31778] 74 | process[data_association_1-3]: started with pid [31779] 75 | process[save_hdf5_data_1-4]: started with pid [31780] 76 | process[liveview_1-5]: started with pid [31781] 77 | process[delta_video_1-6]: started with pid [31782] 78 | process[save_delta_video-7]: started with pid [31783] 79 | Traceback (most recent call last): 80 | File "/home/drainfly/catkin_ws/src/multi_tracker/nodes/save_data_to_hdf5.py", line 13, in 81 | import h5py 82 | ImportError: No module named h5py 83 | 84 | Solution: 85 | 86 | pip install h5py 87 | 88 | *pip install h5py* 89 | 90 | -------------------------------------------------------------------------------- /debug_notes_20210621/20210621_04: 
-------------------------------------------------------------------------------- 1 | drainfly@drainfly:~$ roslaunch demo/demo_1/src/tracking_launcher.launch 2 | ... logging to /home/drainfly/.ros/log/0374adf4-d3b1-11eb-94fa-3c7c3f7ddc02/roslaunch-drainfly-1863.log 3 | Checking log directory for disk usage. This may take a while. 4 | Press Ctrl-C to interrupt 5 | Done checking log file disk usage. Usage is <1GB. 6 | 7 | started roslaunch server http://drainfly:41993/ 8 | 9 | SUMMARY 10 | ======== 11 | 12 | PARAMETERS 13 | * /multi_tracker/1/data_association/kalman_parameters_py_file: kalman_parameters.py 14 | * /multi_tracker/1/data_association/max_tracked_objects: 20 15 | * /multi_tracker/1/data_association/n_covariances_to_reject_data: 3 16 | * /multi_tracker/1/data_directory: ~/demo/demo_1/data 17 | * /multi_tracker/1/delta_video/camera_encoding: mono8 18 | * /multi_tracker/1/delta_video/image_topic: /camera/image_raw 19 | * /multi_tracker/1/delta_video/max_change_in_frame: 0.2 20 | * /multi_tracker/1/delta_video/roi_b: 0 21 | * /multi_tracker/1/delta_video/roi_l: 0 22 | * /multi_tracker/1/delta_video/roi_r: -1 23 | * /multi_tracker/1/delta_video/roi_t: -1 24 | * /multi_tracker/1/delta_video/threshold: 6 25 | * /multi_tracker/1/home_directory: ~/demo/demo_1/src 26 | * /multi_tracker/1/liveviewer/camera_encoding: mono8 27 | * /multi_tracker/1/liveviewer/image_topic: /camera/image_raw 28 | * /multi_tracker/1/liveviewer/max_frames_to_draw: 50 29 | * /multi_tracker/1/liveviewer/min_persistence_to_draw: 10 30 | * /multi_tracker/1/liveviewer/roi_b: 0 31 | * /multi_tracker/1/liveviewer/roi_l: 0 32 | * /multi_tracker/1/liveviewer/roi_r: -1 33 | * /multi_tracker/1/liveviewer/roi_t: -1 34 | * /multi_tracker/1/tracker/backgroundupdate: 0 35 | * /multi_tracker/1/tracker/camera_encoding: mono8 36 | * /multi_tracker/1/tracker/circular_mask_r: none 37 | * /multi_tracker/1/tracker/circular_mask_x: none 38 | * /multi_tracker/1/tracker/circular_mask_y: none 39 | * 
/multi_tracker/1/tracker/dilate: 2 40 | * /multi_tracker/1/tracker/erode: 2 41 | * /multi_tracker/1/tracker/image_processing_module: default 42 | * /multi_tracker/1/tracker/image_processor: dark_objects_only 43 | * /multi_tracker/1/tracker/image_topic: /pylon_camera_nod... 44 | * /multi_tracker/1/tracker/max_change_in_frame: 0.2 45 | * /multi_tracker/1/tracker/max_expected_area: 90 46 | * /multi_tracker/1/tracker/max_frames_to_draw: 50 47 | * /multi_tracker/1/tracker/max_size: 30000 48 | * /multi_tracker/1/tracker/medianbgupdateinterval: 0 49 | * /multi_tracker/1/tracker/min_persistence_to_draw: 10 50 | * /multi_tracker/1/tracker/min_size: 5 51 | * /multi_tracker/1/tracker/roi_b: 0 52 | * /multi_tracker/1/tracker/roi_l: 0 53 | * /multi_tracker/1/tracker/roi_r: -1 54 | * /multi_tracker/1/tracker/roi_t: -1 55 | * /multi_tracker/1/tracker/std_dev_update: 2e-05 56 | * /multi_tracker/1/tracker/threshold: 6 57 | * /rosdistro: melodic 58 | * /rosversion: 1.14.11 59 | 60 | NODES 61 | / 62 | data_association_1 (multi_tracker/data_association.py) 63 | delta_video_1 (multi_tracker/delta_video_simplebuffer.py) 64 | liveview_1 (multi_tracker/liveviewer.py) 65 | save_delta_video (multi_tracker/save_bag.py) 66 | save_hdf5_data_1 (multi_tracker/save_data_to_hdf5.py) 67 | set_exp_basename_1 (multi_tracker/set_basename_and_path.py) 68 | tracker_1 (multi_tracker/tracker_simplebuffer.py) 69 | 70 | ROS_MASTER_URI=http://localhost:11311 71 | 72 | process[set_exp_basename_1-1]: started with pid [1910] 73 | process[tracker_1-2]: started with pid [1911] 74 | process[data_association_1-3]: started with pid [1912] 75 | process[save_hdf5_data_1-4]: started with pid [1915] 76 | process[liveview_1-5]: started with pid [1916] 77 | process[delta_video_1-6]: started with pid [1917] 78 | process[save_delta_video-7]: started with pid [1918] 79 | Saving hdf5 data to: /home/drainfly/demo/demo_1/data/20210623_133659_N1_trackedobjects.hdf5 80 | Traceback (most recent call last): 81 | File 
"/home/drainfly/catkin_ws/src/multi_tracker/nodes/save_data_to_hdf5.py", line 149, in 82 | datalistener = DataListener(options.nodenum, options.record_time_hrs) 83 | File "/home/drainfly/catkin_ws/src/multi_tracker/nodes/save_data_to_hdf5.py", line 40, in __init__ 84 | self.hdf5.swmr_mode = True # helps prevent file corruption if closed improperly 85 | File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper 86 | File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper 87 | File "/home/drainfly/.local/lib/python2.7/site-packages/h5py/_hl/files.py", line 311, in swmr_mode 88 | self.id.start_swmr_write() 89 | File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper 90 | File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper 91 | File "h5py/h5f.pyx", line 541, in h5py.h5f.FileID.start_swmr_write 92 | ValueError: Unable to convert file format (file superblock version - should be at least 3) 93 | [set_exp_basename_1-1] process has finished cleanly 94 | log file: /home/drainfly/.ros/log/0374adf4-d3b1-11eb-94fa-3c7c3f7ddc02/set_exp_basename_1-1*.log 95 | [save_hdf5_data_1-4] process has died [pid 1915, exit code 1, cmd /home/drainfly/catkin_ws/src/multi_tracker/nodes/save_data_to_hdf5.py --nodenum=1 __name:=save_hdf5_data_1 __log:=/home/drainfly/.ros/log/0374adf4-d3b1-11eb-94fa-3c7c3f7ddc02/save_hdf5_data_1-4.log]. 
96 | log file: /home/drainfly/.ros/log/0374adf4-d3b1-11eb-94fa-3c7c3f7ddc02/save_hdf5_data_1-4*.log 97 | Using open cv: 3.2.0 98 | Open CV 3 99 | Using open cv: 3.2.0 100 | Open CV 3 101 | Loaded configuration: /home/drainfly/demo/demo_1/src/delta_video_config.py 102 | Using open cv: 3.2.0 103 | Open CV 3 104 | Using default parameter: use_moments = True 105 | Using open cv: 3.2.0 106 | Open CV 3 107 | Using default parameter: liveview = False 108 | Using default parameter: morph_open_kernel_size = 3 109 | Traceback (most recent call last): 110 | File "/home/drainfly/catkin_ws/src/multi_tracker/nodes/data_association.py", line 18, in 111 | import multi_tracker_analysis.Kalman as Kalman 112 | File "/home/drainfly/.local/lib/python2.7/site-packages/multi_tracker_analysis/__init__.py", line 2, in 113 | import bag2hdf5 114 | File "/home/drainfly/.local/lib/python2.7/site-packages/multi_tracker_analysis/bag2hdf5.py", line 19, in 115 | import progressbar 116 | ImportError: No module named progressbar 117 | 118 | 119 | Solution: 120 | 121 | pip install progressbar --user 122 | 123 | pip install pandas --user 124 | pip install scipy --user 125 | 126 | git clone https://github.com/florisvb/FlyPlotLib 127 | remove sympy dependency, since sympy does not seem to want to install and it's not really needed 128 | python ./setup.py install --user 129 | -------------------------------------------------------------------------------- /debug_notes_20210621/20210621_05: -------------------------------------------------------------------------------- 1 | live viewer not showing up, but camera is working 2 | 3 | change liveviewer_parameters.yaml so its image_topic matches the camera's actual published topic 4 | -------------------------------------------------------------------------------- /examples/demo/demo_1/src/camera_parameters.yaml: -------------------------------------------------------------------------------- 1 | camera: {ExposureTimeAbs: 1900, Gain: 100, softwaretriggerrate: 30, mtu: 9000,
AcquisitionFrameRate: 30} 2 | -------------------------------------------------------------------------------- /examples/demo/demo_1/src/data_association_parameters.yaml: -------------------------------------------------------------------------------- 1 | /multi_tracker/1/data_association/kalman_parameters_py_file: 'kalman_parameters.py' 2 | /multi_tracker/1/data_association/max_tracked_objects: 20 3 | /multi_tracker/1/data_association/n_covariances_to_reject_data: 3 4 | -------------------------------------------------------------------------------- /examples/demo/demo_1/src/delta_video_config.py: -------------------------------------------------------------------------------- 1 | class Config: 2 | def __init__(self): 3 | self.basename = 'delta_video' 4 | self.directory = '~/demo/demo_1/data' 5 | self.topics = ['/multi_tracker/1/delta_video',] 6 | self.record_length_hours = 1 7 | -------------------------------------------------------------------------------- /examples/demo/demo_1/src/delta_video_parameters.yaml: -------------------------------------------------------------------------------- 1 | /multi_tracker/1/delta_video/image_topic: '/camera/image_raw' 2 | /multi_tracker/1/delta_video/threshold: 6 3 | /multi_tracker/1/delta_video/camera_encoding: 'mono8' 4 | /multi_tracker/1/delta_video/max_change_in_frame: 0.2 5 | /multi_tracker/1/delta_video/roi_l: 0 6 | /multi_tracker/1/delta_video/roi_r: -1 7 | /multi_tracker/1/delta_video/roi_b: 0 8 | /multi_tracker/1/delta_video/roi_t: -1 9 | -------------------------------------------------------------------------------- /examples/demo/demo_1/src/home_directory.yaml: -------------------------------------------------------------------------------- 1 | /multi_tracker/1/home_directory: '~/demo/demo_1/src' 2 | /multi_tracker/1/data_directory: '~/demo/demo_1/data' 3 | -------------------------------------------------------------------------------- /examples/demo/demo_1/src/kalman_parameters.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | ### Define kalman filter properties ######## 4 | phi = np.matrix([ [1, 1, 0, 0, 0, 0, 0, 0, 0, 0], 5 | [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 6 | [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 7 | [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], 8 | [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], 9 | [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], 10 | [0, 0, 0, 0, 0, 0, 1, 1, 0, 0], 11 | [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 12 | [0, 0, 0, 0, 0, 0, 0, 0, 1, 1], 13 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]) 14 | 15 | H = np.matrix([ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 16 | [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 17 | [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 18 | [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], 19 | [0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]) 20 | P0 = 10*np.eye(10) 21 | Q = .2*np.matrix(np.eye(10)) 22 | R = 50*np.matrix(np.eye(5)) 23 | 24 | gamma = None 25 | gammaW = None 26 | 27 | max_covariance = 30 28 | max_velocity = 10 29 | 30 | association_matrix = np.matrix([[1,1,0,0,0]], dtype=float).T 31 | association_matrix /= np.linalg.norm(association_matrix) 32 | -------------------------------------------------------------------------------- /examples/demo/demo_1/src/liveviewer_parameters.yaml: -------------------------------------------------------------------------------- 1 | /multi_tracker/1/liveviewer/image_topic: '/camera/image_raw' 2 | /multi_tracker/1/liveviewer/camera_encoding: 'mono8' 3 | /multi_tracker/1/liveviewer/min_persistence_to_draw: 10 4 | /multi_tracker/1/liveviewer/max_frames_to_draw: 50 5 | /multi_tracker/1/liveviewer/roi_l: 0 6 | /multi_tracker/1/liveviewer/roi_r: -1 7 | /multi_tracker/1/liveviewer/roi_b: 0 8 | /multi_tracker/1/liveviewer/roi_t: -1 9 | 10 | -------------------------------------------------------------------------------- /examples/demo/demo_1/src/tracker_parameters.yaml: -------------------------------------------------------------------------------- 1 | /multi_tracker/1/tracker/image_topic: '/camera/image_raw' 2 | 
/multi_tracker/1/tracker/threshold: 6 3 | /multi_tracker/1/tracker/backgroundupdate: 0 4 | /multi_tracker/1/tracker/medianbgupdateinterval: 0 5 | /multi_tracker/1/tracker/camera_encoding: 'mono8' 6 | /multi_tracker/1/tracker/min_persistence_to_draw: 10 7 | /multi_tracker/1/tracker/max_frames_to_draw: 50 8 | /multi_tracker/1/tracker/erode: 2 9 | /multi_tracker/1/tracker/dilate: 2 10 | /multi_tracker/1/tracker/max_change_in_frame: 0.2 11 | /multi_tracker/1/tracker/std_dev_update: 0.00002 12 | /multi_tracker/1/tracker/image_processor: 'dark_objects_only' 13 | /multi_tracker/1/tracker/image_processing_module: 'default' 14 | /multi_tracker/1/tracker/min_size: 5 15 | /multi_tracker/1/tracker/max_size: 30000 16 | /multi_tracker/1/tracker/max_expected_area: 90 17 | /multi_tracker/1/tracker/roi_l: 0 18 | /multi_tracker/1/tracker/roi_r: -1 19 | /multi_tracker/1/tracker/roi_b: 0 20 | /multi_tracker/1/tracker/roi_t: -1 21 | /multi_tracker/1/tracker/circular_mask_x: 'none' 22 | /multi_tracker/1/tracker/circular_mask_y: 'none' 23 | /multi_tracker/1/tracker/circular_mask_r: 'none' 24 | -------------------------------------------------------------------------------- /examples/demo/demo_1/src/tracking_launcher.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | # set up src and data directories and define prefix name 4 | 5 | 6 | 7 | # load parameters for the tracking, and the data association 8 | 9 | 10 | 11 | # launch tracking and data association nodes 12 | 13 | 14 | 15 | # launch node to save data to hdf5 16 | 17 | 18 | # load parameters for, and show live tracking 19 | 20 | 21 | 22 | # load parameters for, process, and save, delta video bag file 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /examples/demo/demo_2/src/data_association_parameters.yaml: -------------------------------------------------------------------------------- 1 | 
/multi_tracker/2/data_association/kalman_parameters_py_file: 'kalman_parameters.py' 2 | /multi_tracker/2/data_association/max_tracked_objects: 20 3 | /multi_tracker/2/data_association/n_covariances_to_reject_data: 3 4 | -------------------------------------------------------------------------------- /examples/demo/demo_2/src/delta_video_config.py: -------------------------------------------------------------------------------- 1 | class Config: 2 | def __init__(self): 3 | self.basename = 'delta_video' 4 | self.directory = '~/orchard/data' 5 | self.topics = ['/multi_tracker/delta_video',] 6 | -------------------------------------------------------------------------------- /examples/demo/demo_2/src/delta_video_parameters.yaml: -------------------------------------------------------------------------------- 1 | /multi_tracker/2/delta_video/image_topic: '/camera/image_raw' 2 | /multi_tracker/2/delta_video/threshold: 10 3 | /multi_tracker/2/delta_video/camera_encoding: 'mono8' 4 | /multi_tracker/2/delta_video/max_change_in_frame: 0.2 5 | /multi_tracker/2/delta_video/roi_l: 207 6 | /multi_tracker/2/delta_video/roi_r: 480 7 | /multi_tracker/2/delta_video/roi_b: 45 8 | /multi_tracker/2/delta_video/roi_t: 260 9 | -------------------------------------------------------------------------------- /examples/demo/demo_2/src/home_directory.yaml: -------------------------------------------------------------------------------- 1 | /multi_tracker/2/home_directory: '~/demo/demo_2/src' 2 | /multi_tracker/2/data_directory: '~/demo/demo_2/data' 3 | -------------------------------------------------------------------------------- /examples/demo/demo_2/src/kalman_parameters.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | ### Define kalman filter properties ######## 4 | phi = np.matrix([ [1, 1, 0, 0, 0, 0, 0, 0, 0, 0], 5 | [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 6 | [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 7 | [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], 
8 | [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], 9 | [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], 10 | [0, 0, 0, 0, 0, 0, 1, 1, 0, 0], 11 | [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 12 | [0, 0, 0, 0, 0, 0, 0, 0, 1, 1], 13 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]) 14 | 15 | H = np.matrix([ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 16 | [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 17 | [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 18 | [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], 19 | [0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]) 20 | P0 = 10*np.eye(10) 21 | Q = .5*np.matrix(np.eye(10)) 22 | R = 20*np.matrix(np.eye(5)) 23 | 24 | gamma = None 25 | gammaW = None 26 | 27 | max_covariance = 50 28 | max_velocity = 10 29 | 30 | association_matrix = np.matrix([[1,1,0,0,0]], dtype=float).T 31 | association_matrix /= np.linalg.norm(association_matrix) 32 | -------------------------------------------------------------------------------- /examples/demo/demo_2/src/raw_data_bag_config.py: -------------------------------------------------------------------------------- 1 | class Config: 2 | def __init__(self): 3 | self.basename = 'raw_data_N2' 4 | self.directory = '~/orchard/data' 5 | self.topics = ['/multi_tracker/2/tracked_objects'] 6 | -------------------------------------------------------------------------------- /examples/demo/demo_2/src/tracker_parameters.yaml: -------------------------------------------------------------------------------- 1 | /multi_tracker/2/tracker/image_topic: '/camera/image_raw' 2 | /multi_tracker/2/tracker/threshold: 10 3 | /multi_tracker/2/tracker/backgroundupdate: 0 4 | /multi_tracker/2/tracker/medianbgupdateinterval: 600 5 | /multi_tracker/2/tracker/camera_encoding: 'mono8' 6 | /multi_tracker/2/tracker/min_persistence_to_draw: 10 7 | /multi_tracker/2/tracker/max_frames_to_draw: 50 8 | /multi_tracker/2/tracker/erode: 0 9 | /multi_tracker/2/tracker/dilate: 0 10 | /multi_tracker/2/tracker/max_change_in_frame: 0.2 11 | /multi_tracker/2/tracker/std_dev_update: 0.00002 12 | /multi_tracker/2/tracker/image_processor: 'dark_objects_only' 13 | 
/multi_tracker/2/tracker/image_processing_module: 'default' 14 | /multi_tracker/2/tracker/min_size: 5 15 | /multi_tracker/2/tracker/max_size: 30000 16 | /multi_tracker/2/tracker/roi_l: 207 17 | /multi_tracker/2/tracker/roi_r: 480 18 | /multi_tracker/2/tracker/roi_b: 45 19 | /multi_tracker/2/tracker/roi_t: 260 20 | -------------------------------------------------------------------------------- /examples/demo/demo_2/src/tracking_launcher.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /examples/sample_data/20151014_145627_N1_deltavideo_bgimg_20151014_1456.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/florisvb/multi_tracker/f2bef123ab5f8d9540e1c9af35728a86ea3254f4/examples/sample_data/20151014_145627_N1_deltavideo_bgimg_20151014_1456.png -------------------------------------------------------------------------------- /examples/sample_data/20160412_134708_N1_delta_video.bag: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/florisvb/multi_tracker/f2bef123ab5f8d9540e1c9af35728a86ea3254f4/examples/sample_data/20160412_134708_N1_delta_video.bag -------------------------------------------------------------------------------- /examples/sample_data/20160412_134708_N1_trackedobjects.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/florisvb/multi_tracker/f2bef123ab5f8d9540e1c9af35728a86ea3254f4/examples/sample_data/20160412_134708_N1_trackedobjects.hdf5 -------------------------------------------------------------------------------- /examples/sample_data/config_20160412_134708_N1.py: -------------------------------------------------------------------------------- 1 | import 
imp 2 | import os 3 | import numpy as np 4 | import multi_tracker_analysis as mta 5 | 6 | def load_sensory_stimulus_on(): 7 | # write some code that reads a file and reformats stimulus on/off times to have the form of a list of epoch timestamp pairs, e.g. [[t1,t2],[t3,t4]] 8 | return [[1444888597, 1444888599], [1444888602, 1444888603]] 9 | 10 | class Config(object): 11 | def __init__(self, path, identifiercode=''): 12 | if '.py' in path: 13 | self.path = os.path.dirname(path) 14 | else: 15 | self.path = path 16 | self.identifiercode = identifiercode 17 | 18 | # pre-processing data parameters 19 | self.preprocess_data_function = self.preprocess_data 20 | self.minlength = 5 # in frames 21 | self.maxspeed = 10 # pixels / frame 22 | self.minspeed = 0 23 | self.minimal_cumulative_distance_travelled = 4 24 | 25 | # other parameters 26 | if self.identifiercode == '20160412_134708_N1': # only do this for this particular example 27 | self.sensory_stimulus_on = load_sensory_stimulus_on() 28 | else: 29 | self.sensory_stimulus_on = [] 30 | 31 | def preprocess_data(self, pandas_dataframe): 32 | print 'Preprocessing data - see config file for details!'
33 | 34 | pandas_dataframe = mta.read_hdf5_file_to_pandas.cull_short_trajectories(pandas_dataframe, self.minlength) 35 | pandas_dataframe = mta.read_hdf5_file_to_pandas.remove_rows_above_speed_threshold(pandas_dataframe, speed_threshold=self.maxspeed) 36 | pandas_dataframe = mta.read_hdf5_file_to_pandas.remove_objects_that_never_exceed_minimum_speed(pandas_dataframe, speed_threshold=self.minspeed) 37 | pandas_dataframe = mta.read_hdf5_file_to_pandas.cull_trajectories_that_do_not_cover_much_x_or_y_distance(pandas_dataframe, min_distance_travelled=self.minimal_cumulative_distance_travelled) 38 | 39 | # Delete cut and join trajectories 40 | instructions_filename = mta.read_hdf5_file_to_pandas.get_filename(self.path, 'delete_cut_join_instructions.pickle') 41 | if instructions_filename is not None: 42 | pandas_dataframe = mta.read_hdf5_file_to_pandas.delete_cut_join_trajectories_according_to_instructions(pandas_dataframe, instructions_filename) 43 | else: 44 | print 'No delete cut join instructions found in path!' 
45 | 46 | return pandas_dataframe 47 | 48 | -------------------------------------------------------------------------------- /examples/sample_data/launch_delta_video.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /examples/sample_data/launch_delta_video_parameters.yaml: -------------------------------------------------------------------------------- 1 | use_sim_time: true 2 | -------------------------------------------------------------------------------- /msg/Contourinfo.msg: -------------------------------------------------------------------------------- 1 | Header header 2 | float64 dt 3 | float64 x 4 | float64 y 5 | float64 angle 6 | float64 area 7 | float64 ecc 8 | -------------------------------------------------------------------------------- /msg/Contourlist.msg: -------------------------------------------------------------------------------- 1 | Header header 2 | Contourinfo[] contours 3 | -------------------------------------------------------------------------------- /msg/DeltaVid.msg: -------------------------------------------------------------------------------- 1 | Header header 2 | string background_image 3 | uint16[] xpixels 4 | uint16[] ypixels 5 | uint16[] values 6 | -------------------------------------------------------------------------------- /msg/Trackedobject.msg: -------------------------------------------------------------------------------- 1 | Header header 2 | geometry_msgs/Point position 3 | geometry_msgs/Vector3 velocity 4 | float64 angle 5 | float64 size 6 | float64 covariance 7 | uint32 objid 8 | uint32 persistence 9 | geometry_msgs/Point measurement 10 | -------------------------------------------------------------------------------- /msg/Trackedobjectlist.msg: -------------------------------------------------------------------------------- 1 | Header header 2 | 
Trackedobject[] tracked_objects 3 | 4 | -------------------------------------------------------------------------------- /multi_tracker_analysis/Kalman.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.stats 3 | import matplotlib.pyplot as plt 4 | 5 | # class for running the discrete kalman filter algorithm from table 3.1 6 | class DiscreteKalmanFilter(object): 7 | def __init__(self, x0=None, P0=None, phi=None, gamma=None, H=None, Q=None, R=None, gammaW=None): 8 | ''' 9 | Discrete Kalman Filter 10 | 11 | nstates ------- number of states 12 | nmeasurements - number of measurements 13 | x0 ----------- initial state 14 | P0 ----------- initial covariance 15 | phi -------- drift dynamics model (e.g. A in xdot = Ax + Bu) 16 | gamma -------- control dynamics model (e.g. B in xdot = Ax + Bu) 17 | H ------------ observation model, maps states to observations (e.g. C in y = Cx + Du) 18 | Q ------------ state covariance 19 | R ------------ measurement covariance 20 | gammaW ------- 21 | 22 | ''' 23 | 24 | # initialize variables, to make class more user friendly 25 | assert phi is not None, "need to define phi" 26 | assert H is not None, "need to define H" 27 | 28 | self.nstates = phi.shape[0] 29 | self.nmeasurements = H.shape[0] 30 | 31 | if gamma is None: 32 | ncontrols = 1 33 | gamma = np.matrix(np.zeros([self.nstates, ncontrols])) 34 | if Q is None: 35 | Q = np.matrix(np.eye(self.nstates)) 36 | if R is None: 37 | R = np.matrix(np.eye(self.nmeasurements)) 38 | if gammaW is None: 39 | gammaW = np.matrix(np.eye(self.nstates)) 40 | if x0 is None: 41 | x0 = np.ones([self.nstates, 1]) 42 | if P0 is None: 43 | P0 = np.matrix(np.eye(self.nstates)) 44 | 45 | # save variables for later use 46 | self.xhat_apriori = x0 47 | self.xhat = x0 48 | self.P_apriori = P0 49 | self.P = P0 50 | self.P_aposteriori = P0 51 | self.phi = phi 52 | self.gamma = gamma 53 | self.Q = Q 54 | self.R = R 55 | self.H = H 56 | 
self.gammaW = gammaW 57 | 58 | # the meat of the algorithm 59 | def update(self, measurement=None, control=None): 60 | 61 | if control is None: 62 | control = np.matrix(np.zeros([1, 1])) 63 | 64 | # get apriori terms from memory 65 | xhat_apriori = self.xhat_apriori 66 | P_apriori = self.P_apriori 67 | phi = self.phi 68 | gamma = self.gamma 69 | Q = self.Q 70 | R = self.R 71 | H = self.H 72 | gammaW = self.gammaW 73 | 74 | # calculate kalman gain 75 | K = P_apriori*H.T*(H*P_apriori*H.T+R).I 76 | 77 | # update step 78 | if measurement is not None: 79 | xhat_aposteriori = xhat_apriori + K*(measurement - H*xhat_apriori) 80 | I = np.matrix(np.eye(self.nstates)) 81 | P_aposteriori = (I-K*H)*P_apriori 82 | else: 83 | xhat_aposteriori = xhat_apriori 84 | P_aposteriori = P_apriori 85 | 86 | # propagate step 87 | xhat_new_apriori = phi*xhat_aposteriori + gamma*control 88 | P_new_apriori = phi*P_aposteriori*phi.T + gammaW*Q*gammaW.T 89 | 90 | # save new terms 91 | self.xhat_apriori = xhat_new_apriori 92 | self.P_apriori = P_new_apriori 93 | self.xhat = xhat_aposteriori 94 | self.P = P_aposteriori 95 | 96 | # return the current aposteriori estimates 97 | return self.xhat, self.P, K 98 | 99 | 100 | if __name__ == '__main__': 101 | 102 | # Generate synthetic data 103 | time_points = np.linspace(0, 20, 500) 104 | 105 | clean_pos_x = np.sin(time_points) 106 | clean_pos_y = np.sin(time_points*3) 107 | 108 | dynamics_noise = 1e-6 109 | observation_noise = 1e-1 110 | v = scipy.stats.norm(0,dynamics_noise) # noise 111 | w = scipy.stats.norm(0,observation_noise) # noise 112 | noisy_pos_x = clean_pos_x + v.rvs(len(clean_pos_x)) 113 | noisy_pos_y = clean_pos_y + v.rvs(len(clean_pos_y)) 114 | 115 | # Initialize kalman filter 116 | # Note: number of states = 4 (2 position states, 2 velocity states) 117 | # State vector: [[pos_x], [vel_x], [pos_y], [vel_y]] (note shape will be a column vector) 118 | 119 | # Drift dynamics (constant velocity model) 120 | phi = np.matrix([[1, 1, 0, 0], 121 | 
[0, 1, 0, 0], 122 | [0, 0, 1, 1], 123 | [0, 0, 0, 1]]) 124 | 125 | # Observation dynamics (observe position and velocity) 126 | H = np.matrix([[1, 0, 0, 0], 127 | [0, 0, 1, 0]]) 128 | 129 | # Initial covariance 130 | P0 = 10*np.eye(4) 131 | 132 | # Dynamics covariance 133 | Q = dynamics_noise**.5*np.matrix(np.eye(4)) 134 | 135 | # Observation covariance 136 | R = observation_noise**.5*np.matrix(np.eye(2)) 137 | 138 | # Initial state 139 | x0 = np.matrix([noisy_pos_x[0], 0, noisy_pos_y[0], 0]).T 140 | 141 | kalman_filter = DiscreteKalmanFilter( x0=x0, 142 | P0=P0, 143 | phi=phi, 144 | gamma=None, 145 | H=H, 146 | Q=Q, 147 | R=R, 148 | gammaW=None) 149 | 150 | # Run the filter 151 | 152 | # Initialize filtered data arrays 153 | filtered_pos_x = np.zeros_like(noisy_pos_x) 154 | filtered_vel_x = np.zeros_like(noisy_pos_x) 155 | filtered_pos_y = np.zeros_like(noisy_pos_y) 156 | filtered_vel_y = np.zeros_like(noisy_pos_y) 157 | observation_x = np.zeros_like(noisy_pos_x) 158 | observation_y = np.zeros_like(noisy_pos_y) 159 | covariance = np.zeros_like(noisy_pos_y) 160 | 161 | # Iterate through the data 162 | for i in range(1, len(noisy_pos_x)): 163 | observation = np.matrix([noisy_pos_x[i]+w.rvs(), noisy_pos_y[i]+w.rvs()]).T 164 | xhat, P, K = kalman_filter.update(observation) 165 | filtered_pos_x[i] = xhat[0,0] 166 | filtered_vel_x[i] = xhat[1,0] 167 | filtered_pos_y[i] = xhat[2,0] 168 | filtered_vel_y[i] = xhat[3,0] 169 | observation_x[i] = observation[0] 170 | observation_y[i] = observation[1] 171 | covariance[i] = np.linalg.norm(P.diagonal()) 172 | 173 | # Plot the results 174 | # Black = real data, '*' is noisy data 175 | # Red = filtered data 176 | fig = plt.figure() 177 | ax_x = fig.add_subplot(311) 178 | ax_y = fig.add_subplot(312) 179 | ax_cov = fig.add_subplot(313) 180 | 181 | ax_x.plot(time_points, clean_pos_x, 'black') 182 | ax_x.plot(time_points, noisy_pos_x, '*', color='black') 183 | ax_x.plot(time_points, observation_x, '*', color='green') 184 | 
ax_x.plot(time_points, filtered_pos_x, color='red') 185 | ax_x.set_xlabel('time') 186 | ax_x.set_ylabel('x position') 187 | 188 | ax_y.plot(time_points, clean_pos_y, 'black') 189 | ax_y.plot(time_points, noisy_pos_y, '*', color='black') 190 | ax_y.plot(time_points, observation_y, '*', color='green') 191 | ax_y.plot(time_points, filtered_pos_y, color='red') 192 | ax_y.set_xlabel('time') 193 | ax_y.set_ylabel('y position') 194 | 195 | ax_cov.plot(time_points, 3*np.sqrt(covariance), 'blue') 196 | ax_cov.plot(time_points, np.abs(filtered_pos_x - noisy_pos_x), 'red') 197 | ax_cov.set_xlabel('time') 198 | ax_cov.set_ylabel('covariance') 199 | -------------------------------------------------------------------------------- /multi_tracker_analysis/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | version_info = sys.version_info 3 | python_version = sys.version_info[0] 4 | 5 | import multi_tracker_analysis.data_slicing as data_slicing 6 | import multi_tracker_analysis.read_hdf5_file_to_pandas as read_hdf5_file_to_pandas 7 | import multi_tracker_analysis.plot as plot 8 | import multi_tracker_analysis.estimate_R_and_Q as estimate_R_and_Q 9 | import multi_tracker_analysis.Kalman as Kalman 10 | import multi_tracker_analysis.trajectory_analysis as trajectory_analysis 11 | 12 | if python_version == 2: 13 | import multi_tracker_analysis.bag2hdf5 as bag2hdf5 14 | import multi_tracker_analysis.dvbag_to_pandas_reader as dvbag_to_pandas_reader 15 | else: 16 | print('Cannot import ROS related packages in python 3') -------------------------------------------------------------------------------- /multi_tracker_analysis/bag2hdf5.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # written by sdvillal / StrawLab: https://github.com/strawlab/bag2hdf5/blob/master/bag2hdf5 4 | # copied here for convenience 5 | from __future__ import print_function 6 | 7 | import 
FLOAT_TIME = True  # False to prevent saving of redundant float timestamps


def flatten_msg(msg, t, max_strlen=None):
    """Flatten a ROS message into a flat tuple of primitive values.

    Walks msg.__slots__ / msg._slot_types and emits one (or several) scalar
    values per field; the bag-record timestamp `t` (secs, nsecs[, float secs])
    is always appended at the end.

    Parameters
    ----------
    msg : ROS message instance (must expose __slots__ and _slot_types)
    t : rospy.Time-like object with .secs and .nsecs
    max_strlen : int, required keyword; strings longer than this fail the assert

    Returns
    -------
    tuple of primitive values, in slot order, matching make_dtype(msg).
    """
    assert max_strlen is not None  # don't accept default
    result = []

    for i, attr in enumerate(msg.__slots__):
        rostype = msg._slot_types[i]

        if attr == 'header':
            h = msg.header
            result.extend([h.seq,
                           h.stamp.secs,
                           h.stamp.nsecs,
                           h.frame_id,
                           ])
            if FLOAT_TIME:
                result.append(h.stamp.secs + h.stamp.nsecs * 1e-9)

        elif rostype == 'sensor_msgs/NavSatFix':
            # BUGFIX: this branch previously sat *outside* the loop, where
            # neither `rostype` nor `attr` was defined yet, so it raised
            # NameError on every call. It belongs inside the per-slot chain.
            # NOTE(review): p.status/p.service — on a real NavSatFix these
            # live on msg.status (a NavSatStatus); confirm against the bags
            # this is actually run on.
            p = getattr(msg, attr)
            result.extend([p.latitude, p.longitude, p.altitude,
                           p.position_covariance[0], p.position_covariance[4], p.position_covariance[8],
                           p.status, p.service])

        elif rostype == 'time':
            p = getattr(msg, attr)
            result.extend([p.secs, p.nsecs])
            if FLOAT_TIME:
                result.append(p.secs + p.nsecs * 1e-9)

        elif rostype == 'geometry_msgs/Point':
            # (a second, identical elif for this type was removed)
            p = getattr(msg, attr)
            result.extend([p.x, p.y, p.z])

        elif rostype == 'std_msgs/MultiArrayLayout':
            pass  # layout metadata is deliberately not stored (matches make_dtype)

        elif '[]' in rostype and 'string' not in rostype:
            # fixed-or-variable-length numeric array: splat the elements
            p = getattr(msg, attr)
            result.extend(list(p))

        else:
            p = getattr(msg, attr)
            if rostype == 'string' or rostype == 'string[]':
                if rostype == 'string[]':
                    # List of strings gets joined to one string
                    warnings.warn('string array is joined to single string', RuntimeWarning, stacklevel=2)
                    p = ','.join(p)
                assert len(p) <= max_strlen
            result.append(p)

    # also append the bag-record timestamp
    result.extend([t.secs, t.nsecs])
    if FLOAT_TIME:
        result.append(t.secs + t.nsecs * 1e-9)

    return tuple(result)
def h5append(dset, arr):
    """Grow the resizable HDF5 dataset `dset` along axis 0 and write `arr`
    into the newly created rows."""
    start = dset.shape[0]
    dset.resize(start + len(arr), axis=0)
    dset[start:] = arr
bag.read_messages(): 213 | topics.append(topic) 214 | topics = np.unique(topics).tolist() 215 | print (topics) 216 | 217 | print ('skip messages: ') 218 | print (skip_messages) 219 | print() 220 | 221 | print( 'topics: ') 222 | print (topics) 223 | print() 224 | 225 | try: 226 | with h5py.File(out_fname, mode='w') as out_f: 227 | for topic in topics: 228 | m = -1 229 | for topic, msg, t in bag.read_messages(topics=[topic]): 230 | m += 1 231 | 232 | if topic not in skip_messages.keys(): 233 | skip_messages[topic] = [] 234 | 235 | #print topic, m, skip_messages[topic] 236 | 237 | # update progressbar 238 | pbar.update(bag._file.tell()) 239 | # get the data 240 | 241 | if m not in skip_messages[topic]: 242 | #print(msg) 243 | this_row = flatten_msg(msg, t, max_strlen=max_strlen) 244 | #print(this_row) 245 | 246 | # convert it to numpy element (and dtype) 247 | if topic not in results2: 248 | try: 249 | dtype = make_dtype(msg, max_strlen=max_strlen) 250 | except: 251 | print("*********************************") 252 | print ('topic:', topic, file=sys.stderr) 253 | print ("\nerror while processing message:\n\n%r" % msg, file=sys.stderr) 254 | print ('\nROW:', this_row, file=sys.stderr) 255 | print ("*********************************") 256 | raise 257 | results2[topic] = dict(dtype=dtype, 258 | object=[this_row]) 259 | else: 260 | results2[topic]['object'].append(this_row) 261 | 262 | 263 | # now flush our caches periodically 264 | if len(results2[topic]['object']) >= chunksize: 265 | arr = np.array(**results2[topic]) 266 | if topic not in dsets: 267 | # initial creation 268 | dset = out_f.create_dataset(topic, data=arr, maxshape=(None,), 269 | compression='gzip', 270 | compression_opts=9) 271 | assert dset.compression == 'gzip' 272 | assert dset.compression_opts == 9 273 | dsets[topic] = dset 274 | else: 275 | # append to existing dataset 276 | h5append(dsets[topic], arr) 277 | del arr 278 | # clear the cached values 279 | results2[topic]['object'] = [] 280 | 281 | else: 
282 | print ('skipping message: ', m) 283 | # done reading bag file. flush remaining data to h5 file 284 | for topic in results2: 285 | print (topic) 286 | #print results2[topic] 287 | print() 288 | if not len(results2[topic]['object']): 289 | # no data 290 | continue 291 | print('*****') 292 | #print(results2[topic]) 293 | arr = np.array(**results2[topic]) 294 | if topic in dsets: 295 | h5append(dsets[topic], arr) 296 | else: 297 | out_f.create_dataset(topic, 298 | data=arr, 299 | compression='gzip', 300 | compression_opts=9) 301 | 302 | except: 303 | if os.path.exists(out_fname): 304 | os.unlink(out_fname) 305 | raise 306 | finally: 307 | pass 308 | pbar.finish() 309 | 310 | if __name__ == '__main__': 311 | parser = argparse.ArgumentParser() 312 | parser.add_argument('filename', type=str, help="the .bag file") 313 | parser.add_argument('--max_strlen', type=int, default=255, 314 | help="maximum length of encoded strings") 315 | parser.add_argument('--out', type=str, default=None, 316 | help="name of output file") 317 | parser.add_argument('--topic', type=str, nargs='*', 318 | help="topic name to convert. defaults to all. 
" 319 | "multiple may be specified.") 320 | args = parser.parse_args() 321 | 322 | if not os.path.exists(args.filename): 323 | print ('No file %s' % args.filename, file=sys.stderr) 324 | sys.exit(1) 325 | fname = os.path.splitext(args.filename)[0] 326 | if args.out is not None: 327 | output_fname = args.out 328 | output_fname = os.path.expanduser(output_fname) 329 | else: 330 | output_fname = fname + '.hdf5' 331 | if os.path.exists(output_fname): 332 | print ('will not overwrite %s' % output_fname, file=sys.stderr) 333 | sys.exit(1) 334 | 335 | bag2hdf5(args.filename, 336 | output_fname, 337 | max_strlen=args.max_strlen, 338 | topics=args.topic) 339 | -------------------------------------------------------------------------------- /multi_tracker_analysis/data_slicing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def get_keys_in_framerange(pd, framerange): 4 | return np.unique(pd.ix[framerange[0]:framerange[-1]].objid) 5 | 6 | def get_frames_for_key(pd, key): 7 | return pd[pd.objid==key].frames.values 8 | 9 | def get_data_in_framerange(pd, framerange): 10 | # pd_subset 11 | return pd.ix[framerange[0]:framerange[-1]] 12 | 13 | def get_data_in_epoch_timerange(pd, timerange): 14 | # pd_subset 15 | return pd[(pd.time_epoch>timerange[0]) & (pd.time_epoch radius**2 ) 61 | pd[region_name] = np.zeros_like(pd.position_x) 62 | pd[region_name].iloc[indices] = 1 63 | return pd 64 | 65 | def remove_objects_that_enter_area_outside_circular_region(pd, center, radius, region_name='outofbounds'): 66 | pd = calc_frames_with_object_NOT_in_circular_region(pd, center, radius, region_name=region_name) 67 | outofbounds = np.unique(pd[pd[region_name]==1].objid.values) 68 | keys_ok = [key for key in pd.objid if key not in outofbounds] 69 | indices_where_object_acceptable = pd.objid.isin(keys_ok) 70 | culled_pd = pd[indices_where_object_acceptable] 71 | return culled_pd 72 | 73 | def 
def calc_frames_with_object_in_rectangular_region(pd, x_range, y_range, z_range=None, region_name='region'):
    '''
    Mark the frames where the object lies strictly inside the axis-aligned box.

    x_range, y_range, (z_range) - (min, max) pairs in the same units as
    position_x / position_y / position_z. If z_range is None only x and y are
    tested. Adds a 0/1 column `region_name` to `pd` and returns `pd`.
    '''
    x = pd.position_x
    y = pd.position_y
    in_region = (x > x_range[0]) & (x < x_range[1]) & (y > y_range[0]) & (y < y_range[1])
    if z_range is not None:
        z = pd.position_z
        in_region = in_region & (z > z_range[0]) & (z < z_range[1])
    indices = np.where(in_region)
    # Build the flag column as a plain array first: assigning through
    # pd[region_name].iloc[indices] is chained assignment and is not
    # guaranteed to write back in modern pandas.
    flags = np.zeros_like(np.asarray(pd.position_x), dtype=float)
    flags[indices] = 1
    pd[region_name] = flags
    return pd
def plot_heatmap_of_dvbag(dvbag_pandas, threshold, bgimg_filename=None):
    """Build a 2-D occupancy histogram of strongly darkened pixels.

    dvbag_pandas - DataFrame with x, y and difference columns (pixel
    coordinates and pixel-vs-background difference).
    threshold - NOTE(review): currently unused; the query below keeps the
    original hard-coded cutoff of -30 to preserve behavior. Confirm whether
    callers expect `threshold` to be honored before switching.
    bgimg_filename - optional background image used only to size the bins
    (5-pixel resolution); defaults to a (600, 800) frame.

    Returns (hist, binsx, binsy) from np.histogram2d.
    BUGFIX: the histogram used to be computed and silently discarded; it is
    now returned so callers can actually plot or analyze it.
    """
    if bgimg_filename is not None:
        background_image = cv2.imread(bgimg_filename, 0)
        shape = background_image.shape
    else:
        shape = (600, 800)

    binsx = np.arange(0, shape[0], 5)
    binsy = np.arange(0, shape[1], 5)

    dvbag_pandas_subset = dvbag_pandas.query('difference < -30')
    hist, binsx, binsy = np.histogram2d(dvbag_pandas_subset.x.values, dvbag_pandas_subset.y.values, (binsx, binsy))
    return hist, binsx, binsy
14 | ''' 15 | if type(x) is list: 16 | x = np.array(x) 17 | x_expected = np.tile(np.mean(x), len(x)) 18 | x_centered = x-x_expected 19 | x_cov = np.dot(x_centered, x_centered.T) 20 | return x_cov 21 | 22 | def calc_actual_covariance_for_trajectory_given_q(trajec, kalman_parameter_py_file, q=1, r=10, no_data_after_nth_frame=30): 23 | ''' 24 | This function propagates kalman filter estimates and covariances with no new information after frame=no_data_after_nth_frame for a given Q=q*I 25 | The actual covariances of the data are calculated as (cov_i = (xi-xi_expected)*(xi-xi_expected)') 26 | These can then be compared to the covariances from the kalman filter 27 | 28 | Returns: actual positional covariances, kalman filter positional covariances 29 | 30 | Strategy: run the function for various values of q, until the actual and kalman estimated covariances grow at approximately the same rate 31 | 32 | trajec - trajectory object from multi_tracker_analysis.read_csv_file_to_python.load_data_as_python_object_from_csv_file 33 | trajectory should be minimum 2*no_data_after_nth_frame frames long, the longer the better 34 | ''' 35 | 36 | kalman_parameters = imp.load_source('kalman_parameters', kalman_parameter_py_file) 37 | Q = q*np.matrix(np.eye(kalman_parameters.phi.shape[0])) 38 | R = r*np.matrix(np.eye(5)) 39 | 40 | KF_nm = Kalman.DiscreteKalmanFilter(x0=np.matrix([trajec.position_x.iloc[0], 0, trajec.position_y.iloc[0], 0, 0, 0, 0, 0, 0, 0]).T, 41 | P0=kalman_parameters.P0, 42 | phi=kalman_parameters.phi, 43 | gamma=None, 44 | H=kalman_parameters.H, 45 | Q=Q, 46 | R=kalman_parameters.R, 47 | gammaW=None, 48 | ) 49 | 50 | errors = [] 51 | positional_covariances = [] 52 | KF_covariances = [] 53 | 54 | for frame in range(0,no_data_after_nth_frame): 55 | measurement = np.matrix([ trajec.position_x.iloc[frame], trajec.position_y.iloc[frame], 0, 0, 0]).T 56 | xhat, P, K = KF_nm.update(measurement) 57 | estimated_position = np.array([xhat[0,0], xhat[2,0]]) 58 | actual_position = 
def calc_actual_covariance_for_all_trajecs_given_q(dataset, keys, kalman_parameter_py_file, q=1, r=10, no_data_after_nth_frame=30):
    '''
    Run calc_actual_covariance_for_trajectory_given_q on each trajectory named
    in `keys` that is long enough, then plot the resulting covariance curves:
    actual covariances in green, Kalman-filter covariances in red.

    Use "keys = multi_tracker_analysis.data_slicing.get_keys_of_length_greater_than(data, no_data_after_nth_frame*2)" to get the keys
    '''
    actual_covariances = []
    kalman_covariances = []
    kept_keys = []

    for key in keys:
        trajec = dataset.trajec(key)
        # too short to provide a meaningful no-measurement tail
        if len(trajec.measurement_x) < no_data_after_nth_frame * 2:
            continue
        actual, estimated = calc_actual_covariance_for_trajectory_given_q(
            trajec, kalman_parameter_py_file, q, r, no_data_after_nth_frame)
        actual_covariances.append(actual)
        kalman_covariances.append(estimated)
        kept_keys.append(key)

    fig = plt.figure()
    ax = fig.add_subplot(111)

    for n in range(len(kept_keys)):
        ax.plot(actual_covariances[n], color='green')
        ax.plot(kalman_covariances[n], color='red')

    plt.show()
class ClickCircle(object):
    """Interactive tool: left-click three or more points on the displayed
    image and a circle is fit through them (drawn in green, clicked points in
    red). Press Esc to exit."""

    def __init__(self, filename):
        # filename: path to the image to annotate
        self.image = cv2.imread(filename)

        self.display_name = "Display"
        cv2.namedWindow(self.display_name)

        cv2.setMouseCallback(self.display_name, self.on_mouse_click)

        self.circle_points_x = []
        self.circle_points_y = []
        self.circle_fit = None  # [center_x, center_y, radius] once >= 3 points

    def on_mouse_click(self, event, x, y, flags, param):
        # Collect clicked points; refit the circle once we have at least 3.
        if event == cv2.EVENT_LBUTTONUP:
            self.circle_points_x.append(x)
            self.circle_points_y.append(y)

            if len(self.circle_points_x) >= 3:
                x, y, R = estimate_circle_from_data_points(self.circle_points_x, self.circle_points_y)
                self.circle_fit = [x, y, R]

    def draw(self):
        # Redraw on a copy so the original image stays clean.
        canvas = copy.copy(self.image)
        for i in range(len(self.circle_points_x)):
            cv2.circle(canvas, (self.circle_points_x[i], self.circle_points_y[i]), 2, [0, 0, 255], 2)
        if self.circle_fit is not None:
            cv2.circle(canvas, (int(self.circle_fit[0]), int(self.circle_fit[1])), int(self.circle_fit[2]), [0, 255, 0], 2)

        cv2.imshow("Display", canvas)

    def run(self):
        # Redraw until Esc (27) is pressed.
        while (cv2.waitKey(30) != 27):
            self.draw()
        # BUGFIX: was cv.destroyAllWindows() -- `cv` is never imported in this
        # module (only cv2 is), so exiting raised NameError.
        cv2.destroyAllWindows()
class ClickPixels(object):
    """Interactive tool: display an image, print and record the pixel
    coordinates of every left-click (drawn in red). Press Esc to exit."""

    def __init__(self, filename):
        # filename: path to the image to annotate
        self.image = cv2.imread(filename)

        self.display_name = "Display"
        cv2.namedWindow(self.display_name)

        cv2.setMouseCallback(self.display_name, self.on_mouse_click)

        self.points = []  # list of [x, y] clicked pixels

    def on_mouse_click(self, event, x, y, flags, param):
        # Only react to left-button release.
        if event != cv2.EVENT_LBUTTONUP:
            return
        print(x, y)
        self.points.append([x, y])

    def draw(self):
        # Draw markers on a scratch copy so the source image stays clean.
        canvas = copy.copy(self.image)
        for px, py in self.points:
            cv2.circle(canvas, (px, py), 2, [0, 0, 255], 2)
        cv2.imshow("Display", canvas)

    def run(self):
        # Redraw until Esc (27) is pressed.
        while cv2.waitKey(30) != 27:
            self.draw()
        cv2.destroyAllWindows()
def __init__(self, filename): 173 | self.image = cv2.imread(filename) 174 | 175 | self.display_name = "Display" 176 | cv2.namedWindow(self.display_name) 177 | 178 | cv2.setMouseCallback(self.display_name, self.on_mouse_click) 179 | 180 | self.points = [] 181 | 182 | def on_mouse_click(self, event, x, y, flags, param): 183 | if event == cv2.EVENT_LBUTTONUP: 184 | print (x, y) 185 | self.points.append(np.array([x,y])) 186 | 187 | def draw(self): 188 | canvas = copy.copy(self.image) 189 | for point in self.points: 190 | cv2.circle(canvas, (point[0], point[1]), 2, [0,0,255], 2) 191 | if len(self.points) > 1: 192 | print (np.linalg.norm(self.points[1] - self.points[0])) 193 | cv2.imshow("Display", canvas) 194 | #cv2.waitKey(1) 195 | 196 | def run(self): 197 | while (cv2.waitKey(30) != 27): 198 | self.draw() 199 | cv2.destroyAllWindows(); 200 | 201 | if __name__ == '__main__': 202 | 203 | parser = OptionParser() 204 | parser.add_option("--filename", type="str", dest="filename", default='', 205 | help="filename of image") 206 | parser.add_option("--analysis", type="str", dest="analysis", default='', 207 | help="pixels or circle") 208 | (options, args) = parser.parse_args() 209 | 210 | if options.analysis == 'pixels': 211 | imageGUI = ClickPixels(options.filename) 212 | elif options.analysis == 'circle': 213 | imageGUI = ClickCircle(options.filename) 214 | elif options.analysis == 'ellipse': 215 | imageGUI = ClickEllipse(options.filename) 216 | elif options.analysis == 'line': 217 | imageGUI = ClickLine(options.filename) 218 | 219 | imageGUI.run() 220 | 221 | -------------------------------------------------------------------------------- /multi_tracker_analysis/plot.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import os 4 | 5 | def get_filename(path, contains): 6 | cmd = 'ls ' + path 7 | ls = os.popen(cmd).read() 8 | all_filelist = ls.split('\n') 9 | try: 10 | 
def get_continuous_chunks(array, array2=None, jump=1, return_index=False):
    '''
    Split `array` into chunks of consecutive values: a new chunk starts
    wherever |array[i+1] - array[i]| > jump.

    array2 - optional second array sliced with the same break points.
    return_index - if True, return ONLY the list of index arrays (one
    np.arange per chunk); chunks themselves are discarded (kept for
    backward compatibility).

    Returns
    -------
    (chunks, break_points)                  if array2 is None
    (chunks, array2_chunks, break_points)   if array2 is given
    index                                    if return_index is True
    '''
    # BUGFIX: np.diff(array)[i] flags the gap BETWEEN elements i and i+1, so
    # the break index must be i+1. Without the +1, every boundary was shifted
    # one element early (e.g. [1,1,2,2] split as [1], [1,2,2]).
    break_points = np.where(np.abs(np.diff(array)) > jump)[0] + 1
    break_points = np.insert(break_points, 0, 0)
    break_points = np.insert(break_points, len(break_points), len(array))

    chunks = []
    array2_chunks = []
    index = []
    for i, break_point in enumerate(break_points):
        if break_point >= len(array):
            break  # trailing sentinel (== len(array)) starts no chunk
        chunk = array[break_point:break_points[i + 1]]
        if type(chunk) is not list:
            chunk = chunk.tolist()
        chunks.append(chunk)

        if array2 is not None:
            array2_chunk = array2[break_point:break_points[i + 1]]
            if type(array2_chunk) is not list:
                array2_chunk = array2_chunk.tolist()
            array2_chunks.append(array2_chunk)

        if return_index:
            index.append(np.arange(break_point, break_points[i + 1]))

    if type(break_points) is not list:
        break_points = break_points.tolist()

    if return_index:
        return index

    if array2 is None:
        return chunks, break_points
    return chunks, array2_chunks, break_points
def calculate_stopped_and_walking_frames(trajec):
    '''
    Label every frame of `trajec` as walking (1) or stopped (0), storing the
    result in trajec.walking (same length as trajec.position_x).

    Frames slower than stopped_threshold are stopped, faster than
    walking_threshold are walking; speeds in between inherit the previous
    frame's label (hysteresis). Short chunks are then cleaned up.
    '''
    stopped_threshold = 1  # mm/sec
    walking_threshold = 2  # mm/sec
    minimum_stop_length = 6  # frames, = 0.2 sec at 30 fps (currently unused below)

    trajec.walking = np.zeros_like(trajec.position_x)

    for frame in range(len(trajec.walking)):
        if trajec.speed[frame] < stopped_threshold:
            trajec.walking[frame] = 0
        elif trajec.speed[frame] > walking_threshold:
            trajec.walking[frame] = 1
        else:
            # hysteresis: carry the previous frame's state forward.
            # BUGFIX: the original read trajec.walking_threshold[frame-1],
            # an attribute that never exists; the AttributeError was silently
            # swallowed by a bare except and the state forced to 0, so the
            # hysteresis band never actually carried state.
            trajec.walking[frame] = trajec.walking[frame - 1] if frame > 0 else 0

    chunks, indices = get_continuous_chunks(trajec.walking, array2=None, jump=0.5, return_index=False)

    # merge short (<= 10 frame) chunks into their surroundings
    for c, chunk in enumerate(chunks):
        ind = np.arange(indices[c], indices[c] + len(chunk))
        if len(ind) <= 10 and np.median(trajec.walking[ind]) == 0:
            trajec.walking[ind] = 1
        # NOTE(review): this branch assigns 1 and is therefore a no-op for a
        # walking chunk; it was probably intended to assign 0 (remove short
        # walking bursts). Behavior preserved pending confirmation.
        elif len(ind) <= 10 and np.median(trajec.walking[ind]) == 1:
            trajec.walking[ind] = 1
from std_msgs.msg import Float32, Header, String 13 | from geometry_msgs.msg import Point, Vector3 14 | from multi_tracker.msg import Contourinfo, Contourlist 15 | from multi_tracker.msg import Trackedobject, Trackedobjectlist 16 | 17 | import matplotlib.pyplot as plt 18 | import multi_tracker_analysis.Kalman as Kalman 19 | import imp 20 | 21 | import threading 22 | 23 | 24 | class DataAssociator(object): 25 | def __init__(self, nodenum): 26 | kalman_parameter_py_file = os.path.expanduser( rospy.get_param('/multi_tracker/' + nodenum + '/data_association/kalman_parameters_py_file') ) 27 | self.lockBuffer = threading.Lock() 28 | 29 | try: 30 | self.kalman_parameters = imp.load_source('kalman_parameters', kalman_parameter_py_file) 31 | print('Kalman py file: ', kalman_parameter_py_file) 32 | except: # look in home directory for kalman parameter file 33 | home_directory = os.path.expanduser( rospy.get_param('/multi_tracker/' + nodenum + '/home_directory') ) 34 | kalman_parameter_py_file = os.path.join(home_directory, kalman_parameter_py_file) 35 | self.kalman_parameters = imp.load_source('kalman_parameters', kalman_parameter_py_file) 36 | print('Kalman py file: ', kalman_parameter_py_file) 37 | 38 | self.association_matrix = self.kalman_parameters.association_matrix 39 | self.association_matrix /= np.linalg.norm(self.association_matrix) 40 | self.max_covariance = self.kalman_parameters.max_covariance 41 | self.max_velocity = self.kalman_parameters.max_velocity 42 | 43 | self.tracked_objects = {} 44 | self.current_objid = 0 45 | 46 | #self.min_size = rospy.get_param('/multi_tracker/data_association/min_size') 47 | #self.max_size = rospy.get_param('/multi_tracker/data_association/max_size') 48 | self.max_tracked_objects = rospy.get_param('/multi_tracker/' + nodenum + '/data_association/max_tracked_objects') 49 | self.n_covariances_to_reject_data = rospy.get_param('/multi_tracker/' + nodenum + '/data_association/n_covariances_to_reject_data') 50 | 51 | self.contour_buffer 
= [] 52 | 53 | # initialize the node 54 | rospy.init_node('data_associator_' + nodenum) 55 | self.time_start = time.time() 56 | 57 | # Publishers. 58 | self.pubTrackedObjects = rospy.Publisher('/multi_tracker/' + nodenum + '/tracked_objects', Trackedobjectlist, queue_size=300) 59 | 60 | # Subscriptions. 61 | self.subImage = rospy.Subscriber('/multi_tracker/' + nodenum + '/contours', Contourlist, self.contour_callback, queue_size=300) 62 | 63 | def contour_callback(self, contourlist): 64 | with self.lockBuffer: 65 | self.contour_buffer.append(contourlist) 66 | 67 | def contour_identifier(self, contourlist): 68 | 69 | # keep track of which new objects have been "taken" 70 | contours_accounted_for = [] 71 | 72 | update_dict = {} 73 | 74 | def update_tracked_object(tracked_object, measurement, contourlist): 75 | if measurement is None: 76 | m = np.matrix([np.nan for i in range( tracked_object['measurement'].shape[0] ) ]).T 77 | xhat, P, K = tracked_object['kalmanfilter'].update( None ) # run kalman filter 78 | else: 79 | tracked_object['measurement'] = np.hstack( (tracked_object['measurement'], measurement) ) # add object's data to the tracked object 80 | xhat, P, K = tracked_object['kalmanfilter'].update( tracked_object['measurement'][:,-1] ) # run kalman filter 81 | tracked_object['frames'].append(int(contourlist.header.frame_id)) 82 | tracked_object['frames'].pop(0) 83 | tracked_object['nframes'] += 1 84 | tracked_object['timestamp'].append(contourlist.header.stamp) 85 | tracked_object['timestamp'].pop(0) 86 | tracked_object['state'] = np.hstack( (tracked_object['state'][:,-1], xhat) ) 87 | 88 | 89 | # iterate through objects first 90 | # get order of persistence 91 | objid_in_order_of_persistance = [] 92 | if len(self.tracked_objects.keys()) > 0: 93 | persistance = [] 94 | objids = [] 95 | for objid, tracked_object in self.tracked_objects.items(): 96 | persistance.append(tracked_object['nframes']) 97 | objids.append(objid) 98 | order = np.argsort(persistance)[::-1] 
99 | objid_in_order_of_persistance = [objids[o] for o in order] 100 | 101 | 102 | # loop through contours and find errors to all tracked objects (if less than allowed error) 103 | # then loop through the errors in order of increasing error and assigned contours to objects 104 | contour_to_object_error = [] 105 | tracked_object_state_estimates = None 106 | tracked_object_covariances = None 107 | tracked_object_ids = [] 108 | for objid, tracked_object in self.tracked_objects.items(): 109 | tose = np.array([[tracked_object['kalmanfilter'].xhat_apriori[0,0], tracked_object['kalmanfilter'].xhat_apriori[2,0]]]) 110 | cov = np.array([np.linalg.norm( (tracked_object['kalmanfilter'].H*tracked_object['kalmanfilter'].P).T*self.association_matrix )]) 111 | tracked_object_ids.append(objid) 112 | if tracked_object_state_estimates is None: 113 | tracked_object_state_estimates = tose 114 | tracked_object_covariances = cov 115 | else: 116 | tracked_object_state_estimates = np.vstack((tracked_object_state_estimates, tose)) 117 | tracked_object_covariances = np.vstack((tracked_object_covariances, cov)) 118 | 119 | if tracked_object_state_estimates is not None: 120 | for c, contour in enumerate(contourlist.contours): 121 | m = np.array([[contour.x, contour.y]]) 122 | error = np.array([np.linalg.norm(m-e) for e in tracked_object_state_estimates]) 123 | ncov = self.n_covariances_to_reject_data*np.sqrt(tracked_object_covariances) 124 | indices = np.where( (error < ncov ) )[0] 125 | if len(indices) > 0: 126 | new_contour_object_errors = [[error[i], tracked_object_ids[i], c] for i in indices] 127 | contour_to_object_error.extend(new_contour_object_errors) 128 | 129 | # Association and Propagation 130 | #o = [] 131 | if len(contour_to_object_error) > 0: 132 | contour_to_object_error = np.array(contour_to_object_error) 133 | sorted_indices = np.argsort(contour_to_object_error[:,0]) 134 | contour_to_object_error = contour_to_object_error[sorted_indices,:] 135 | contours_accounted_for = [] 136 
| objects_accounted_for = [] 137 | for data in contour_to_object_error: 138 | c = int(data[2]) 139 | objid = int(data[1]) 140 | if objid not in objects_accounted_for: 141 | if c not in contours_accounted_for: 142 | contour = contourlist.contours[c] 143 | measurement = np.matrix([contour.x, contour.y, 0, contour.area, contour.angle]).T 144 | tracked_object = self.tracked_objects[objid] 145 | update_tracked_object(tracked_object, measurement, contourlist) 146 | contours_accounted_for.append(c) 147 | objects_accounted_for.append(objid) 148 | #e = [ tracked_object['kalmanfilter'].xhat_apriori[0] - contour.x, 149 | # tracked_object['kalmanfilter'].xhat_apriori[2] - contour.y] 150 | #tracked_object_covariance = np.linalg.norm( (tracked_object['kalmanfilter'].H*tracked_object['kalmanfilter'].P).T*self.association_matrix ) 151 | #o.append([objid, e, tracked_object_covariance]) 152 | 153 | # any unnaccounted contours should spawn new objects 154 | for c, contour in enumerate(contourlist.contours): 155 | if c not in contours_accounted_for: 156 | 157 | obj_state = np.matrix([contour.x, 0, contour.y, 0, 0, 0, contour.area, 0, contour.angle, 0]).T # pretending 3-d tracking (z and zvel) for now 158 | obj_measurement = np.matrix([contour.x, contour.y, 0, contour.area, contour.angle]).T 159 | # If not associated with previous object, spawn a new object 160 | new_obj = { 'objid': self.current_objid, 161 | 'statenames': { 'position': [0, 2, 4], 162 | 'velocity': [1, 3, 5], 163 | 'size': 6, 164 | 'd_size': 7, 165 | 'angle': 8, 166 | 'd_angle': 9, 167 | }, 168 | 'state': obj_state, 169 | 'measurement': obj_measurement, 170 | 'timestamp': [contour.header.stamp], 171 | 'frames': [int(contour.header.frame_id)], 172 | 'kalmanfilter': Kalman.DiscreteKalmanFilter(x0 = obj_state, 173 | P0 = self.kalman_parameters.P0, 174 | phi = self.kalman_parameters.phi, 175 | gamma = self.kalman_parameters.gamma, 176 | H = self.kalman_parameters.H, 177 | Q = self.kalman_parameters.Q, 178 | R = 
self.kalman_parameters.R, 179 | gammaW = self.kalman_parameters.gammaW, 180 | ), 181 | 'nframes': 0, 182 | } 183 | self.tracked_objects.setdefault(new_obj['objid'], new_obj) 184 | self.current_objid += 1 185 | 186 | 187 | 188 | # propagate unmatched objects 189 | for objid, tracked_object in self.tracked_objects.items(): 190 | if tracked_object['frames'][-1] != int(contourlist.header.frame_id): 191 | update_tracked_object(tracked_object, None, contourlist) 192 | 193 | # make sure we don't get too many objects - delete the oldest ones, and ones with high covariances 194 | objects_to_destroy = [] 195 | if len(objid_in_order_of_persistance) > self.max_tracked_objects: 196 | for objid in objid_in_order_of_persistance[self.max_tracked_objects:]: 197 | objects_to_destroy.append(objid) 198 | 199 | # check covariance, and velocity 200 | for objid, tracked_object in self.tracked_objects.items(): 201 | tracked_object_covariance = np.linalg.norm( (tracked_object['kalmanfilter'].H*tracked_object['kalmanfilter'].P).T*self.association_matrix ) 202 | if tracked_object_covariance > self.max_covariance: 203 | if objid not in objects_to_destroy: 204 | objects_to_destroy.append(objid) 205 | v = np.linalg.norm( np.array( tracked_object['state'][tracked_object['statenames']['velocity'],-1] ).flatten().tolist() ) 206 | if v > self.max_velocity: 207 | if objid not in objects_to_destroy: 208 | objects_to_destroy.append(objid) 209 | for objid in objects_to_destroy: 210 | del(self.tracked_objects[objid]) 211 | 212 | # recalculate persistance (not necessary, but convenient) 213 | objid_in_order_of_persistance = [] 214 | if len(self.tracked_objects.keys()) > 0: 215 | persistance = [] 216 | for objid, tracked_object in self.tracked_objects.items(): 217 | persistance.append(len(tracked_object['frames'])) 218 | objid_in_order_of_persistance.append(objid) 219 | order = np.argsort(persistance)[::-1] 220 | objid_in_order_of_persistance = [objid_in_order_of_persistance[o] for o in order] 221 | if 
len(objid_in_order_of_persistance) > 0: 222 | most_persistant_objid = objid_in_order_of_persistance[0] 223 | 224 | # publish tracked objects 225 | if 1: 226 | object_info_to_publish = [] 227 | t = contourlist.header.stamp 228 | for objid in objid_in_order_of_persistance: 229 | if objid not in objects_to_destroy: 230 | tracked_object = self.tracked_objects[objid] 231 | data = Trackedobject() 232 | data.header = Header(stamp=t, frame_id=contourlist.header.frame_id) 233 | p = np.array( tracked_object['state'][tracked_object['statenames']['position'],-1] ).flatten().tolist() 234 | v = np.array( tracked_object['state'][tracked_object['statenames']['velocity'],-1] ).flatten().tolist() 235 | data.position = Point( p[0], p[1], p[2] ) 236 | data.velocity = Vector3( v[0], v[1], v[2] ) 237 | data.angle = tracked_object['state'][tracked_object['statenames']['angle'],-1] 238 | data.size = tracked_object['state'][tracked_object['statenames']['size'],-1]#np.linalg.norm(tracked_object['kalmanfilter'].P.diagonal()) 239 | data.measurement = Point( tracked_object['measurement'][0, -1], tracked_object['measurement'][1, -1], 0) 240 | tracked_object_covariance = np.linalg.norm( (tracked_object['kalmanfilter'].H*tracked_object['kalmanfilter'].P).T*self.association_matrix ) 241 | data.covariance = tracked_object_covariance # position covariance only 242 | data.objid = tracked_object['objid'] 243 | data.persistence = tracked_object['nframes'] 244 | object_info_to_publish.append(data) 245 | header = Header(stamp=t) 246 | self.pubTrackedObjects.publish( Trackedobjectlist(header=header, tracked_objects=object_info_to_publish) ) 247 | 248 | def main(self): 249 | while not rospy.is_shutdown(): 250 | t = time.time() - self.time_start 251 | with self.lockBuffer: 252 | time_now = rospy.Time.now() 253 | if len(self.contour_buffer) > 0: 254 | self.contour_identifier(self.contour_buffer.pop(0)) 255 | pt = (rospy.Time.now()-time_now).to_sec() 256 | if len(self.contour_buffer) > 9: 257 | 
rospy.logwarn("Data association processing time exceeds acquisition rate. Processing time: %f, Buffer: %d", pt, len(self.contour_buffer)) 258 | 259 | 260 | 261 | if __name__ == '__main__': 262 | parser = OptionParser() 263 | parser.add_option("--nodenum", type="str", dest="nodenum", default='1', 264 | help="node number, for example, if running multiple tracker instances on one computer") 265 | (options, args) = parser.parse_args() 266 | 267 | data_associator = DataAssociator(options.nodenum) 268 | data_associator.main() 269 | -------------------------------------------------------------------------------- /nodes/delta_video_player.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import division 3 | import roslib 4 | import rospy 5 | import rosparam 6 | import copy 7 | import cv2 8 | import numpy as np 9 | import threading 10 | import dynamic_reconfigure.server 11 | from cv_bridge import CvBridge, CvBridgeError 12 | from sensor_msgs.msg import Image 13 | from std_msgs.msg import Float32, Header, String 14 | import imp 15 | 16 | from multi_tracker.msg import Contourinfo, Contourlist, DeltaVid 17 | from multi_tracker.msg import Trackedobject, Trackedobjectlist 18 | from multi_tracker.srv import resetBackgroundService 19 | import os 20 | 21 | from optparse import OptionParser 22 | 23 | import image_processing 24 | 25 | import matplotlib.pyplot as plt 26 | 27 | # for basler ace cameras, use camera_aravis 28 | # https://github.com/ssafarik/camera_aravis 29 | # rosrun camera_aravis camnode 30 | # default image: /camera/image_raw 31 | 32 | # for firefley cameras, camera1394 does not provide timestamps but otherwise works 33 | # use point grey drivers 34 | # http://wiki.ros.org/pointgrey_camera_driver 35 | # rosrun pointgrey_camera_driver camera_node 36 | # default image: /camera/image_mono 37 | 38 | 39 | from distutils.version import LooseVersion, StrictVersion 40 | print('Using open cv: ' + 
cv2.__version__) 41 | if StrictVersion(cv2.__version__.split('-')[0]) >= StrictVersion("3.0.0"): 42 | OPENCV_VERSION = 3 43 | print('Open CV 3') 44 | else: 45 | OPENCV_VERSION = 2 46 | print('Open CV 2') 47 | 48 | # The main tracking class, a ROS node 49 | class DeCompressor: 50 | def __init__(self, topic_in, topic_out, directory, config=None, mode='mono', saveto=''): 51 | ''' 52 | Default image_topic for: 53 | Basler ace cameras with camera_aravis driver: camera/image_raw 54 | Pt Grey Firefly cameras with pt grey driver : camera/image_mono 55 | ''' 56 | # default parameters (parameter server overides them) 57 | 58 | # initialize the node 59 | rospy.init_node('delta_decompressor') 60 | self.nodename = rospy.get_name().rstrip('/') 61 | 62 | # Publishers - publish contours 63 | self.pubDeltaVid = rospy.Publisher(topic_out, Image, queue_size=30) 64 | self.subDeltaVid = rospy.Subscriber(topic_in, DeltaVid, self.delta_image_callback, queue_size=30) 65 | 66 | self.directory = directory #rospy.get_param('/multi_tracker/delta_video/directory', default='') 67 | print('Directory with background images: ', self.directory) 68 | 69 | self.cvbridge = CvBridge() 70 | 71 | self.backgroundImage = None 72 | self.background_img_filename = 'none' 73 | 74 | self.config = config 75 | self.mode = mode 76 | 77 | if len(saveto) > 0: 78 | self.saveto = saveto 79 | self.videowriter = None 80 | else: 81 | self.saveto = None 82 | self.videowriter = None 83 | 84 | 85 | def delta_image_callback(self, delta_vid): 86 | if (self.background_img_filename != delta_vid.background_image) or (self.backgroundImage is None): 87 | self.background_img_filename = delta_vid.background_image 88 | basename = os.path.basename(self.background_img_filename) 89 | directory_with_basename = os.path.join(self.directory, basename) 90 | self.backgroundImage = cv2.imread(directory_with_basename, cv2.CV_8UC1) 91 | try: 92 | self.backgroundImage = self.backgroundImage.reshape([self.backgroundImage.shape[0], 
self.backgroundImage[1], 1]) # for hydro 93 | except: 94 | pass # for indigo 95 | 96 | if self.backgroundImage is not None: 97 | new_image = copy.copy(self.backgroundImage) 98 | 99 | if delta_vid.values is not None: 100 | if len(delta_vid.values) > 0: 101 | try: 102 | new_image[delta_vid.xpixels, delta_vid.ypixels, 0] = delta_vid.values # for hydro 103 | except: 104 | new_image[delta_vid.xpixels, delta_vid.ypixels] = delta_vid.values # for indigo 105 | 106 | if self.mode == 'color': 107 | new_image = cv2.cvtColor(new_image, cv2.COLOR_GRAY2RGB) 108 | 109 | if self.config is not None: 110 | #print delta_vid.header.stamp.secs + delta_vid.header.stamp.nsecs*1e-9 111 | t = delta_vid.header.stamp.secs + delta_vid.header.stamp.nsecs*1e-9 112 | self.config.draw(new_image, t) 113 | 114 | if self.saveto is not None: 115 | if self.videowriter is not None: 116 | self.videowriter.write(new_image) 117 | else: 118 | if OPENCV_VERSION == 2: 119 | self.videowriter = cv2.VideoWriter(self.saveto,cv2.cv.CV_FOURCC('m','p','4','v'), 300,(new_image.shape[1], new_image.shape[0]),True) # works on Linux and Windows 120 | elif OPENCV_VERSION == 3: 121 | self.videowriter = cv2.VideoWriter(self.saveto,cv2.VideoWriter_fourcc('m','p','4','v'), 300,(new_image.shape[1], new_image.shape[0]),True) 122 | #self.videowriter.open(self.saveto, cv.CV_FOURCC('P','I','M','1'), 30, (new_image.shape[0], new_image.shape[1])) 123 | 124 | if self.mode == 'mono': 125 | image_message = self.cvbridge.cv2_to_imgmsg(new_image, encoding="mono8") 126 | elif self.mode == 'color': 127 | image_message = self.cvbridge.cv2_to_imgmsg(new_image, encoding="bgr8") 128 | image_message.header = delta_vid.header 129 | self.pubDeltaVid.publish(image_message) 130 | 131 | def Main(self): 132 | rospy.spin() 133 | if self.videowriter is not None: 134 | self.videowriter.release() 135 | print("Note: use this command to make a mac / quicktime friendly video: avconv -i test.avi -c:v libx264 -c:a copy outputfile.mp4") 136 | 
##################################################################################################### 137 | 138 | if __name__ == '__main__': 139 | parser = OptionParser() 140 | parser.add_option("--in", type="str", dest="input", default='/multi_tracker/delta_video', 141 | help="input topic name") 142 | parser.add_option("--out", type="str", dest="output", default='/camera/image_decompressed', 143 | help="output topic name") 144 | parser.add_option("--directory", type="str", dest="directory", default='', 145 | help="directory where background images can be found") 146 | parser.add_option("--config", type="str", dest="config", default='', 147 | help="configuration file, which should describe a class that has a method draw") 148 | parser.add_option("--mode", type="str", dest="mode", default='mono', 149 | help="color if desired to convert to color image") 150 | parser.add_option("--saveto", type="str", dest="saveto", default='', 151 | help="filename where to save video, default is none. Note: use this command to make a mac / quicktime friendly video: avconv -i test.avi -c:v libx264 -c:a copy outputfile.mp4") 152 | 153 | (options, args) = parser.parse_args() 154 | 155 | if len(options.config) > 0: 156 | config = imp.load_source('config', options.config) 157 | c = config.Config(options.config) 158 | else: 159 | c = None 160 | 161 | decompressor = DeCompressor(options.input, options.output, options.directory, c, options.mode, options.saveto) 162 | decompressor.Main() 163 | -------------------------------------------------------------------------------- /nodes/delta_video_simplebuffer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import division 3 | from optparse import OptionParser 4 | import rospy 5 | import rosparam 6 | import copy 7 | # import cv: open cv 1 not used 8 | import cv2 9 | import numpy as np 10 | import threading 11 | import dynamic_reconfigure.server 12 | from cv_bridge 
import CvBridge, CvBridgeError 13 | from sensor_msgs.msg import Image 14 | from std_msgs.msg import Float32, Header, String 15 | 16 | from multi_tracker.msg import Contourinfo, Contourlist, DeltaVid 17 | from multi_tracker.msg import Trackedobject, Trackedobjectlist 18 | from multi_tracker.srv import resetBackgroundService 19 | 20 | import time 21 | import os 22 | import image_processing 23 | 24 | import matplotlib.pyplot as plt 25 | 26 | # for basler ace cameras, use camera_aravis 27 | # https://github.com/ssafarik/camera_aravis 28 | # rosrun camera_aravis camnode 29 | # default image: /camera/image_raw 30 | 31 | # for firefley cameras, camera1394 does not provide timestamps but otherwise works 32 | # use point grey drivers 33 | # http://wiki.ros.org/pointgrey_camera_driver 34 | # rosrun pointgrey_camera_driver camera_node 35 | # default image: /camera/image_mono 36 | 37 | # The main tracking class, a ROS node 38 | class Compressor: 39 | def __init__(self, nodenum): 40 | ''' 41 | Default image_topic for: 42 | Basler ace cameras with camera_aravis driver: camera/image_raw 43 | Pt Grey Firefly cameras with pt grey driver : camera/image_mono 44 | ''' 45 | # default parameters (parameter server overides them) 46 | self.nodenum = nodenum 47 | self.params = { 'image_topic' : '/camera/image_raw', 48 | 'threshold' : 10, 49 | 'camera_encoding' : 'mono8', # fireflies are bgr8, basler gige cams are mono8 50 | 'max_change_in_frame' : 0.2, 51 | 'roi_l' : 0, 52 | 'roi_r' : -1, 53 | 'roi_b' : 0, 54 | 'roi_t' : -1, 55 | 'circular_mask_x' : 'none', 56 | 'circular_mask_y' : 'none', 57 | 'circular_mask_r' : 'none', 58 | } 59 | for parameter, value in self.params.items(): 60 | try: 61 | p = '/multi_tracker/' + nodenum + '/delta_video/' + parameter 62 | self.params[parameter] = rospy.get_param(p) 63 | except: 64 | print('Using default parameter: ', parameter, ' = ', value) 65 | 66 | # initialize the node 67 | rospy.init_node('delta_compressor_' + nodenum) 68 | self.nodename = 
rospy.get_name().rstrip('/') 69 | self.time_start = time.time() 70 | 71 | # experiment basename 72 | self.experiment_basename = rospy.get_param('/multi_tracker/' + nodenum + '/experiment_basename', 'none') 73 | if self.experiment_basename == 'none': 74 | self.experiment_basename = time.strftime("%Y%m%d_%H%M%S_N" + nodenum, time.localtime()) 75 | 76 | # Publishers - publish pixel changes 77 | self.pubDeltaVid = rospy.Publisher('/multi_tracker/' + nodenum + '/delta_video', DeltaVid, queue_size=30) 78 | 79 | # background reset service 80 | self.reset_background_flag = False 81 | self.reset_background_service = rospy.Service('/multi_tracker/' + nodenum + '/reset_background', resetBackgroundService, self.reset_background) 82 | 83 | self.cvbridge = CvBridge() 84 | self.imgScaled = None 85 | self.backgroundImage = None 86 | self.background_img_filename = 'none' 87 | 88 | # buffer locking 89 | self.lockBuffer = threading.Lock() 90 | self.image_buffer = [] 91 | self.framestamp = None 92 | 93 | self.current_background_img = 0 94 | 95 | # Subscriptions - subscribe to images, and tracked objects 96 | self.image_mask = None 97 | sizeImage = 128+1024*1024*3 # Size of header + data. 98 | self.subImage = rospy.Subscriber(self.params['image_topic'], Image, self.image_callback, queue_size=5, buff_size=2*sizeImage, tcp_nodelay=True) 99 | 100 | def reset_background(self, service_call): 101 | self.reset_background_flag = True 102 | return 1 103 | 104 | def image_callback(self, rosimg): 105 | with self.lockBuffer: 106 | self.image_buffer.append(rosimg) 107 | 108 | def process_image_buffer(self, rosimg): 109 | if self.framestamp is not None: 110 | self.dtCamera = (rosimg.header.stamp - self.framestamp).to_sec() 111 | else: 112 | self.dtCamera = 0.03 113 | self.framenumber = rosimg.header.seq 114 | self.framestamp = rosimg.header.stamp 115 | 116 | # Convert the image. 
117 | try: 118 | img = self.cvbridge.imgmsg_to_cv2(rosimg, 'passthrough') # might need to change to bgr for color cameras 119 | except CvBridgeError as e: 120 | rospy.logwarn ('Exception converting background image from ROS to opencv: %s' % e) 121 | img = np.zeros((320,240)) 122 | 123 | self.imgScaled = img #[self.params['roi_b']:self.params['roi_t'], self.params['roi_l']:self.params['roi_r']] 124 | self.shapeImage = self.imgScaled.shape # (height,width) 125 | 126 | # add roi as mask 127 | if self.image_mask is None: 128 | self.image_mask = np.zeros_like(self.imgScaled) 129 | self.image_mask[self.params['roi_b']:self.params['roi_t'], self.params['roi_l']:self.params['roi_r']] = 1 130 | self.imgScaled = self.image_mask*self.imgScaled 131 | 132 | if self.params['circular_mask_x'] != 'none': 133 | if self.image_mask is None: 134 | self.image_mask = np.zeros_like(self.imgScaled) 135 | cv2.circle(self.image_mask,(self.params['circular_mask_x'], self.params['circular_mask_y']),int(self.params['circular_mask_r']),[1,1,1],-1) 136 | self.imgScaled = self.image_mask*self.imgScaled 137 | 138 | ########### image processing function ############################################################## 139 | 140 | # If there is no background image, grab one, and move on to the next frame 141 | if self.backgroundImage is None: 142 | self.backgroundImage = copy.copy(self.imgScaled) 143 | self.background_img_filename = self.experiment_basename + '_deltavideo_bgimg_' + time.strftime("%Y%m%d_%H%M.png", time.localtime()) 144 | data_directory = os.path.expanduser( rospy.get_param('/multi_tracker/' + self.nodenum + '/data_directory') ) 145 | self.background_img_filename = os.path.join(data_directory, self.background_img_filename) 146 | 147 | cv2.imwrite(self.background_img_filename, self.backgroundImage) 148 | self.current_background_img += 1 149 | return 150 | if self.reset_background_flag: 151 | self.backgroundImage = copy.copy(self.imgScaled) 152 | self.background_img_filename = 
time.strftime("%Y%m%d_%H%M_deltavideo_bgimg_N" + self.nodenum, time.localtime()) + '.png' 153 | data_directory = os.path.expanduser( rospy.get_param('/multi_tracker/' + self.nodenum + '/data_directory') ) 154 | self.background_img_filename = os.path.join(data_directory, self.background_img_filename) 155 | cv2.imwrite(self.background_img_filename, self.backgroundImage) 156 | self.current_background_img += 1 157 | self.reset_background_flag = False 158 | return 159 | # Absdiff 160 | self.absdiff = cv2.absdiff(self.imgScaled, self.backgroundImage) 161 | 162 | changed_pixels = np.where(self.absdiff>self.params['threshold']) 163 | delta_msg = DeltaVid() 164 | header = Header(stamp=self.framestamp,frame_id=str(self.framenumber)) 165 | delta_msg.header = header 166 | delta_msg.background_image = self.background_img_filename 167 | if len(changed_pixels[0]) > 0: 168 | delta_msg.xpixels = changed_pixels[0].tolist() 169 | delta_msg.ypixels = changed_pixels[1].tolist() 170 | delta_msg.values = self.imgScaled[changed_pixels].reshape(len(changed_pixels[0])).tolist() 171 | else: 172 | delta_msg.xpixels = [0] 173 | delta_msg.ypixels = [0] 174 | #delta_msg.values = [0] 175 | self.pubDeltaVid.publish(delta_msg) 176 | 177 | # if the thresholded absolute difference is too large, reset the background 178 | if len(changed_pixels[0]) / (self.absdiff.shape[0]*self.absdiff.shape[1])>self.params['max_change_in_frame']: 179 | self.reset_background_flag = True 180 | 181 | #self.backgroundImage[delta_msg.xpixels, delta_msg.ypixels] = delta_msg.values 182 | 183 | def Main(self): 184 | while (not rospy.is_shutdown()): 185 | t = time.time() - self.time_start 186 | if t > 24*3600: 187 | cv2.destroyAllWindows() 188 | return 189 | with self.lockBuffer: 190 | time_now = rospy.Time.now() 191 | if len(self.image_buffer) > 0: 192 | self.process_image_buffer(self.image_buffer.pop(0)) 193 | pt = (rospy.Time.now()-time_now).to_sec() 194 | if len(self.image_buffer) > 3: 195 | rospy.logwarn("Delta video 
processing time exceeds acquisition rate. Processing time: %f, Buffer: %d", pt, len(self.image_buffer)) 196 | 197 | cv2.destroyAllWindows() 198 | 199 | ##################################################################################################### 200 | 201 | if __name__ == '__main__': 202 | parser = OptionParser() 203 | parser.add_option("--nodenum", type="str", dest="nodenum", default='1', 204 | help="node number, for example, if running multiple tracker instances on one computer") 205 | (options, args) = parser.parse_args() 206 | 207 | compressor = Compressor(options.nodenum) 208 | compressor.Main() 209 | -------------------------------------------------------------------------------- /nodes/image_processing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import division 3 | import roslib 4 | import rospy 5 | import rosparam 6 | import copy 7 | import cv2 8 | import numpy as np 9 | import threading 10 | import dynamic_reconfigure.server 11 | from cv_bridge import CvBridge, CvBridgeError 12 | from sensor_msgs.msg import Image 13 | from std_msgs.msg import Float32, Header, String 14 | 15 | from multi_tracker.msg import Contourinfo, Contourlist 16 | from multi_tracker.msg import Trackedobject, Trackedobjectlist 17 | from multi_tracker.srv import resetBackgroundService 18 | 19 | import time, os 20 | 21 | from distutils.version import LooseVersion, StrictVersion 22 | print('Using open cv: ' + cv2.__version__) 23 | 24 | # video would not load before installing most recent version of pyqtgraph from github repo 25 | # this is the version of the commit that fixed the 26 | # issue with current numpy: pyqtgraph-0.9.10-118-ge495bbc (in commit e495bbc...) 27 | # version checking with distutils.version. 
# See: http://stackoverflow.com/questions/11887762/compare-version-strings
if StrictVersion(cv2.__version__.split('-')[0]) >= StrictVersion("3.0.0"):
    OPENCV_VERSION = 3
    print('Open CV 3')
else:
    OPENCV_VERSION = 2
    print('Open CV 2')

###########################################################################################################
# Incredibly basic image processing function (self contained), to demonstrate the format custom image processing functions should follow
#######

def incredibly_basic(self):
    """Minimal image-processing step: threshold absdiff against the background
    and publish fitted-ellipse contour info. `self` is the tracker node instance.
    """
    # If there is no background image, grab one, and move on to the next frame
    if self.backgroundImage is None:
        reset_background(self)
        return
    if self.reset_background_flag:
        reset_background(self)
        self.reset_background_flag = False
        return

    self.absdiff = cv2.absdiff(np.float32(self.imgScaled), self.backgroundImage)
    self.imgproc = copy.copy(self.imgScaled)

    retval, self.threshed = cv2.threshold(self.absdiff, self.params['threshold'], 255, 0)

    # convert to gray if necessary
    if len(self.threshed.shape) == 3:
        self.threshed = np.uint8(cv2.cvtColor(self.threshed, cv2.COLOR_BGR2GRAY))

    # extract and publish contours
    # http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html
    if OPENCV_VERSION == 2:
        contours, hierarchy = cv2.findContours(self.threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    elif OPENCV_VERSION == 3:
        self.threshed = np.uint8(self.threshed)
        contours, hierarchy = cv2.findContours(self.threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    try:
        header = Header(stamp=self.framestamp,frame_id=str(self.framenumber))
    except:
        header = Header(stamp=None,frame_id=str(self.framenumber))
        print('could not get framestamp, run tracker_nobuffer instead')

    contour_info = []
    for contour in contours:
        if len(contour) > 5: # Large objects are approximated by an ellipse
            ellipse = cv2.fitEllipse(contour)
            (x,y), (a,b), angle = ellipse
            a /= 2.
            b /= 2.
            ecc = np.min((a,b)) / np.max((a,b))
            area = np.pi*a*b

            data = Contourinfo()
            data.header = header
            # BUG FIX: the original referenced a bare `dtCamera`, which is not
            # defined in this function's scope and raised NameError as soon as a
            # large contour appeared; the frame interval lives on the tracker
            # instance (cf. extract_and_publish_contours below).
            data.dt = self.dtCamera
            data.x = x
            data.y = y
            data.area = area
            data.angle = angle
            data.ecc = ecc

            contour_info.append(data)

        else: # Small ones get ignored
            pass

    # publish the contours
    self.pubContours.publish( Contourlist(header = header, contours=contour_info) )


###########################################################################################################
# General use functions
#######

def is_point_below_line(point, slope, intercept):
    """Return True if (x, y) lies on or below the line y = slope*x + intercept."""
    x = point[0]
    y = point[1]
    result = y-slope*x-intercept
    if result > 0:
        return False
    else:
        return True

def fit_ellipse_to_contour(self, contour):
    """Fit an ellipse to a contour; return (x, y, ecc, area, angle).

    If self.params['use_moments'] is set, centroid and area come from image
    moments instead (eccentricity/angle still from the ellipse fit).
    """
    ellipse = cv2.fitEllipse(contour)
    (x,y), (a,b), angle = ellipse
    a /= 2.
    b /= 2.
    ecc = np.min((a,b)) / np.max((a,b))
    area = np.pi*a*b
    if self.params['use_moments']: # inefficient - double calculating. Is ecc use somewhere else? If not, the ellipse is not at all needed
        try:
            moments = get_centroid_from_moments(contour) # get these values from moments - might be more robust?
        except:
            moments = None
        if moments is not None:
            x, y, area = moments
    return x, y, ecc, area, angle

def get_centroid_from_moments(contour):
    """Return (cx, cy, area) from image moments, or None for degenerate contours."""
    M = cv2.moments(contour)
    if M['m00'] != 0:
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        area = cv2.contourArea(contour)
        return cx, cy, area
    else:
        return None

def add_data_to_contour_info(x,y,ecc,area,angle,dtCamera,header):
    # Prepare to publish the contour info
    # contour message info: dt, x, y, angle, area, ecc
    data = Contourinfo()
    data.header = header
    data.dt = dtCamera
    data.x = x
    data.y = y
    data.area = area
    data.angle = angle
    data.ecc = ecc
    return data

def extract_and_publish_contours(self):
    """Find contours in self.threshed, fit ellipses, split oversized blobs, publish."""
    if OPENCV_VERSION == 2:
        contours, hierarchy = cv2.findContours(self.threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    elif OPENCV_VERSION == 3:
        contours, hierarchy = cv2.findContours(self.threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # http://docs.opencv.org/trunk/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html

    try:
        header = Header(stamp=self.framestamp,frame_id=str(self.framenumber))
    except:
        header = Header(stamp=None,frame_id=str(self.framenumber))
        print('could not get framestamp, run tracker_nobuffer instead')

    contour_info = []
    for contour in contours:
        # Large objects are approximated by an ellipse
        if len(contour) > 5:
            x, y, ecc, area, angle = fit_ellipse_to_contour(self, contour)

            # if object is too large, split it in two, this helps with colliding objects, but is not 100% correct
            if area > self.params['max_expected_area']:
                # NOTE(review): cv2.fitEllipse returns `angle` in degrees, but
                # np.tan expects radians -- confirm whether this split line was
                # intended to use np.tan(np.radians(angle)).
                slope = np.tan(angle)
                intercept = y - slope*x
                c1 = []
                c2 = []
                for point in contour:
                    point = point.reshape(2)
                    if is_point_below_line(point, slope, intercept):
                        c1.append([point])
                    else:
                        c2.append([point])
                c1 = np.array(c1)
                c2 = np.array(c2)

                if len(c1) > 5:
                    x, y, ecc, area, angle = fit_ellipse_to_contour(self, np.array(c1))
                    if area < self.params['max_size'] and area > self.params['min_size']:
                        data = add_data_to_contour_info(x,y,ecc,area,angle,self.dtCamera,header)
                        contour_info.append(data)

                if len(c2) > 5:
                    x, y, ecc, area, angle = fit_ellipse_to_contour(self, np.array(c2))
                    if area < self.params['max_size'] and area > self.params['min_size']:
                        data = add_data_to_contour_info(x,y,ecc,area,angle,self.dtCamera,header)
                        contour_info.append(data)
            else:
                if area < self.params['max_size'] and area > self.params['min_size']:
                    data = add_data_to_contour_info(x,y,ecc,area,angle,self.dtCamera,header)
                    contour_info.append(data)

        # Small ones just get a point
        else:
            area = 0

    # publish the contours
    self.pubContours.publish( Contourlist(header = header, contours=contour_info) )

    return

def convert_to_gray_if_necessary(self):
    """Collapse a 3-channel thresholded image to uint8 grayscale in place."""
    if len(self.threshed.shape) == 3:
        self.threshed = np.uint8(cv2.cvtColor(self.threshed, cv2.COLOR_BGR2GRAY))

def erode_and_dialate(self):
    """Dilate then erode self.threshed with a 3x3 kernel (iteration counts from params)."""
    kernel = np.ones((3,3), np.uint8)
    self.threshed = cv2.dilate(self.threshed, kernel, iterations=self.params['dilate'])
    self.threshed = cv2.erode(self.threshed, kernel, iterations=self.params['erode'])

def reset_background_if_difference_is_very_large(self, color='dark'):
    """Re-grab the background when too large a fraction of the frame changed."""
    if color == 'dark':
        # if the thresholded absolute difference is too large, reset the background
        if np.sum(self.threshed>0) / float(self.shapeImage[0]*self.shapeImage[1]) > self.params['max_change_in_frame']:
            reset_background(self)
            return
    elif color == 'light':
        # NOT IMPLEMENTED
        pass
231 | def reset_background(self): 232 | self.backgroundImage = copy.copy(np.float32(self.imgScaled)) 233 | print(self.imgScaled.shape, self.backgroundImage.shape) 234 | filename = self.experiment_basename + '_' + time.strftime("%Y%m%d_%H%M%S_bgimg_N" + self.nodenum, time.localtime()) + '.png' 235 | home_directory = os.path.expanduser( rospy.get_param('/multi_tracker/' + self.nodenum + '/data_directory') ) 236 | filename = os.path.join(home_directory, filename) 237 | 238 | try: 239 | cv2.imwrite(filename, self.backgroundImage) # requires opencv > 2.4.9 240 | print('Background reset: ', filename) 241 | except: 242 | print('failed to save background image, might need opencv 2.4.9?') 243 | 244 | def add_image_to_background(self, color='dark'): 245 | tmp_backgroundImage = copy.copy(np.float32(self.imgScaled)) 246 | if color == 'dark': 247 | self.backgroundImage = np.max([self.backgroundImage, tmp_backgroundImage], axis=0) 248 | elif color == 'light': 249 | self.backgroundImage = np.min([self.backgroundImage, tmp_backgroundImage], axis=0) 250 | filename = self.experiment_basename + '_' + time.strftime("%Y%m%d_%H%M%S_bgimg_N" + self.nodenum, time.localtime()) + '.png' 251 | home_directory = os.path.expanduser( rospy.get_param('/multi_tracker/' + self.nodenum + '/data_directory') ) 252 | filename = os.path.join(home_directory, filename) 253 | 254 | try: 255 | cv2.imwrite(filename, self.backgroundImage) # requires opencv > 2.4.9 256 | print('Background reset: ', filename) 257 | except: 258 | print('failed to save background image, might need opencv 2.4.9?') 259 | ########################################################################################################### 260 | 261 | ########################################################################################################### 262 | # Simple background subtraction 263 | ############################### 264 | 265 | def background_subtraction(self): 266 | # If there is no background image, grab one, and move on to 
the next frame 267 | if self.backgroundImage is None: 268 | reset_background(self) 269 | return 270 | if self.reset_background_flag: 271 | reset_background(self) 272 | self.reset_background_flag = False 273 | return 274 | 275 | # Absdiff, threshold, and contours 276 | # cv2.RETR_EXTERNAL only extracts the outer most contours - good for speed, and most simple objects 277 | self.absdiff = cv2.absdiff(np.float32(self.imgScaled), self.backgroundImage) 278 | self.imgproc = copy.copy(self.imgScaled) 279 | # running background update 280 | cv2.accumulateWeighted(np.float32(self.imgScaled), self.backgroundImage, self.params['backgroundupdate']) # this needs to be here, otherwise there's an accumulation of something in the background 281 | 282 | retval, self.threshed = cv2.threshold(self.absdiff, self.params['threshold'], 255, 0) 283 | 284 | convert_to_gray_if_necessary(self) 285 | erode_and_dialate(self) 286 | extract_and_publish_contours(self) 287 | reset_background_if_difference_is_very_large(self) 288 | 289 | ########################################################################################################### 290 | # Only track dark or light objects 291 | ######################### 292 | 293 | def dark_objects_only(self): 294 | dark_or_light_objects_only(self, color='dark') 295 | 296 | def light_objects_only(self): 297 | dark_or_light_objects_only(self, color='light') 298 | 299 | def dark_or_light_objects(self): 300 | dark_or_light_objects_only(self, color='darkorlight') 301 | 302 | def dark_or_light_objects_only(self, color='dark'): 303 | if self.params['circular_mask_x'] != 'none': 304 | if self.image_mask is None: 305 | self.image_mask = np.zeros_like(self.imgScaled) 306 | cv2.circle(self.image_mask,(self.params['circular_mask_x'], self.params['circular_mask_y']),int(self.params['circular_mask_r']),[1,1,1],-1) 307 | self.imgScaled = self.image_mask*self.imgScaled 308 | 309 | # If there is no background image, grab one, and move on to the next frame 310 | if 
self.backgroundImage is None: 311 | reset_background(self) 312 | return 313 | if self.reset_background_flag: 314 | reset_background(self) 315 | self.reset_background_flag = False 316 | return 317 | if self.add_image_to_background_flag: 318 | add_image_to_background(self, color) 319 | self.add_image_to_background_flag = False 320 | return 321 | 322 | 323 | if self.params['backgroundupdate'] != 0: 324 | cv2.accumulateWeighted(np.float32(self.imgScaled), self.backgroundImage, self.params['backgroundupdate']) # this needs to be here, otherwise there's an accumulation of something in the background 325 | if self.params['medianbgupdateinterval'] != 0: 326 | t = rospy.Time.now().secs 327 | if not self.__dict__.has_key('medianbgimages'): 328 | self.medianbgimages = [self.imgScaled] 329 | self.medianbgimages_times = [t] 330 | if t-self.medianbgimages_times[-1] > self.params['medianbgupdateinterval']: 331 | self.medianbgimages.append(self.imgScaled) 332 | self.medianbgimages_times.append(t) 333 | if len(self.medianbgimages) > 3: 334 | self.backgroundImage = copy.copy(np.float32(np.median(self.medianbgimages, axis=0))) 335 | self.medianbgimages.pop(0) 336 | self.medianbgimages_times.pop(0) 337 | print('reset background with median image') 338 | 339 | try: 340 | kernel = self.kernel 341 | except: 342 | kern_d = self.params['morph_open_kernel_size'] 343 | kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kern_d,kern_d)) 344 | self.kernel = kernel 345 | 346 | if color == 'dark': 347 | self.threshed = cv2.compare(np.float32(self.imgScaled), self.backgroundImage-self.params['threshold'], cv2.CMP_LT) # CMP_LT is less than 348 | elif color == 'light': 349 | self.threshed = cv2.compare(np.float32(self.imgScaled), self.backgroundImage+self.params['threshold'], cv2.CMP_GT) # CMP_GT is greater than 350 | elif color == 'darkorlight': 351 | #absdiff = cv2.absdiff(np.float32(self.imgScaled), self.backgroundImage) 352 | #retval, self.threshed = cv2.threshold(absdiff, 
self.params['threshold'], 255, 0) 353 | #self.threshed = np.uint8(self.threshed) 354 | dark = cv2.compare(np.float32(self.imgScaled), self.backgroundImage-self.params['threshold'], cv2.CMP_LT) # CMP_LT is less than 355 | light = cv2.compare(np.float32(self.imgScaled), self.backgroundImage+self.params['threshold'], cv2.CMP_GT) # CMP_GT is greater than 356 | self.threshed = dark+light 357 | 358 | convert_to_gray_if_necessary(self) 359 | 360 | # noise removal 361 | self.threshed = cv2.morphologyEx(self.threshed,cv2.MORPH_OPEN, kernel, iterations = 1) 362 | 363 | # sure background area 364 | #sure_bg = cv2.dilate(opening,kernel,iterations=3) 365 | 366 | # Finding sure foreground area 367 | #dist_transform = cv2.distanceTransform(opening,cv.CV_DIST_L2,3) 368 | #dist_transform = dist_transform / (np.max(dist_transform)) * 255 369 | #ret, sure_fg = cv2.threshold(dist_transform,0.2*dist_transform.max(),255,0) 370 | 371 | # Finding unknown region 372 | #sure_fg = np.uint8(sure_fg) 373 | 374 | #self.threshed = sure_fg 375 | erode_and_dialate(self) 376 | 377 | # publish the processed image 378 | c = cv2.cvtColor(np.uint8(self.threshed), cv2.COLOR_GRAY2BGR) 379 | # commented for now, because publishing unthresholded difference 380 | 381 | if OPENCV_VERSION == 2: # cv bridge not compatible with open cv 3, at least at this time 382 | img = self.cvbridge.cv2_to_imgmsg(c, 'bgr8') # might need to change to bgr for color cameras 383 | self.pubProcessedImage.publish(img) 384 | 385 | extract_and_publish_contours(self) 386 | #reset_background_if_difference_is_very_large(self, color) 387 | 388 | -------------------------------------------------------------------------------- /nodes/liveviewer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import division 3 | from optparse import OptionParser 4 | import roslib 5 | import rospy 6 | import rosparam 7 | import copy 8 | # import cv: open cv 1 not used 9 | 
import cv2 10 | import numpy as np 11 | import threading 12 | import dynamic_reconfigure.server 13 | from cv_bridge import CvBridge, CvBridgeError 14 | from sensor_msgs.msg import Image 15 | from std_msgs.msg import Float32, Header, String 16 | 17 | from multi_tracker.msg import Contourinfo, Contourlist 18 | from multi_tracker.msg import Trackedobject, Trackedobjectlist 19 | from multi_tracker.srv import resetBackgroundService, addImageToBackgroundService 20 | 21 | import image_processing 22 | 23 | from distutils.version import LooseVersion, StrictVersion 24 | print('Using open cv: ' + cv2.__version__) 25 | if StrictVersion(cv2.__version__.split('-')[0]) >= StrictVersion("3.0.0"): 26 | OPENCV_VERSION = 3 27 | print('Open CV 3') 28 | else: 29 | OPENCV_VERSION = 2 30 | print('Open CV 2') 31 | 32 | if 0:#OPENCV_VERSION == 3: 33 | raise ImportError('cv bridge not compatible with opencv 3, killing live viewer') 34 | 35 | # for basler ace cameras, use camera_aravis 36 | # https://github.com/ssafarik/camera_aravis 37 | # rosrun camera_aravis camnode 38 | # default image: /camera/image_raw 39 | 40 | # for firefley cameras, camera1394 does not provide timestamps but otherwise works 41 | # use point grey drivers 42 | # http://wiki.ros.org/pointgrey_camera_driver 43 | # rosrun pointgrey_camera_driver camera_node 44 | # default image: /camera/image_mono 45 | 46 | # Trajectory class to aid in drawing colored tracked trajectories with opencv 47 | class Trajectory(object): 48 | def __init__(self, objid): 49 | self.objid = objid 50 | self.positions = [] 51 | self.color = None 52 | self.covariances = [] 53 | self.popout = 0 54 | 55 | def draw_trajectory(img, pts, color, thickness): 56 | for i in range(len(pts)-3): 57 | try: 58 | cv2.line(img, (int(pts[i][0]), int(pts[i][1])), (int(pts[i+1][0]), int(pts[i+1][1])), color, thickness) 59 | except: 60 | pass 61 | print('could not draw trajectory line, length pts: ', len(pts), 'i: ', i) 62 | 63 | # The main tracking class, a ROS node 64 
| class LiveViewer: 65 | def __init__(self, nodenum): 66 | ''' 67 | Default image_topic for: 68 | Basler ace cameras with camera_aravis driver: camera/image_raw 69 | Pt Grey Firefly cameras with pt grey driver : camera/image_mono 70 | ''' 71 | # default parameters (parameter server overides them) 72 | self.params = { 'image_topic' : '/camera/image_mono', 73 | 'min_persistence_to_draw' : 10, 74 | 'max_frames_to_draw' : 50, 75 | 'camera_encoding' : 'mono8', # fireflies are bgr8, basler gige cams are mono8 76 | 'roi_l' : 0, 77 | 'roi_r' : -1, 78 | 'roi_b' : 0, 79 | 'roi_t' : -1, 80 | 'circular_mask_x' : 'none', 81 | 'circular_mask_y' : 'none', 82 | 'circular_mask_r' : 'none', 83 | } 84 | for parameter, value in self.params.items(): 85 | try: 86 | # allows image processed view to be overlaid with tracked objects 87 | p = '/multi_tracker/' + nodenum + '/liveviewer/' + parameter 88 | self.params[parameter] = rospy.get_param(p) 89 | except: 90 | try: 91 | p = '/multi_tracker/' + nodenum + '/tracker/' + parameter 92 | self.params[parameter] = rospy.get_param(p) 93 | except: 94 | print('Using default parameter: ', parameter, ' = ', value) 95 | 96 | # initialize the node 97 | rospy.init_node('liveviewer_' + nodenum) 98 | self.nodename = rospy.get_name().rstrip('/') 99 | self.nodenum = nodenum 100 | 101 | # initialize display 102 | self.window_name = 'liveviewer_' + nodenum 103 | self.subTrackedObjects = rospy.Subscriber('/multi_tracker/' + nodenum + '/tracked_objects', Trackedobjectlist, self.tracked_object_callback) 104 | self.subContours = rospy.Subscriber('/multi_tracker/' + nodenum + '/contours', Contourlist, self.contour_callback) 105 | 106 | self.cvbridge = CvBridge() 107 | self.tracked_trajectories = {} 108 | self.contours = None 109 | 110 | self.window_initiated = False 111 | 112 | # Subscriptions - subscribe to images, and tracked objects 113 | self.image_mask = None 114 | sizeImage = 128+1024*1024*3 # Size of header + data. 
115 | self.subImage = rospy.Subscriber(self.params['image_topic'], Image, self.image_callback, queue_size=5, buff_size=2*sizeImage, tcp_nodelay=True) 116 | 117 | # for adding images to background 118 | add_image_to_background_service_name = '/multi_tracker/' + self.nodenum + '/' + 'tracker/' + "add_image_to_background" 119 | rospy.wait_for_service(add_image_to_background_service_name) 120 | try: 121 | self.add_image_to_background = rospy.ServiceProxy(add_image_to_background_service_name, addImageToBackgroundService) 122 | except: 123 | print( 'could not connect to add image to background service - is tracker running?') 124 | 125 | def reset_background(self, service_call): 126 | self.reset_background_flag = True 127 | return 1 128 | 129 | def tracked_object_callback(self, tracked_objects): 130 | for trajec in self.tracked_trajectories.values(): 131 | trajec.popout = 1 132 | 133 | for tracked_object in tracked_objects.tracked_objects: 134 | if tracked_object.persistence > self.params['min_persistence_to_draw']: 135 | if tracked_object.objid not in self.tracked_trajectories.keys(): # create new object 136 | self.tracked_trajectories.setdefault(tracked_object.objid, Trajectory(tracked_object.objid)) 137 | self.tracked_trajectories[tracked_object.objid].color = np.random.randint(0,255,3).tolist() 138 | # update tracked objects 139 | self.tracked_trajectories[tracked_object.objid].covariances.append(tracked_object.covariance) 140 | self.tracked_trajectories[tracked_object.objid].positions.append([tracked_object.position.x, tracked_object.position.y]) 141 | 142 | # if it is a young object, let it grow to length 100 143 | if len(self.tracked_trajectories[tracked_object.objid].positions) < self.params['max_frames_to_draw']: 144 | self.tracked_trajectories[tracked_object.objid].popout = 0 145 | 146 | # cull old objects 147 | for objid, trajec in self.tracked_trajectories.items(): 148 | if trajec.popout: 149 | trajec.positions.pop(0) 150 | trajec.covariances.pop(0) 151 | if 
len(trajec.positions) <= 1: 152 | del(self.tracked_trajectories[objid]) 153 | 154 | def contour_callback(self, contours): 155 | self.contours = contours 156 | 157 | def image_callback(self, rosimg): 158 | # Convert the image. 159 | try: 160 | img = self.cvbridge.imgmsg_to_cv2(rosimg, 'passthrough') # might need to change to bgr for color cameras 161 | except CvBridgeError as e: 162 | rospy.logwarn ('Exception converting background image from ROS to opencv: %s' % e) 163 | img = np.zeros((320,240)) 164 | 165 | self.imgScaled = img[self.params['roi_b']:self.params['roi_t'], self.params['roi_l']:self.params['roi_r']] 166 | self.shapeImage = self.imgScaled.shape # (height,width) 167 | 168 | if self.params['circular_mask_x'] != 'none': 169 | if self.image_mask is None: 170 | self.image_mask = np.zeros_like(self.imgScaled) 171 | cv2.circle(self.image_mask,(self.params['circular_mask_x'], self.params['circular_mask_y']),int(self.params['circular_mask_r']),[1,1,1],-1) 172 | self.imgScaled = self.image_mask*self.imgScaled 173 | 174 | # Image for display 175 | if self.params['camera_encoding'] == 'mono8': 176 | try: 177 | self.imgOutput = cv2.cvtColor(self.imgScaled, cv2.COLOR_GRAY2RGB) 178 | except: 179 | self.imgOutput = self.imgScaled 180 | print("To get rid of this error warning, set rosparam /multi_tracker/1/liveviewer/camera_encoding to something other than mono8 (e.g. 
color)") 181 | elif self.params['camera_encoding'] == 'binary': 182 | self.imgOutput = self.imgScaled 183 | else: 184 | self.imgOutput = self.imgScaled 185 | 186 | 187 | # Draw ellipses from contours 188 | if self.contours is not None: 189 | for c, contour in enumerate(self.contours.contours): 190 | # b = contour.area / (np.pi*a) 191 | # b = ecc*a 192 | if contour.ecc != 0: # eccentricity of ellipse < 1 but > 0 193 | a = np.sqrt( contour.area / (np.pi*contour.ecc) ) 194 | b = contour.ecc*a 195 | else: # eccentricity of circle is 0 196 | a = 1 197 | b = 1 198 | center = (int(contour.x), int(contour.y)) 199 | angle = int(contour.angle) 200 | axes = (int(np.min([a,b])), int(np.max([a,b]))) 201 | cv2.ellipse(self.imgOutput, center, axes, angle, 0, 360, (0,255,0), 2 ) 202 | 203 | # Display the image | Draw the tracked trajectories 204 | for objid, trajec in self.tracked_trajectories.items(): 205 | if len(trajec.positions) > 5: 206 | draw_trajectory(self.imgOutput, trajec.positions, trajec.color, 2) 207 | cv2.circle(self.imgOutput,(int(trajec.positions[-1][0]),int(trajec.positions[-1][1])),int(trajec.covariances[-1]),trajec.color,2) 208 | cv2.imshow(self.window_name, self.imgOutput) 209 | 210 | if not self.window_initiated: # for some reason this approach works in opencv 3 instead of previous approach 211 | cv2.setMouseCallback(self.window_name, self.on_mouse_click) 212 | self.window_initiated = True 213 | 214 | ascii_key = cv2.waitKey(1) 215 | if ascii_key != -1: 216 | self.on_key_press(ascii_key) 217 | 218 | def on_mouse_click(self, event, x, y, flags, param): 219 | if event == cv2.EVENT_LBUTTONUP: 220 | print('clicked pixel: ', [x, y]) 221 | 222 | def on_key_press(self, ascii_key): 223 | key = chr(ascii_key) 224 | if key == 'a': 225 | resp = self.add_image_to_background() 226 | print('added image to background') 227 | 228 | def Main(self): 229 | while (not rospy.is_shutdown()): 230 | rospy.spin() 231 | cv2.destroyAllWindows() 232 | 233 | 
##################################################################################################### 234 | 235 | if __name__ == '__main__': 236 | parser = OptionParser() 237 | parser.add_option("--nodenum", type="str", dest="nodenum", default='1', 238 | help="node number, for example, if running multiple tracker instances on one computer") 239 | (options, args) = parser.parse_args() 240 | 241 | liveviewer = LiveViewer(options.nodenum) 242 | liveviewer.Main() 243 | -------------------------------------------------------------------------------- /nodes/record_user_input.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' 4 | This node records mouse clicks and keypresses to a csv file. Requires a image topic feed - though that requirement could be relaxed in the future. 5 | ''' 6 | 7 | 8 | from __future__ import division 9 | 10 | import numpy as np 11 | import cv 12 | import cv2 13 | 14 | from optparse import OptionParser 15 | 16 | import copy 17 | import rospy 18 | from cv_bridge import CvBridge, CvBridgeError 19 | from sensor_msgs.msg import Image 20 | 21 | import csv 22 | 23 | class ImageGUI(object): 24 | def __init__(self, filename, image_topic): 25 | rospy.init_node('record_user_input') 26 | 27 | # initialize display 28 | self.window_name = 'user_input' 29 | cv2.namedWindow(self.window_name,1) 30 | self.subImage = rospy.Subscriber(image_topic, Image, self.image_callback, queue_size=5, tcp_nodelay=True) 31 | self.cvbridge = CvBridge() 32 | 33 | # initialize file 34 | self.csvfile = open(filename, 'wb') 35 | self.datawrite = csv.writer(self.csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL) 36 | 37 | # user input 38 | cv2.setMouseCallback(self.window_name, self.on_mouse_click) 39 | self.mouse_position = [0,0] 40 | 41 | def image_callback(self, rosimg): 42 | # Convert the image. 
43 | try: 44 | img = self.cvbridge.imgmsg_to_cv2(rosimg, 'passthrough') # might need to change to bgr for color cameras 45 | except CvBridgeError, e: 46 | rospy.logwarn ('Exception converting background image from ROS to opencv: %s' % e) 47 | img = np.zeros((320,240)) 48 | cv2.imshow(self.window_name, img) 49 | ascii_key = cv2.waitKey(1) 50 | if ascii_key != -1: 51 | self.on_key_press(ascii_key) 52 | 53 | def run(self): 54 | while not rospy.is_shutdown(): 55 | rospy.spin() 56 | self.csvfile.close() 57 | cv2.destroyAllWindows() 58 | 59 | def on_mouse_click(self, event, x, y, flags, param): 60 | if event == cv2.EVENT_LBUTTONDOWN: 61 | data = ['EVENT_LBUTTONDOWN', rospy.get_time(), x, y, 'nokey'] 62 | self.datawrite.writerow(data) 63 | print data 64 | if event == cv2.EVENT_MOUSEMOVE: 65 | self.mouse_position = [x,y] 66 | 67 | def on_key_press(self, ascii_key): 68 | key = chr(ascii_key) 69 | x,y = self.mouse_position 70 | data = ['KEYPRESS', rospy.get_time(), x, y, key] 71 | self.datawrite.writerow(data) 72 | print data 73 | 74 | if __name__ == '__main__': 75 | 76 | parser = OptionParser() 77 | parser.add_option("--image", type="str", dest="image", default='', 78 | help="topic of image") 79 | 80 | parser.add_option("--filename", type="str", dest="filename", default='', 81 | help="name of file to which user input will be recorded") 82 | 83 | (options, args) = parser.parse_args() 84 | 85 | imageGUI = ImageGUI(options.filename, options.image) 86 | imageGUI.run() 87 | 88 | 89 | 90 | 91 | 92 | 93 | -------------------------------------------------------------------------------- /nodes/republish_pref_obj_data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ''' 3 | Republish a single object selected based on persistance. 
4 | Use standard ROS message types (Float32MultiArray for now) 5 | ''' 6 | from __future__ import division 7 | from optparse import OptionParser 8 | import roslib 9 | import rospy 10 | import numpy as np 11 | 12 | from std_msgs.msg import Float32MultiArray 13 | from multi_tracker.msg import Trackedobject, Trackedobjectlist 14 | 15 | 16 | # The main tracking class, a ROS node 17 | class PrefObjPicker: 18 | def __init__(self, nodenum, rate, simulate=False): 19 | ''' 20 | Default image_topic for: 21 | Basler ace cameras with camera_aravis driver: camera/image_raw 22 | Pt Grey Firefly cameras with pt grey driver : camera/image_mono 23 | ''' 24 | # default parameters (parameter server overides them) 25 | self.params = { 'min_persistence' : 300, # min number of frames 26 | 'min_travel' : 50, # min number of pixels object must travel, helps prevent noise triggers 27 | } 28 | for parameter, value in self.params.items(): 29 | try: 30 | p = '/multi_tracker/' + nodenum + '/prefobj/' + parameter 31 | self.params[parameter] = rospy.get_param(p) 32 | except: 33 | pass 34 | 35 | # initialize the node 36 | rospy.init_node('prefobj_' + nodenum) 37 | self.nodename = rospy.get_name().rstrip('/') 38 | self.nodenum = nodenum 39 | 40 | self.rate = rate 41 | self.msg = None 42 | 43 | self.prefObjId = None 44 | self.pref_obj_position_x = [] 45 | self.pref_obj_position_y = [] 46 | 47 | if not simulate: 48 | self.subTrackedObjects = rospy.Subscriber('/multi_tracker/' + nodenum + '/tracked_objects', Trackedobjectlist, self.tracked_object_callback) 49 | else: 50 | print 'Simulating!' 
51 | self.create_simulated_msg() 52 | self.pubPrefObj = rospy.Publisher('/multi_tracker/' + nodenum + '/prefobj', Float32MultiArray, queue_size=3) 53 | 54 | def create_simulated_msg(self): 55 | self.msg = Float32MultiArray() 56 | self.msg.data = [0, 0, 0, 0, 0] 57 | 58 | def tracked_object_callback(self, tracked_objects): 59 | 60 | obj_ids = [] 61 | persistances = [] 62 | index = [] 63 | 64 | i = -1 65 | for tracked_object in tracked_objects.tracked_objects: 66 | i += 1 67 | #if tracked_object.persistence > self.params['min_persistence']: 68 | persistances.append(tracked_object.persistence) 69 | obj_ids.append(tracked_object.objid) 70 | index.append(i) 71 | 72 | if len(persistances) > 0: 73 | p = np.argmax(persistances) 74 | 75 | if obj_ids[p] == self.prefObjId: # same object as before 76 | self.pref_obj_position_x.append(tracked_object.position.x) 77 | self.pref_obj_position_y.append(tracked_object.position.y) 78 | else: # new object 79 | self.prefObjId = obj_ids[p] 80 | self.pref_obj_position_x = [] 81 | self.pref_obj_position_y = [] 82 | 83 | tracked_object = tracked_objects.tracked_objects[p] 84 | 85 | 86 | if len(self.pref_obj_position_x) > 1: 87 | xtravel = np.max(np.abs(np.array(self.pref_obj_position_x) - tracked_object.position.x)) 88 | ytravel = np.max(np.abs(np.array(self.pref_obj_position_y) - tracked_object.position.y)) 89 | travel = np.sqrt(xtravel**2 + ytravel**2) 90 | else: 91 | travel = 0 92 | 93 | if travel > self.params['min_travel'] and persistances[p] > self.params['min_persistence']: 94 | self.msg = Float32MultiArray() 95 | self.msg.data = [tracked_object.objid, 96 | tracked_object.position.x, 97 | tracked_object.position.y, 98 | tracked_object.velocity.x, 99 | tracked_object.velocity.y] 100 | self.pref_obj_position_x.append(tracked_object.position.x) 101 | self.pref_obj_position_y.append(tracked_object.position.y) 102 | 103 | if self.rate == 0: 104 | self.pubPrefObj.publish(self.msg) 105 | else: 106 | self.msg = None 107 | 108 | else: 109 | 
self.msg = None 110 | 111 | def Main(self): 112 | if self.rate == 0: 113 | while (not rospy.is_shutdown()): 114 | rospy.spin() 115 | else: 116 | rate = rospy.Rate(self.rate) # 10hz 117 | while not rospy.is_shutdown(): 118 | if self.msg is not None: 119 | self.pubPrefObj.publish(self.msg) 120 | rate.sleep() 121 | 122 | ##################################################################################################### 123 | 124 | if __name__ == '__main__': 125 | parser = OptionParser() 126 | parser.add_option("--nodenum", type="str", dest="nodenum", default='1', 127 | help="node number, for example, if running multiple tracker instances on one computer") 128 | parser.add_option("--rate", type="float", dest="rate", default='0', 129 | help="rate at which to (re)publish preferred object location. default=0 means continuous.") 130 | parser.add_option("--simulate", action="store_true", dest="simulate", default=False, 131 | help="if simulate is true, will continuously send out a meaningless msg at the specified rate.") 132 | (options, args) = parser.parse_args() 133 | 134 | prefobjpicker = PrefObjPicker(options.nodenum, options.rate, simulate=options.simulate) 135 | prefobjpicker.Main() 136 | -------------------------------------------------------------------------------- /nodes/save_bag.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import division 3 | import rospy 4 | from optparse import OptionParser 5 | import tf 6 | import sys 7 | import time, os, subprocess 8 | import threading 9 | import numpy as np 10 | 11 | from sensor_msgs.msg import Image 12 | # import cv: open cv 1 not used 13 | from cv_bridge import CvBridge, CvBridgeError 14 | 15 | import imp 16 | 17 | ############################################################################### 18 | # 19 | class SaveBag: 20 | def __init__(self, config, nodenum): 21 | basename = config.basename 22 | directory = config.directory 23 | 
self.topics = config.topics 24 | try: 25 | self.record_length_seconds = config.record_length_hours*3600 26 | except: 27 | self.record_length_seconds = 24*3600 28 | self.time_start = time.time() 29 | 30 | try: 31 | filename = config.filename 32 | except: 33 | try: 34 | experiment_basename = rospy.get_param('/multi_tracker/' + nodenum + '/experiment_basename', 'none') 35 | if experiment_basename == 'none': 36 | experiment_basename = time.strftime("%Y%m%d_%H%M%S_N" + nodenum, time.localtime()) 37 | 38 | filename = experiment_basename + '_' + basename + '.bag' 39 | except: 40 | raise ValueError('Need to define filename in config, or use the multi_tracker infrastructure') 41 | 42 | # Make sure path exists. 43 | try: 44 | os.makedirs(directory) 45 | except OSError: 46 | pass 47 | 48 | self.filenameBag = os.path.expanduser(os.path.join(directory, filename)) 49 | self.processRosbag = None 50 | 51 | rospy.on_shutdown(self.OnShutdown_callback) 52 | 53 | def OnShutdown_callback(self): 54 | self.StopRecordingBag() 55 | 56 | def StartRecordingBag(self): 57 | rospy.logwarn('Saving bag file: %s' % (self.filenameBag)) 58 | cmdline = ['rosbag', 'record','-O', self.filenameBag] 59 | cmdline.extend(self.topics) 60 | print(cmdline) 61 | self.processRosbag = subprocess.Popen(cmdline, preexec_fn=subprocess.os.setpgrp) 62 | 63 | def StopRecordingBag(self): 64 | subprocess.os.killpg(self.processRosbag.pid, subprocess.signal.SIGINT) 65 | rospy.logwarn('Closed bag file.') 66 | 67 | def Main(self): 68 | savebag.StartRecordingBag() 69 | rate = rospy.Rate(0.01) 70 | while not rospy.is_shutdown(): 71 | t = time.time() - self.time_start 72 | if t > self.record_length_seconds: 73 | self.StopRecordingBag() 74 | return 75 | 76 | 77 | if __name__ == '__main__': 78 | parser = OptionParser() 79 | parser.add_option("--config", type="str", dest="config", default='', 80 | help="filename of configuration file") 81 | parser.add_option("--nodenum", type="str", dest="nodenum", default='1', 82 | help="node 
    def __init__(self, nodenum, info='data information', record_time_hrs=24):
        """Subscribe to tracked objects and open an hdf5 file to stream them into.

        Parameters
        ----------
        nodenum : str
            Tracker node number; used in the topic name, parameter paths and
            the output filename.
        info : str
            Free-form description stored as an attribute on the hdf5 file.
        record_time_hrs : int
            Recording duration; main() closes the file after this many hours.
        """
        # NOTE(review): this Subscriber is created before rospy.init_node()
        # is called (further below) -- confirm rospy tolerates this ordering.
        self.subTrackedObjects = rospy.Subscriber('multi_tracker/' + nodenum + '/tracked_objects', Trackedobjectlist, self.tracked_object_callback, queue_size=300)

        # Fall back to a timestamped basename if none was registered on the
        # parameter server (same scheme as set_basename_and_path.py).
        experiment_basename = rospy.get_param('/multi_tracker/' + nodenum + '/experiment_basename', 'none')
        if experiment_basename == 'none':
            experiment_basename = time.strftime("%Y%m%d_%H%M%S_N" + nodenum, time.localtime())

        filename = experiment_basename + '_trackedobjects.hdf5'
        home_directory = os.path.expanduser( rospy.get_param('/multi_tracker/' + nodenum + '/data_directory') )
        filename = os.path.join(home_directory, filename)
        print('Saving hdf5 data to: ', filename)
        self.time_start = time.time()
        self.record_time_hrs = record_time_hrs

        # NOTE(review): nothing ever appends to self.buffer -- only its
        # length is read (in main(), for the backlog warning). Presumably
        # self.array_buffer was intended there; verify.
        self.buffer = []
        self.array_buffer = []
        # set up thread locks
        self.lockParams = threading.Lock()
        # Guards array_buffer, which is filled by the subscriber callback
        # and drained by main().
        self.lockBuffer = threading.Lock()

        # The dataset grows in increments of chunk_size rows (see add_chunk).
        self.chunk_size = 5000
        self.hdf5 = h5py.File(filename, 'w')
        #self.hdf5.swmr_mode = True # helps prevent file corruption if closed improperly
        self.hdf5.attrs.create("info", info)

        # Field order here defines the column order of the structured dtype.
        self.data_to_save = [ 'objid',
                              'header.stamp.secs',
                              'header.stamp.nsecs',
                              'header.frame_id',
                              'position.x',
                              'position.y',
                              'position.z',
                              'velocity.x',
                              'velocity.y',
                              'velocity.z',
                              'angle',
                              'size',
                              'covariance',
                              'measurement.x',
                              'measurement.y',
                            ]
        # NOTE(review): 'header.frame_id' is declared int, but a ROS
        # header.frame_id is normally a string -- confirm the tracker
        # publishes numeric frame ids.
        self.data_format = { 'objid': int,
                             'header.stamp.secs': int,
                             'header.stamp.nsecs': int,
                             'header.frame_id': int,
                             'position.x': float,
                             'position.y': float,
                             'position.z': float,
                             'velocity.x': float,
                             'velocity.y': float,
                             'velocity.z': float,
                             'angle': float,
                             'size': float,
                             'covariance': float,
                             'measurement.x': float,
                             'measurement.y': float,
                           }

        # numpy structured dtype: one (name, type) pair per saved field.
        self.dtype = [(data,self.data_format[data]) for data in self.data_to_save]
        rospy.init_node('save_data_to_hdf5_' + nodenum)

        # Nx1 extensible dataset; 'line' tracks the next write position and
        # 'length' the currently allocated number of rows.
        self.hdf5.create_dataset('data', (self.chunk_size, 1), maxshape=(None,1), dtype=self.dtype)
        self.hdf5['data'].attrs.create('current_frame', 0)
        # NOTE(review): save_array_data writes starting at line+1, so with
        # line initialized to 0 the dataset's row 0 is never filled -- confirm
        # whether that sentinel row is intentional.
        self.hdf5['data'].attrs.create('line', 0)
        self.hdf5['data'].attrs.create('length', self.chunk_size)
save_array_data(self): 91 | newline = self.hdf5['data'].attrs.get('line') + 1 92 | nrows_to_add = len(self.array_buffer) 93 | 94 | self.hdf5['data'].attrs.modify('line', newline+nrows_to_add) 95 | if newline+nrows_to_add >= self.hdf5['data'].attrs.get('length')-50: 96 | self.hdf5.flush() 97 | self.add_chunk() 98 | 99 | self.hdf5['data'][newline:newline+nrows_to_add] = self.array_buffer 100 | self.array_buffer = [] 101 | 102 | def tracked_object_callback(self, tracked_objects): 103 | with self.lockBuffer: 104 | for tracked_object in tracked_objects.tracked_objects: 105 | a = np.array([( tracked_object.objid, 106 | tracked_object.header.stamp.secs, 107 | tracked_object.header.stamp.nsecs, 108 | tracked_object.header.frame_id, 109 | tracked_object.position.x, tracked_object.position.y, tracked_object.position.z, 110 | tracked_object.velocity.x, tracked_object.velocity.y, tracked_object.velocity.z, 111 | tracked_object.angle, 112 | tracked_object.size, 113 | tracked_object.covariance, 114 | tracked_object.measurement.x, tracked_object.measurement.y, 115 | )], dtype=self.dtype) 116 | self.array_buffer.append(a) 117 | 118 | def process_buffer(self): 119 | self.save_array_data() 120 | 121 | def main(self): 122 | atexit.register(self.stop_saving_data) 123 | while (not rospy.is_shutdown()): 124 | t = time.time() - self.time_start 125 | if t > self.record_time_hrs*3600: 126 | self.stop_saving_data() 127 | return 128 | with self.lockBuffer: 129 | time_now = rospy.Time.now() 130 | if len(self.array_buffer) > 0: 131 | self.process_buffer() 132 | pt = (rospy.Time.now()-time_now).to_sec() 133 | if len(self.buffer) > 9: 134 | rospy.logwarn("Data saving processing time exceeds acquisition rate. 
#!/usr/bin/env python
"""Register a timestamped experiment basename on the ROS parameter server.

Run once per tracker instance; the other multi_tracker nodes read
/multi_tracker/<nodenum>/experiment_basename to name their output files.
"""
from optparse import OptionParser
import roslib
import rospy
import rosparam
import time

#####################################################################################################

if __name__ == '__main__':
    option_parser = OptionParser()
    option_parser.add_option("--nodenum", type="str", dest="nodenum", default='1',
                             help="node number, for example, if running multiple tracker instances on one computer")
    options, _args = option_parser.parse_args()

    nodenum = options.nodenum
    # e.g. 20160412_134708_N1 -- the same fallback scheme the other nodes use.
    basename = time.strftime("%Y%m%d_%H%M%S_N" + nodenum, time.localtime())
    rospy.set_param('/multi_tracker/' + nodenum + '/experiment_basename', basename)
# The main tracking class, a ROS node
class Tracker:
    """Image-tracking ROS node.

    Subscribes to a camera image topic, buffers frames, and hands each frame
    to self.process_image. NOTE: process_image is NOT defined on this class;
    the __main__ block below loads an image-processing function from a
    configurable module and attaches it to the class before instantiation.
    """

    def __init__(self, nodenum):
        '''
        Default image_topic for:
            Basler ace cameras with camera_aravis driver: camera/image_raw
            Pt Grey Firefly cameras with pt grey driver : camera/image_mono

        nodenum (str): tracker instance number, used in all topic, service
        and parameter names so several trackers can share one ROS master.
        '''
        self.nodenum = nodenum
        # default parameters (parameter server overrides them)
        self.params = { 'image_topic' : '/camera/image_mono',
                        'threshold' : 20,
                        'backgroundupdate' : 0.001,
                        'medianbgupdateinterval' : 30,
                        'camera_encoding' : 'mono8', # fireflies are bgr8, basler gige cams are mono8
                        'erode' : 1,
                        'dilate' : 2,
                        'morph_open_kernel_size' : 3,
                        'max_change_in_frame' : 0.2,
                        'min_size' : 5,
                        'max_size' : 200,
                        'max_expected_area' : 500,
                        'liveview' : False,
                        'roi_l' : 0,
                        'roi_r' : -1,
                        'roi_b' : 0,
                        'roi_t' : -1,
                        'circular_mask_x' : 'none',
                        'circular_mask_y' : 'none',
                        'circular_mask_r' : 'none',
                        'use_moments' : True, # use moments for x,y,area instead of fitted ellipse
                        }
        # Overwrite each default with the parameter-server value when one
        # exists; otherwise keep the default and say so.
        for parameter, value in self.params.items():
            try:
                p = '/multi_tracker/' + nodenum + '/tracker/' + parameter
                self.params[parameter] = rospy.get_param(p)
            except:
                print('Using default parameter: ', parameter, ' = ', value)

        # Fall back to a timestamped basename if none was registered
        # (same scheme as set_basename_and_path.py).
        self.experiment_basename = rospy.get_param('/multi_tracker/' + nodenum + '/experiment_basename', 'none')
        if self.experiment_basename == 'none':
            self.experiment_basename = time.strftime("%Y%m%d_%H%M%S_N" + nodenum, time.localtime())

        # initialize the node
        rospy.init_node('multi_tracker_' + nodenum)
        self.nodename = rospy.get_name().rstrip('/')
        self.time_start = time.time()

        # Background reset services: callbacks only raise flags; the actual
        # work is presumably done by the attached process_image function --
        # it is not visible in this file.
        self.reset_background_flag = False
        self.add_image_to_background_flag = False
        self.reset_background_service = rospy.Service('/multi_tracker/' + nodenum + '/' + 'tracker/' + "reset_background", resetBackgroundService, self.reset_background)
        self.add_image_to_background_service = rospy.Service('/multi_tracker/' + nodenum + '/' + 'tracker/' + "add_image_to_background", addImageToBackgroundService, self.add_image_to_background)

        # init cvbridge
        self.cvbridge = CvBridge()
        self.imgScaled = None
        self.backgroundImage = None

        # buffer locking: lockBuffer guards image_buffer, which is filled by
        # the subscriber callback and drained by Main().
        self.lockBuffer = threading.Lock()
        self.image_buffer = []
        self.framestamp = None

        # Publishers - publish contours
        self.pubContours = rospy.Publisher('/multi_tracker/' + nodenum + '/contours', Contourlist, queue_size=300)

        # Subscriptions - subscribe to images, and tracked objects
        self.image_mask = None
        sizeImage = 128+1024*1024*3 # Size of header + data.
        self.subImage = rospy.Subscriber(self.params['image_topic'], Image, self.image_callback, queue_size=60, buff_size=2*sizeImage, tcp_nodelay=True)
        self.pubProcessedImage = rospy.Publisher('/multi_tracker/' + nodenum + '/processed_image', Image, queue_size=5)

    def image_callback(self, rosimg):
        # Runs on the subscriber thread: just queue the frame; Main() does
        # the processing so the callback stays fast.
        with self.lockBuffer:
            self.image_buffer.append(rosimg)

    def reset_background(self, service_call):
        # Service handler: raise the flag; consumed elsewhere (process_image).
        self.reset_background_flag = True
        return 1

    def add_image_to_background(self, service_call):
        # Service handler: raise the flag; consumed elsewhere (process_image).
        self.add_image_to_background_flag = True
        return 1

    def process_image_buffer(self, rosimg):
        """Convert one queued ROS image, crop it, and run process_image on it."""
        # Inter-frame interval from consecutive header stamps; 30 ms fallback
        # for the very first frame.
        if self.framestamp is not None:
            self.dtCamera = (rosimg.header.stamp - self.framestamp).to_sec()
        else:
            self.dtCamera = 0.03
        self.framenumber = rosimg.header.seq
        self.framestamp = rosimg.header.stamp

        # Convert the image.
        try:
            img = self.cvbridge.imgmsg_to_cv2(rosimg, 'passthrough') # might need to change to bgr for color cameras
        except CvBridgeError as e:
            rospy.logwarn ('Exception converting background image from ROS to opencv: %s' % e)
            # Substitute a blank frame so the pipeline keeps running.
            img = np.zeros((320,240))

        if img is None:
            return

        # Crop to the configured region of interest. Note the defaults
        # (0:-1) exclude the image's last row and column.
        self.imgScaled = img[self.params['roi_b']:self.params['roi_t'], self.params['roi_l']:self.params['roi_r']]
        self.shapeImage = self.imgScaled.shape # (height,width)

        # Drop the background model if the frame geometry changed
        # (e.g. ROI or camera settings updated at runtime).
        if self.backgroundImage is not None:
            if self.backgroundImage.shape != self.imgScaled.shape:
                self.backgroundImage = None
                self.reset_background_flag = True

        ########### Call to image processing function ##############################################################
        self.process_image() # must be defined separately - see "main" code at the bottom of this script
        ############################################################################################################

    def Main(self):
        """Drain the image buffer until shutdown or 24 hours have elapsed."""
        while (not rospy.is_shutdown()):
            t = time.time() - self.time_start
            # Hard stop after 24 hours of running.
            if t > 24*3600:
                return
            with self.lockBuffer:
                time_now = rospy.Time.now()
                if len(self.image_buffer) > 0:
                    # FIFO: oldest frame first.
                    self.process_image_buffer(self.image_buffer.pop(0))
                pt = (rospy.Time.now()-time_now).to_sec()
                # More than 9 frames queued => processing can't keep up.
                if len(self.image_buffer) > 9:
                    rospy.logwarn("Tracking processing time exceeds acquisition rate. Processing time: %f, Buffer: %d", pt, len(self.image_buffer))
        cv2.destroyAllWindows()
13 | 14 | 15 | 16 | MIT 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | Floris van Breugel 29 | Steve Safarik 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | catkin 44 | geometry_msgs 45 | roscpp 46 | rospy 47 | std_msgs 48 | geometry_msgs 49 | roscpp 50 | rospy 51 | std_msgs 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /pointgreyusb3_notes.md: -------------------------------------------------------------------------------- 1 | To use point grey / FLIR USB3 cameras you can use: 2 | 3 | https://github.com/ros-drivers/pointgrey_camera_driver 4 | 5 | I can confirm that installing that from source on ubuntu 18.04.2 running melodic does work, after also installing FlyCapture 2.13.3.31 SDK - Linux Ubuntu 18.04 (64-bit) — 10/25/2018. 6 | 7 | Tested with a blackfly or flea USB3 (I can't recall how to tell which one). 8 | 9 | https://www.flir.com/products/flycapture-sdk/ 10 | -- OR -- 11 | https://flir.app.boxcn.net/v/Flycapture2SDK/folder/72274730742 12 | 13 | It may be necessary to install some other packages first (see: https://www.ptgrey.com/tan/10548): 14 | `user$: sudo apt-get install libraw1394-11 libgtkmm-2.4-1v5 libglademm-2.4-1v5 libgtkglextmm-x11-1.2-dev libgtkglextmm-x11-1.2 libusb-1.0-0` 15 | 16 | Then run 17 | `user$: sudo sh install_flycapture.sh` 18 | where install_flycapture.sh is in the FlyCapture 2.13.3.31 SDK download. 19 | 20 | Also note that for USB3 to work properly, you may need to follow these instructions (configuring USB-FS) to permanently modify the USB-FS memory limit: 21 | 22 | In a text editor, open the /etc/default/grub file. 23 | Find 24 | `GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"` 25 | Replace with 26 | `GRUB_CMDLINE_LINUX_DEFAULT="quiet splash usbcore.usbfs_memory_mb=1000"` 27 | Update grub with these settings: 28 | `$ sudo update-grub` 29 | Reboot and test a USB camera. 
from distutils.core import setup

# Installer for the offline analysis tools (multi_tracker_analysis).
# The ROS nodes themselves are built with catkin (see CMakeLists.txt and
# package.xml); this only packages the pure-Python analysis package.
setup(
    name='MultiTrackerAnalysis',
    version='0.0.1',
    author='Floris van Breugel',
    author_email='floris@caltech.edu',
    packages = ['multi_tracker_analysis'],
    license='BSD',
    description='Analysis scripts for data collected with multi tracker',
    # NOTE: reads README.md at install time, so run from the repo root.
    long_description=open('README.md').read(),
)
https://raw.githubusercontent.com/florisvb/multi_tracker/f2bef123ab5f8d9540e1c9af35728a86ea3254f4/solidworks/fly_arena_parts/light_panel_base_12x12_base.SLDDRW -------------------------------------------------------------------------------- /solidworks/fly_arena_parts/light_panel_base_12x12_base.SLDPRT: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/florisvb/multi_tracker/f2bef123ab5f8d9540e1c9af35728a86ea3254f4/solidworks/fly_arena_parts/light_panel_base_12x12_base.SLDPRT -------------------------------------------------------------------------------- /solidworks/fly_arena_parts/light_panel_clamp.SLDDRW: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/florisvb/multi_tracker/f2bef123ab5f8d9540e1c9af35728a86ea3254f4/solidworks/fly_arena_parts/light_panel_clamp.SLDDRW -------------------------------------------------------------------------------- /solidworks/fly_arena_parts/light_panel_clamp.SLDPRT: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/florisvb/multi_tracker/f2bef123ab5f8d9540e1c9af35728a86ea3254f4/solidworks/fly_arena_parts/light_panel_clamp.SLDPRT -------------------------------------------------------------------------------- /srv/addImageToBackgroundService.srv: -------------------------------------------------------------------------------- 1 | --- 2 | bool result 3 | -------------------------------------------------------------------------------- /srv/resetBackgroundService.srv: -------------------------------------------------------------------------------- 1 | --- 2 | bool result 3 | --------------------------------------------------------------------------------