├── Event_sensor ├── ICNS_extern │ ├── dsi.egg-info │ │ ├── dependency_links.txt │ │ ├── top_level.txt │ │ ├── PKG-INFO │ │ └── SOURCES.txt │ ├── dsi.cp39-win_amd64.pyd │ ├── dsi.cp311-win_amd64.pyd │ ├── dist │ │ └── dsi-1.0-py3.10-win-amd64.egg │ ├── dsi.cpython-38-x86_64-linux-gnu.so │ ├── compile_test.sh │ ├── setup_cpp.py │ └── test_setup_cpp.py ├── src │ ├── asynchronous-spatio-temporal-spike-metric-master │ │ ├── spike_metric │ │ │ ├── __init__.py │ │ │ ├── fft_3d_convolution.py │ │ │ ├── spike_train_metric.py │ │ │ ├── cubes_3d_kernel.py │ │ │ └── spike_cube_metric.py │ │ ├── datasets │ │ │ ├── mnist_0_scale04_0001.aedat │ │ │ ├── mnist_0_scale04_0001_qt1.aedat │ │ │ ├── mnist_0_scale08_0001_qt1.aedat │ │ │ ├── mnist_0_scale16_0001_qt1.aedat │ │ │ ├── increasing_noise │ │ │ │ ├── fft_l1_distances.pkl │ │ │ │ ├── kernel_cube_distances.pkl │ │ │ │ └── kernel_train_distances.pkl │ │ │ ├── cyclic_displacement │ │ │ │ ├── fft_l1_distances.pkl │ │ │ │ ├── kernel_cube_distances.pkl │ │ │ │ └── kernel_train_distances.pkl │ │ │ ├── search_moving_target │ │ │ │ ├── noise_ratio_steps.pkl │ │ │ │ ├── kernel_cube_errors.pkl │ │ │ │ ├── kernel_train_errors.pkl │ │ │ │ └── kernel_train_pif_errors.pkl │ │ │ ├── simulating_dataset │ │ │ │ ├── events_100_0_1000.pkl │ │ │ │ ├── events_100_2_1000.pkl │ │ │ │ ├── events_2d_true_target.pkl │ │ │ │ ├── events_3d_true_target.pkl │ │ │ │ ├── events_2d_trigonometric.pkl │ │ │ │ ├── events_3d_trigonometric.pkl │ │ │ │ ├── events_3d_trigonometric.png │ │ │ │ └── generating_2d_trigornometric.png │ │ │ ├── changing_polarity │ │ │ │ ├── kernel_cube_distances.pkl │ │ │ │ ├── kernel_train_distances.pkl │ │ │ │ ├── polarity_changing_steps.pkl │ │ │ │ ├── kernel_train_pid_distances.pkl │ │ │ │ └── kernel_train_pif_distances.pkl │ │ │ ├── random_removing_spikes │ │ │ │ ├── fft_l1_distances.pkl │ │ │ │ ├── kernel_cube_distances.pkl │ │ │ │ └── kernel_train_distances.pkl │ │ │ └── temporal_spatial_changing │ │ │ │ ├── fft_l1_distances.pkl │ │ │ │ ├── kernel_cube_distances.pkl │ │ │ │ └── kernel_train_distances.pkl │ │ ├── README.md │ │ ├── event_process │ │ │ ├── generating_spike_train.py │ │ │ ├── spike_train_processing.py │ │ │ ├── events_to_frames.py │ │ │ ├── read_dvs.py │ │ │ ├── generating_spike_cube.py │ │ │ └── event_processing.py │ │ ├── testing_effectiveness │ │ │ ├── show_structure_metrics.py │ │ │ ├── show_polarity_metrics.py │ │ │ ├── searching_error_curves.py │ │ │ └── searching_trigonometric_target.py │ │ ├── main_script.py │ │ ├── spike_train_distances_curves.py │ │ └── measure_spike_trains.py │ ├── dsi.cp39-win_amd64.pyd │ ├── dsi.cp311-win_amd64.pyd │ ├── sgrb2lum.cp39-win_amd64.pyd │ ├── noise_calibration.py │ ├── metric_figure.py │ ├── example_images_to_events.py │ ├── example_video_to_events.py │ ├── event_display.py │ ├── arbiter.py │ ├── ExrRead.py │ └── event_buffer_old.py ├── dsi.cp39-win_amd64.pyd ├── data │ └── noise_neg_161lux.npy ├── utils │ ├── sgrb2lum.cp39-win_amd64.pyd │ ├── CMakeLists.txt │ ├── test.py │ ├── setup_display.py │ └── bind_display.cpp └── event_tools.py ├── assets └── deblur.jpg ├── images ├── real_00005.png ├── real_00025.png ├── real_00045.png ├── real_00065.png ├── real_00085.png ├── sim_00005.png ├── sim_00025.png ├── sim_00045.png ├── sim_00065.png └── sim_00085.png ├── .gitignore ├── ViewDepth.py ├── environment.yml ├── .gitmodules ├── lpipsPyTorch ├── __init__.py └── modules │ ├── utils.py │ ├── lpips.py │ └── networks.py ├── dataset_utils ├── generate_dataset.py ├── video2img.py ├── imgs2video.py ├── change_bg.py 
├── blurry_gen.py ├── e2_timestap.py └── tummie.py ├── utils ├── system_utils.py ├── image_utils.py ├── graphics_utils.py ├── camera_utils.py ├── general_utils.py └── sh_utils.py ├── generate_dataset.py ├── scene ├── cameras.py └── __init__.py ├── gaussian_renderer └── network_gui.py ├── full_eval.py ├── arguments └── __init__.py ├── metrics.py ├── convert.py └── Readme.md /Event_sensor/ICNS_extern/dsi.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/dsi.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | dsi 2 | -------------------------------------------------------------------------------- /assets/deblur.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/assets/deblur.jpg -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/spike_metric/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /images/real_00005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/real_00005.png -------------------------------------------------------------------------------- /images/real_00025.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/real_00025.png -------------------------------------------------------------------------------- /images/real_00045.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/real_00045.png -------------------------------------------------------------------------------- /images/real_00065.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/real_00065.png -------------------------------------------------------------------------------- /images/real_00085.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/real_00085.png -------------------------------------------------------------------------------- /images/sim_00005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/sim_00005.png -------------------------------------------------------------------------------- /images/sim_00025.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/sim_00025.png -------------------------------------------------------------------------------- /images/sim_00045.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/sim_00045.png -------------------------------------------------------------------------------- /images/sim_00065.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/sim_00065.png -------------------------------------------------------------------------------- /images/sim_00085.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/images/sim_00085.png -------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/dsi.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: dsi 3 | Version: 1.0 4 | Summary: DVS simu 5 | -------------------------------------------------------------------------------- /Event_sensor/dsi.cp39-win_amd64.pyd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/dsi.cp39-win_amd64.pyd -------------------------------------------------------------------------------- /Event_sensor/data/noise_neg_161lux.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/data/noise_neg_161lux.npy -------------------------------------------------------------------------------- /Event_sensor/src/dsi.cp39-win_amd64.pyd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/dsi.cp39-win_amd64.pyd -------------------------------------------------------------------------------- /Event_sensor/src/dsi.cp311-win_amd64.pyd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/dsi.cp311-win_amd64.pyd -------------------------------------------------------------------------------- /Event_sensor/src/sgrb2lum.cp39-win_amd64.pyd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/sgrb2lum.cp39-win_amd64.pyd -------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/dsi.cp39-win_amd64.pyd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/ICNS_extern/dsi.cp39-win_amd64.pyd -------------------------------------------------------------------------------- /Event_sensor/utils/sgrb2lum.cp39-win_amd64.pyd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/utils/sgrb2lum.cp39-win_amd64.pyd -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .vscode 3 | output 4 | build 5 | diff_rasterization/diff_rast.egg-info 6 | diff_rasterization/dist 7 | tensorboard_3d 8 | screenshots -------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/dsi.cp311-win_amd64.pyd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/ICNS_extern/dsi.cp311-win_amd64.pyd 
-------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/dist/dsi-1.0-py3.10-win-amd64.egg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/ICNS_extern/dist/dsi-1.0-py3.10-win-amd64.egg -------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/dsi.cpython-38-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/ICNS_extern/dsi.cpython-38-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/dsi.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | setup_cpp.py 2 | simu_cpp.cpp 3 | dsi.egg-info/PKG-INFO 4 | dsi.egg-info/SOURCES.txt 5 | dsi.egg-info/dependency_links.txt 6 | dsi.egg-info/top_level.txt -------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/compile_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | rm dsi.cpython-37m-x86_64-linux-gnu.so 3 | python setup_cpp.py build_ext --inplace 4 | python setup_cpp.py install 5 | python test_setup_cpp.py 6 | 7 | -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/mnist_0_scale04_0001.aedat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/mnist_0_scale04_0001.aedat -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/mnist_0_scale04_0001_qt1.aedat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/mnist_0_scale04_0001_qt1.aedat -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/mnist_0_scale08_0001_qt1.aedat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/mnist_0_scale08_0001_qt1.aedat -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/mnist_0_scale16_0001_qt1.aedat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/mnist_0_scale16_0001_qt1.aedat -------------------------------------------------------------------------------- /Event_sensor/utils/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package(OpenCV REQUIRED) 2 | include_directories( ${OpenCV_INCLUDE_DIRS}) 3 | add_executable(Display /home/lanpokn/Documents/2022/pbrt/pbrt-v4/pbrt-v4/IEBCS-main/cpp/utils/display.cpp) 4 | 
target_link_libraries(Display ${OpenCV_LIBS}) -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/increasing_noise/fft_l1_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/increasing_noise/fft_l1_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/cyclic_displacement/fft_l1_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/cyclic_displacement/fft_l1_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/increasing_noise/kernel_cube_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/increasing_noise/kernel_cube_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/search_moving_target/noise_ratio_steps.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/search_moving_target/noise_ratio_steps.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_100_0_1000.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_100_0_1000.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_100_2_1000.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_100_2_1000.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/kernel_cube_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/kernel_cube_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/kernel_train_distances.pkl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/kernel_train_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/increasing_noise/kernel_train_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/increasing_noise/kernel_train_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/random_removing_spikes/fft_l1_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/random_removing_spikes/fft_l1_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/search_moving_target/kernel_cube_errors.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/search_moving_target/kernel_cube_errors.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/search_moving_target/kernel_train_errors.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/search_moving_target/kernel_train_errors.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_2d_true_target.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_2d_true_target.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_3d_true_target.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_3d_true_target.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/polarity_changing_steps.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/polarity_changing_steps.pkl -------------------------------------------------------------------------------- 
/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/cyclic_displacement/kernel_cube_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/cyclic_displacement/kernel_cube_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/cyclic_displacement/kernel_train_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/cyclic_displacement/kernel_train_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_2d_trigonometric.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_2d_trigonometric.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_3d_trigonometric.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_3d_trigonometric.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_3d_trigonometric.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/events_3d_trigonometric.png -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/temporal_spatial_changing/fft_l1_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/temporal_spatial_changing/fft_l1_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/kernel_train_pid_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/kernel_train_pid_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/kernel_train_pif_distances.pkl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/changing_polarity/kernel_train_pif_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/random_removing_spikes/kernel_cube_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/random_removing_spikes/kernel_cube_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/random_removing_spikes/kernel_train_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/random_removing_spikes/kernel_train_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/search_moving_target/kernel_train_pif_errors.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/search_moving_target/kernel_train_pif_errors.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/generating_2d_trigornometric.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/simulating_dataset/generating_2d_trigornometric.png -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/temporal_spatial_changing/kernel_cube_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/temporal_spatial_changing/kernel_cube_distances.pkl -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/temporal_spatial_changing/kernel_train_distances.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lanpokn/Event-3DGS/HEAD/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/datasets/temporal_spatial_changing/kernel_train_distances.pkl -------------------------------------------------------------------------------- /ViewDepth.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | image_path = "D:/2024/3DGS/gaussian-splatting/output/myset/train/ours_7000/depth/00000.png" 3 | 4 | # Read the image 5 | img = cv2.imread(image_path) 6 | # Check whether the image was read successfully 7 | if img is None: 8 | print(f"Failed to read image: {image_path}") 9 | img = img/10 # scale depth values down for display 10 | 11 | # Display the image 12 | cv2.imshow("Image Viewer", img) 13 | 14 | # Wait for any key press, then close the window 15 | cv2.waitKey(0) 16
| cv2.destroyAllWindows() -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: gaussian_splatting 2 | channels: 3 | - pytorch 4 | - conda-forge 5 | - defaults 6 | dependencies: 7 | - cudatoolkit=11.6 8 | - plyfile=0.8.1 9 | - python=3.7.13 10 | - pip=22.3.1 11 | - pytorch=1.12.1 12 | - torchaudio=0.12.1 13 | - torchvision=0.13.1 14 | - tqdm 15 | - pip: 16 | - submodules/diff-gaussian-rasterization 17 | - submodules/simple-knn -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "submodules/simple-knn"] 2 | path = submodules/simple-knn 3 | url = https://gitlab.inria.fr/bkerbl/simple-knn.git 4 | [submodule "submodules/diff-gaussian-rasterization"] 5 | path = submodules/diff-gaussian-rasterization 6 | url = https://github.com/graphdeco-inria/diff-gaussian-rasterization 7 | [submodule "SIBR_viewers"] 8 | path = SIBR_viewers 9 | url = https://gitlab.inria.fr/sibr/sibr_core.git 10 | -------------------------------------------------------------------------------- /Event_sensor/utils/test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | # Specify the path to the OpenCV library 4 | opencv_path = "C:/Users//Desktop/code/opencv/build/x64/vc15/bin" 5 | import os 6 | # Add the library path 7 | os.add_dll_directory(opencv_path)  # assumed intent: make the OpenCV DLLs findable (Windows, Python 3.8+) 8 | import sgrb2lum 9 | 10 | # Load an SRGB image 11 | srgb = cv2.imread("image.png", cv2.IMREAD_COLOR) 12 | srgb = srgb.astype(np.float32) 13 | 14 | # Convert SRGB to luminance 15 | luminance = sgrb2lum.SGRB2Luminance(srgb) 16 | 17 | print(luminance) -------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/setup_cpp.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup, Extension 2 | import numpy.distutils.misc_util 3 | 4 | c_ext = Extension("dsi", 5 | sources=["simu_cpp.cpp"], 6 | include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs(), 7 | extra_compile_args=['-std=c++17']) 8 | 9 | c_ext.language = 'c++' 10 | setup( 11 | name='dsi', 12 | version='1.0', 13 | description='DVS simu', 14 | ext_modules=[c_ext], 15 | ) 16 | -------------------------------------------------------------------------------- /lpipsPyTorch/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .modules.lpips import LPIPS 4 | 5 | 6 | def lpips(x: torch.Tensor, 7 | y: torch.Tensor, 8 | net_type: str = 'alex', 9 | version: str = '0.1'): 10 | r"""Function that measures 11 | Learned Perceptual Image Patch Similarity (LPIPS). 12 | 13 | Arguments: 14 | x, y (torch.Tensor): the input tensors to compare. 15 | net_type (str): the network type to compare the features: 16 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'. 17 | version (str): the version of LPIPS. Default: 0.1.
18 | """ 19 | device = x.device 20 | criterion = LPIPS(net_type, version).to(device) 21 | return criterion(x, y) 22 | -------------------------------------------------------------------------------- /dataset_utils/generate_dataset.py: -------------------------------------------------------------------------------- 1 | from Event_sensor.event_tools import * 2 | 3 | # generate_images_accumu_eds("D:/2024/3DGS/Nerf_Event/data/eds/09_ziggy_flying_pieces/",13513*10,140,1) 4 | # generate_images_accumu_edslike("D:/2024/3DGS/Nerf_Event/data/eds/09_ziggy_flying_pieces/",13513*10,140,1) 5 | # generate_images_eds("D:/2024/3DGS/Nerf_Event/data/eds/09_ziggy_flying_pieces/",13300*10,140) 6 | # generate_images_accumu_T("D:/2024/3DGS/e2calib/trans_1mps_H_R2L",21153,51,1) 7 | # generate_images_accumu_Tumvie("D:/2024/3DGS/Nerf_Event/data/tumvie/mocap-desk2-events_left",52631,424,1) 8 | generate_images("D:/2024/3DGS/dataset/nerf_synthetic/mic_colmap_easy/Esim_display",1000,104) 9 | # generate_images_accumu("D:/2024/3DGS/dataset/nerf_synthetic/mic_colmap_easy/Esim",1000,104) 10 | # generate_images_accumu_volt("D:/2024/3DGS/dataset/nerf_synthetic/mic_colmap_easy/",1000,104) -------------------------------------------------------------------------------- /utils/system_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | from errno import EEXIST 13 | from os import makedirs, path 14 | import os 15 | 16 | def mkdir_p(folder_path): 17 | # Creates a directory. equivalent to using mkdir -p on the command line 18 | try: 19 | makedirs(folder_path) 20 | except OSError as exc: # Python >2.5 21 | if exc.errno == EEXIST and path.isdir(folder_path): 22 | pass 23 | else: 24 | raise 25 | 26 | def searchForMaxIteration(folder): 27 | saved_iters = [int(fname.split("_")[-1]) for fname in os.listdir(folder)] 28 | return max(saved_iters) 29 | -------------------------------------------------------------------------------- /Event_sensor/utils/setup_display.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, Extension 2 | import numpy 3 | import cv2 4 | 5 | # These paths should be changed to your own paths 6 | module = Extension("sgrb2lum", 7 | sources=["D:/2023/computional imaging/my_pbrt/IEBCS-main/cpp/utils/display.cpp"], 8 | include_dirs=[numpy.get_include(), "C:/Users//Desktop/code/opencv/build/include"], 9 | library_dirs=["C:/Users//Desktop/code/opencv/build/x64/vc16/bin", "C:/Users//Desktop/code/opencv/build/x64/vc16/lib"], 10 | libraries=["opencv_world480"], 11 | # extra_compile_args=["/std:c++11"], 12 | # extra_link_args=["/NODEFAULTLIB:MSVCRT", "/NODEFAULTLIB:LIBCMT"], 13 | language="c++") 14 | 15 | setup( 16 | name="sgrb2lum", 17 | version="1.0", 18 | description="Converts SRGB to luminance", 19 | ext_modules=[module] 20 | ) -------------------------------------------------------------------------------- /generate_dataset.py: -------------------------------------------------------------------------------- 1 | # It has been deprecated due to the poor stability of the pure integration method. 
2 | # from Event_sensor.event_tools import * 3 | 4 | # generate_images_accumu_eds("D:/2024/3DGS/Nerf_Event/data/eds/09_ziggy_flying_pieces/",13513*10,140,1) 5 | # generate_images_accumu_edslike("D:/2024/3DGS/Nerf_Event/data/eds/09_ziggy_flying_pieces/",13513*10,140,1) 6 | # generate_images_eds("D:/2024/3DGS/Nerf_Event/data/eds/09_ziggy_flying_pieces/",13300*10,140) 7 | # generate_images_accumu_T("D:/2024/3DGS/e2calib/trans_1mps_H_R2L",21153,51,1) 8 | # generate_images_accumu_Tumvie("D:/2024/3DGS/Nerf_Event/data/tumvie/mocap-desk2-events_left",52631,424,1) 9 | # generate_images("D:/2024/3DGS/dataset/nerf_synthetic/lego/raw2",200,67,800,800) 10 | # generate_images_accumu("D:/2024/3DGS/dataset/nerf_synthetic/mic_colmap_easy/Esim",1000,104) 11 | # generate_images_accumu_volt("D:/2024/3DGS/dataset/nerf_synthetic/mic_colmap_easy/",1000,104) 12 | -------------------------------------------------------------------------------- /Event_sensor/src/noise_calibration.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Load the .npy file 4 | # this noise does not affect blur, almost useless 5 | # TODO: I don't understand it 6 | data1 = np.load('D:/2023/computional imaging/my_pbrt/IEBCS-main/data/noise_pos_0.1lux.npy') 7 | 8 | # Perform any necessary edits on the data array 9 | # ... 10 | # a = data[0] 11 | # b = data[0][0] 12 | # c = data[0][0][0] 13 | # print(a) 14 | # print(b) 15 | # print(c) 16 | data2 = np.load('D:/2023/computional imaging/my_pbrt/IEBCS-main/data/noise_pos_161lux.npy') 17 | # Check if both arrays have the same shape 18 | if data1.shape == data2.shape: 19 | # Calculate the average 20 | average_data = np.average([data1, data2], axis=0) 21 | # Save the average data to a new file 22 | np.save('average_161_0.1_data.npy', average_data) 23 | print("Average data saved successfully.") 24 | else: 25 | print("Error: The shapes of data1 and data2 are not the same.") -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/README.md: -------------------------------------------------------------------------------- 1 | # Asynchronous Spatio-Temporal Spike Metric for Event Cameras 2 | 3 | This work is implemented by [Jianing Li](https://jianing-li.github.io), Yihua Fu, Siwei Dong, Zhaofei Yu, [Tiejun Huang](https://scholar.google.com/citations?user=knvEK4AAAAAJ&hl=zh-CN) and [Yonghong Tian](https://scholar.google.com/citations?user=fn6hJx0AAAAJ&hl=zh-CN) 4 | 5 | 6 | ### Introduction 7 | In this repository, we propose an asynchronous spatio-temporal spike metric that considers both the spatio-temporal structural properties and the polarity attribute of event cameras. To this end, a conditional probability function is first introduced to describe the distribution and polarity prior in marked spatio-temporal point processes (MSTPPs). A 3D Gaussian kernel is then defined to capture the spatio-temporal structure; it transforms discrete spikes into a conditional intensity function in a reproducing kernel Hilbert space (RKHS). Finally, the distance between asynchronous spikes is quantified by the inner product in the RKHS.
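As a minimal sketch of the kernel-distance idea above (an editor's illustration under simplifying assumptions, not the repository's implementation in `spike_metric/`; the polarity weighting is omitted and all names and bandwidths are illustrative), the squared RKHS distance between two event sets reduces to kernel sums over event pairs:

```python
# Sketch: squared RKHS distance between two event sets under a 3D Gaussian
# kernel, D(A, B)^2 = <A, A> + <B, B> - 2<A, B>. Events are (N, 3) float
# arrays of (x, y, t); the sigmas are illustrative bandwidths.
import numpy as np

def cross_term(ev_a, ev_b, sx=10.0, sy=10.0, st=500.0):
    d = ev_a[:, None, :] - ev_b[None, :, :]            # pairwise differences
    q = (d[..., 0] / sx) ** 2 + (d[..., 1] / sy) ** 2 + (d[..., 2] / st) ** 2
    return np.exp(-0.5 * q).sum()                      # sum of kernel values

def kernel_distance_sq(ev_a, ev_b):
    return (cross_term(ev_a, ev_a) + cross_term(ev_b, ev_b)
            - 2.0 * cross_term(ev_a, ev_b))
```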
8 | 9 | -------------------------------------------------------------------------------- /lpipsPyTorch/modules/utils.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch 4 | 5 | 6 | def normalize_activation(x, eps=1e-10): 7 | norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True)) 8 | return x / (norm_factor + eps) 9 | 10 | 11 | def get_state_dict(net_type: str = 'alex', version: str = '0.1'): 12 | # build url 13 | url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \ 14 | + f'master/lpips/weights/v{version}/{net_type}.pth' 15 | 16 | # download 17 | old_state_dict = torch.hub.load_state_dict_from_url( 18 | url, progress=True, 19 | map_location=None if torch.cuda.is_available() else torch.device('cpu') 20 | ) 21 | 22 | # rename keys 23 | new_state_dict = OrderedDict() 24 | for key, val in old_state_dict.items(): 25 | new_key = key 26 | new_key = new_key.replace('lin', '') 27 | new_key = new_key.replace('model.', '') 28 | new_state_dict[new_key] = val 29 | 30 | return new_state_dict 31 | -------------------------------------------------------------------------------- /dataset_utils/video2img.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import os 3 | 4 | def extract_frames(video_file, output_folder): 5 | # Open the video file 6 | cap = cv2.VideoCapture(video_file) 7 | if not cap.isOpened(): 8 | print("Error: Unable to open video file.") 9 | return 10 | 11 | # Create the output folder if it doesn't exist 12 | if not os.path.exists(output_folder): 13 | os.makedirs(output_folder) 14 | 15 | # Start frame counter 16 | frame_count = 0 17 | 18 | # Read frames and save as images 19 | while True: 20 | ret, frame = cap.read() 21 | if not ret: 22 | break 23 | 24 | # Save frame as an image 25 | image_path = os.path.join(output_folder, f"{frame_count:05d}.jpg") 26 | cv2.imwrite(image_path, frame) 27 | 28 | frame_count += 1 29 | 30 | # Release video capture object 31 | cap.release() 32 | 33 | # Call the function to extract frames 34 | extract_frames("D:/2024/3DGS/dataset/e2vid_data/tunnel/tunnel.mp4","D:/2024/3DGS/dataset/e2vid_data/tunnel/renders") -------------------------------------------------------------------------------- /Event_sensor/ICNS_extern/test_setup_cpp.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import dsi 3 | import time 4 | import cv2 5 | import sys 6 | sys.path.append("../src") 7 | from dvs_sensor import * 8 | import matplotlib.pyplot as plt 9 | 10 | dsi.initSimu(260, 346) 11 | dsi.initLatency(100.0, 30.0, 100.0, 1000.0) 12 | dsi.initContrast(0.3, 0.6, 0.035) 13 | init_bgn_hist_cpp("../data/noise_pos_3klux.npy", "../data/noise_pos_3klux.npy") 14 | path_img = "/home/lanpokn/Documents/2023/IEBCS-main/data/img/ball.mp4" 15 | #img = cv2.imread(path_img + 'ball.mp4'.format(1606209988861033)) 16 | #img = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)[:, :, 0] 17 | img = cv2.imread(path_img) 18 | img = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)[:, :, 0] 19 | dsi.initImg(img) 20 | #img = cv2.imread(path_img + 'ball.mp4'.format(1606209988909912)) 21 | #img = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)[:, :, 0] 22 | # img is already single-channel here; feed it again as the next frame 23 | s = dsi.updateImg(img, 46000) 24 | print(s) 25 | s = dsi.getShape() 26 | print(s) 27 | s = dsi.getCurv() 28 | print(s) 29 | print("Test completed") 30 |
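The call sequence this test exercises is the simulator's core loop; the condensed sketch below (an editor's illustration; the frame paths and the dt value are placeholders, and only calls that appear in this repository are used) shows how dsi turns a sequence of luminance frames into event buffers:

```python
# Condensed dsi driver loop (sketch). Mirrors test_setup_cpp.py above and
# example_images_to_events.py further down; 'frames' is a hypothetical list.
import cv2
import dsi

frames = ["frame_0000.png", "frame_0001.png"]  # placeholder frame paths
first = cv2.cvtColor(cv2.imread(frames[0]), cv2.COLOR_RGB2Lab)[:, :, 0]
dsi.initSimu(first.shape[0], first.shape[1])   # sensor height, width
dsi.initLatency(100.0, 30.0, 100.0, 1000.0)    # latency, jitter, refractory, tau
dsi.initContrast(0.3, 0.6, 0.035)              # +/- thresholds, threshold noise
# (the scripts in this repo also seed sensor noise via init_bgn_hist_cpp)
dsi.initImg(first)                             # seed the reference frame
for path in frames[1:]:
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_RGB2Lab)[:, :, 0]
    buf = dsi.updateImg(img, 10000)            # advance by dt; returns events
    print(len(buf["ts"]), "events")            # keys: ts, x, y, p
```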
-------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/event_process/generating_spike_train.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function: Randomly generating spike trains. 3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Oct. 29th, 2018. 4 | 5 | """ 6 | 7 | import numpy as np 8 | import random 9 | 10 | def random_array(total_numbers, spike_numbers, ratio = 2): 11 | """ 12 | Randomly generate a spike train for the metric. 13 | 14 | Inputs: 15 | ------- 16 | total_numbers - the total number of timestamps in the spike train. 17 | spike_numbers - the number of spikes. 18 | ratio - the ON/OFF ratio, default 2. 19 | 20 | Outputs: 21 | ------- 22 | spike_train - the randomly generated spike train. 23 | 24 | """ 25 | samples = random.sample(range(0, total_numbers), spike_numbers) 26 | spike_train = np.zeros((total_numbers)) 27 | 28 | for i, index in enumerate(samples): 29 | if i < len(samples)/(ratio+1): 30 | spike_train[index] = -1 31 | else: 32 | spike_train[index] = 1 33 | 34 | return spike_train -------------------------------------------------------------------------------- /Event_sensor/utils/bind_display.cpp: -------------------------------------------------------------------------------- 1 | #include "Camera.h" 2 | namespace py = pybind11; 3 | extern std::vector<double> emptyWave; 4 | // bind camera api 5 | void bind_display_api(py::module_ & e2e) 6 | { 7 | // bind display base 8 | py::class_<Display> disp(e2e, "Display"); 9 | // bind display srgb 10 | py::class_<DisplaySRGB, Display> dispSRGB(e2e, "DisplaySRGB"); 11 | dispSRGB.def("__repr__", [](DisplaySRGB &a){return "<DisplaySRGB>";}) 12 | .def(py::init<std::vector<double>&, std::vector<double>&, std::vector<double>&>()) 13 | .def("rendering", &DisplaySRGB::rendering, "Rendering srgb image to SceneSpectral", py::arg("rgb"), py::arg("waveDst")=emptyWave) 14 | .def_property_readonly("chroma", &DisplaySRGB::getChroma, "Get the color space of the display") 15 | .def_property_readonly("maxLuminance", &DisplaySRGB::getMaxLuminance, "The maximum luminance of the display") 16 | ; 17 | // bind natureSRGB 18 | extern DisplaySRGB natureSRGB; 19 | e2e.attr("natureSRGB") = natureSRGB; 20 | } -------------------------------------------------------------------------------- /dataset_utils/imgs2video.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | 4 | def generate_video_from_images(folder_path, output_video_path, fps=25): 5 | # Collect and sort the image files in the folder (os.listdir order is arbitrary) 6 | image_files = sorted([f for f in os.listdir(folder_path) if f.endswith(('.png', '.jpg', '.jpeg'))]) 7 | if not image_files: 8 | print("No image files found in the specified folder.") 9 | return 10 | 11 | # Read the first image to get the frame size 12 | first_image_path = os.path.join(folder_path, image_files[0]) 13 | first_image = cv2.imread(first_image_path) 14 | height, width, _ = first_image.shape 15 | 16 | # Create the video writer 17 | fourcc = cv2.VideoWriter_fourcc(*'mp4v') 18 | video_writer = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height)) 19 | 20 | # Read the images one by one and write them to the video 21 | for image_file in image_files: 22 | image_path = os.path.join(folder_path, image_file) 23 | image = cv2.imread(image_path) 24 | video_writer.write(image) 25 | 26 | # Release resources 27 | video_writer.release() 28 | cv2.destroyAllWindows() 29 | 30 | # Call the function to generate the video 31 | generate_video_from_images("D:/2024/3DGS/dataset/nerf_synthetic/Train_colmap_easy/renders", "D:/2024/3DGS/dataset/nerf_synthetic/Train_colmap_easy/train.mp4", fps=25)
-------------------------------------------------------------------------------- /utils/image_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | 13 | import torch 14 | import lpips 15 | import sys 16 | def mse(img1, img2): 17 | return (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True) 18 | 19 | def psnr(img1, img2): 20 | mse = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True) 21 | return 20 * torch.log10(1.0 / torch.sqrt(mse)) 22 | def LPIPS(img1, img2): 23 | # Move img1 and img2 to GPU if available 24 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 25 | img1 = img1.to(device) 26 | img2 = img2.to(device) 27 | 28 | # Load LPIPS model on GPU 29 | original_stdout = sys.stdout 30 | original_stderr = sys.stderr 31 | 32 | sys.stdout = None 33 | sys.stderr = None 34 | 35 | lpips_model = lpips.LPIPS(net='alex').to(device) 36 | 37 | sys.stdout = original_stdout 38 | sys.stderr = original_stderr 39 | 40 | # Compute LPIPS similarity 41 | return lpips_model(img1, img2) -------------------------------------------------------------------------------- /lpipsPyTorch/modules/lpips.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from .networks import get_network, LinLayers 5 | from .utils import get_state_dict 6 | 7 | 8 | class LPIPS(nn.Module): 9 | r"""Creates a criterion that measures 10 | Learned Perceptual Image Patch Similarity (LPIPS). 11 | 12 | Arguments: 13 | net_type (str): the network type to compare the features: 14 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'. 15 | version (str): the version of LPIPS. Default: 0.1. 
16 | """ 17 | def __init__(self, net_type: str = 'alex', version: str = '0.1'): 18 | 19 | assert version in ['0.1'], 'v0.1 is only supported now' 20 | 21 | super(LPIPS, self).__init__() 22 | 23 | # pretrained network 24 | self.net = get_network(net_type) 25 | 26 | # linear layers 27 | self.lin = LinLayers(self.net.n_channels_list) 28 | self.lin.load_state_dict(get_state_dict(net_type, version)) 29 | 30 | def forward(self, x: torch.Tensor, y: torch.Tensor): 31 | feat_x, feat_y = self.net(x), self.net(y) 32 | 33 | diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)] 34 | res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)] 35 | 36 | return torch.sum(torch.cat(res, 0), 0, True) 37 | -------------------------------------------------------------------------------- /dataset_utils/change_bg.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import os 3 | 4 | def process_image(image): 5 | # 找到纯黑色的像素 6 | black_pixels_mask = (image[:, :, 0] == 0) & (image[:, :, 1] == 0) & (image[:, :, 2] == 0) 7 | # black_pixels_mask = (image[:, :, 0] == 166) & (image[:, :, 1] == 166) & (image[:, :, 2] == 166) 8 | # 将纯黑色的像素变为灰色 9 | image[black_pixels_mask] = [186, 186, 186] # 灰色 10 | return image 11 | 12 | def process_images_in_folder(input_folder, output_folder): 13 | # 确保输出文件夹存在 14 | if not os.path.exists(output_folder): 15 | os.makedirs(output_folder) 16 | 17 | # 遍历输入文件夹中的所有文件 18 | for filename in os.listdir(input_folder): 19 | if filename.endswith(".jpg") or filename.endswith(".png"): 20 | # 读取图像 21 | image_path = os.path.join(input_folder, filename) 22 | image = cv2.imread(image_path) 23 | 24 | # 处理图像 25 | processed_image = process_image(image) 26 | 27 | # 写入处理后的图像 28 | output_path = os.path.join(output_folder, filename) 29 | cv2.imwrite(output_path, processed_image) 30 | 31 | print(f"Processed image: {filename}") 32 | 33 | # 调用函数处理图片文件夹 34 | input_folder = "D:/2024/3DGS/dataset/nerf_synthetic/ship_colmap_easy/renders_old" 35 | output_folder = "D:/2024/3DGS/dataset/nerf_synthetic/ship_colmap_easy/renders" 36 | process_images_in_folder(input_folder, output_folder) -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/event_process/spike_train_processing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function: spike train processing: displacement and removing. 3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Oct. 29th, 2018. 4 | 5 | """ 6 | 7 | import numpy as np 8 | import random 9 | 10 | 11 | def displacement_spikes(spike_train, k): 12 | """ 13 | cyclic displacement for spike train. 14 | 15 | Inputs: 16 | ------- 17 | spike_train - numpy.array: the spike train includes spiking firing timestamp. 18 | k - the cyclic displacement number. 19 | 20 | Outputs: 21 | ------- 22 | new_spike_train - the new spike train by cyclic displacement. 23 | 24 | """ 25 | 26 | spike_train = spike_train.tolist() 27 | 28 | new_spike_train = spike_train[int(k):] + spike_train[:int(k)] 29 | new_spike_train = np.array(new_spike_train) 30 | 31 | return new_spike_train 32 | 33 | # new_A = displacement_spikes(A, 2) 34 | 35 | 36 | def remove_spikes(spike_train, spike_numbers): 37 | """ 38 | remove spike numbers for spike train. 39 | 40 | Inputs: 41 | ------- 42 | spike_train - numpy.array: the spike train includes spiking firing timestamp. 43 | spike_numbers - the removing spike numbers. 
44 | 45 | Outputs: 46 | ------- 47 | new_spike_train - the new spike train after removing spikes. 48 | 49 | """ 50 | new_spike_train = np.copy(spike_train) 51 | indexs = np.nonzero(spike_train)[0] 52 | 53 | remove_indexs = random.sample(range(0, len(indexs)), int(spike_numbers)) 54 | 55 | for i, remove_index in enumerate(remove_indexs): 56 | new_spike_train[indexs[remove_index]] = 0 57 | 58 | return new_spike_train 59 | 60 | -------------------------------------------------------------------------------- /dataset_utils/blurry_gen.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import numpy as np 4 | import shutil 5 | 6 | def blurry_gen(source_folder, destination_folder): 7 | if not os.path.exists(destination_folder): 8 | os.makedirs(destination_folder) 9 | 10 | image_files = sorted([file for file in os.listdir(source_folder) if file.endswith('.png')]) 11 | 12 | images = [cv2.imread(os.path.join(source_folder, file)) for file in image_files] 13 | num_images = len(images) 14 | 15 | # Define a 7x7 averaging kernel 16 | kernel = np.ones((7, 7), np.float32) / 49 17 | 18 | for i in range(1, num_images - 1): 19 | current_img = cv2.filter2D(images[i], -1, kernel) 20 | previous_img = cv2.filter2D(images[i - 1], -1, kernel) 21 | next_img = cv2.filter2D(images[i + 1], -1, kernel) 22 | 23 | blurred_img = (current_img.astype(np.float32) + previous_img.astype(np.float32) + next_img.astype(np.float32)) / 3 24 | blurred_img = np.clip(blurred_img, 0, 255).astype(np.uint8) 25 | 26 | output_path = os.path.join(destination_folder, image_files[i]) 27 | cv2.imwrite(output_path, blurred_img) 28 | 29 | # Handle the first image (copy it to the destination folder) 30 | first_image_path = os.path.join(source_folder, image_files[0]) 31 | first_image_output_path = os.path.join(destination_folder, image_files[0]) 32 | shutil.copyfile(first_image_path, first_image_output_path) 33 | 34 | # Handle the last image (copy it to the destination folder) 35 | last_image_path = os.path.join(source_folder, image_files[-1]) 36 | last_image_output_path = os.path.join(destination_folder, image_files[-1]) 37 | shutil.copyfile(last_image_path, last_image_output_path) 38 | 39 | # Example usage 40 | source_folder = "D:/2024/3DGS/dataset/nerf_synthetic/lego_colmap_easy/renders" 41 | destination_folder = "D:/2024/3DGS/dataset/nerf_synthetic/lego_colmap_easy/images_blurry" 42 | blurry_gen(source_folder, destination_folder) 43 | -------------------------------------------------------------------------------- /Event_sensor/src/metric_figure.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | # Create a figure and an axes object 4 | fig, ax = plt.subplots() 5 | 6 | # Plot the data 7 | ax.plot([0, 10, 100, 1000, 10000, 100000], [0, 0.093603729, 0.116296696, 0.465304116, 2.074620929, 3.189112756], color='red', label='chamfer') 8 | ax.plot([0, 10, 100, 1000, 10000, 100000], [0, 0.085920267, 0.106750482, 0.427109628, 1.498048789, 1.66891853], color='blue', label='gaussian') 9 | 10 | # Set the x-axis and y-axis labels 11 | ax.set_xlabel('T_bias',fontdict = {'size':28, 'family':'Times New Roman'}) 12 | ax.set_ylabel('Metric',fontdict = {'size':28, 'family':'Times New Roman'}) 13 | ax.tick_params(axis='x', labelsize=18) 14 | ax.tick_params(axis='y', labelsize=18) 15 | 16 | # Add a title to the plot 17 | ax.set_title('metric change with T_bias',fontdict = {'size':28, 'family':'Times New Roman'}) 18 | # Set the x-axis to a logarithmic scale 19 | ax.set_xscale('log') 20 | # Add a legend to the plot
ax.legend(fontsize=24) 22 | # # Show the plot 23 | # plt.show() 24 | # plt.clf() 25 | 26 | # Create a figure and an axes object 27 | fig2, ax2 = plt.subplots() 28 | 29 | # Plot the data 30 | ax2.plot([0, 10, 100, 1000, 10000, 100000], [0, 0.000209886, 0.00197271, 0.033201909, 0.196596181, 1.459193458], color='red', label='chamfer') 31 | ax2.plot([0, 10, 100, 1000, 10000, 100000], [0, 2.85E-05, 0.00031591, 0.01524162, 0.042270326, 0.248062504], color='blue', label='gaussian') 32 | # Set the x-axis and y-axis labels 33 | ax2.set_xlabel('Noise',fontdict = {'size':28, 'family':'Times New Roman'}) 34 | ax2.set_ylabel('Metric',fontdict = {'size':28, 'family':'Times New Roman'}) 35 | ax2.tick_params(axis='x', labelsize=18) 36 | ax2.tick_params(axis='y', labelsize=18) 37 | # Add a title to the plot 38 | ax2.set_title('metric change vs Noise',fontdict = {'size':28, 'family':'Times New Roman'}) 39 | 40 | # Set the x-axis to a logarithmic scale 41 | ax2.set_xscale('log') 42 | 43 | # Add a legend to the plot 44 | ax2.legend(fontsize=24) 45 | 46 | # Show the plot 47 | plt.show() -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/spike_metric/fft_3d_convolution.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function: 3D Gaussian kernel generation and FFT-based 3D convolution for the spike metric. 3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Jan. 22nd, 2019. 4 | 5 | """ 6 | 7 | import numpy as np 8 | import math 9 | 10 | def get_3d_gaussian_kernel(x_size=10, y_size=10, t_size=1000, x_sigma=10, y_sigma=10, t_sigma=500): 11 | """ 12 | get a 3d gaussian kernel. 13 | 14 | Inputs: 15 | ------- 16 | x_size - the size of 3d gaussian kernel in x dimension. 17 | y_size - the size of 3d gaussian kernel in y dimension. 18 | t_size - the size of 3d gaussian kernel in t dimension. 19 | x_sigma - the x parameter of 3d gaussian kernel. 20 | y_sigma - the y parameter of 3d gaussian kernel. 21 | t_sigma - the t parameter of 3d gaussian kernel. 22 | Outputs: 23 | ------- 24 | gaussian_3d - the discretized 3d gaussian kernel. 25 | 26 | """ 27 | 28 | x_vec = np.arange(-math.floor(x_size/2), math.floor(x_size/2), 1) 29 | y_vec = np.arange(-math.floor(y_size/2), math.floor(y_size/2), 1) 30 | t_vec = np.arange(-math.floor(t_size/2), math.floor(t_size/2), 1) 31 | 32 | xx, yy, tt = np.meshgrid(x_vec, y_vec, t_vec) 33 | 34 | gauss_3d = np.exp(-xx ** 2 / (2 * x_sigma ** 2) - yy ** 2 / (2 * y_sigma ** 2) - tt ** 2 / (2 * t_sigma ** 2)) # standard Gaussian: divide by 2*sigma^2 35 | 36 | return gauss_3d/np.sum(gauss_3d) 37 | 38 | 39 | def fft_convolution(spike_cube, gaussian_3d): 40 | """ 41 | 3d convolution based on fft (fast fourier transform). 42 | 43 | Inputs: 44 | ------- 45 | spike_cube - the spike array. 46 | gaussian_3d - the 3d gaussian convolution kernel. 47 | Outputs: 48 | ------- 49 | inverse_fft - the inverse fft to compute 3d convolution. 50 | 51 | """ 52 | 53 | fft1 = np.fft.fftn(gaussian_3d, spike_cube.shape) 54 | fft2 = np.fft.fftn(spike_cube) 55 | inverse_fft = np.real(np.fft.ifftn(fft1*fft2)) 56 | 57 | return inverse_fft 58 | 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /utils/graphics_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved.
5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | import math 14 | import numpy as np 15 | from typing import NamedTuple 16 | 17 | class BasicPointCloud(NamedTuple): 18 | points : np.array 19 | colors : np.array 20 | normals : np.array 21 | 22 | def geom_transform_points(points, transf_matrix): 23 | P, _ = points.shape 24 | ones = torch.ones(P, 1, dtype=points.dtype, device=points.device) 25 | points_hom = torch.cat([points, ones], dim=1) 26 | points_out = torch.matmul(points_hom, transf_matrix.unsqueeze(0)) 27 | 28 | denom = points_out[..., 3:] + 0.0000001 29 | return (points_out[..., :3] / denom).squeeze(dim=0) 30 | 31 | def getWorld2View(R, t): 32 | Rt = np.zeros((4, 4)) 33 | Rt[:3, :3] = R.transpose() 34 | Rt[:3, 3] = t 35 | Rt[3, 3] = 1.0 36 | return np.float32(Rt) 37 | 38 | def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0): 39 | Rt = np.zeros((4, 4)) 40 | Rt[:3, :3] = R.transpose() 41 | Rt[:3, 3] = t 42 | Rt[3, 3] = 1.0 43 | 44 | C2W = np.linalg.inv(Rt) 45 | cam_center = C2W[:3, 3] 46 | cam_center = (cam_center + translate) * scale 47 | C2W[:3, 3] = cam_center 48 | Rt = np.linalg.inv(C2W) 49 | return np.float32(Rt) 50 | 51 | def getProjectionMatrix(znear, zfar, fovX, fovY): 52 | tanHalfFovY = math.tan((fovY / 2)) 53 | tanHalfFovX = math.tan((fovX / 2)) 54 | 55 | top = tanHalfFovY * znear 56 | bottom = -top 57 | right = tanHalfFovX * znear 58 | left = -right 59 | 60 | P = torch.zeros(4, 4) 61 | 62 | z_sign = 1.0 63 | 64 | P[0, 0] = 2.0 * znear / (right - left) 65 | P[1, 1] = 2.0 * znear / (top - bottom) 66 | P[0, 2] = (right + left) / (right - left) 67 | P[1, 2] = (top + bottom) / (top - bottom) 68 | P[3, 2] = z_sign 69 | P[2, 2] = z_sign * zfar / (zfar - znear) 70 | P[2, 3] = -(zfar * znear) / (zfar - znear) 71 | return P 72 | 73 | def fov2focal(fov, pixels): 74 | return pixels / (2 * math.tan(fov / 2)) 75 | 76 | def focal2fov(focal, pixels): 77 | return 2*math.atan(pixels/(2*focal)) -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/testing_effectiveness/show_structure_metrics.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function: Show searching error curves for spike metrics. 3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Apr. 1st, 2019. 4 | 5 | """ 6 | 7 | import numpy as np 8 | import pickle 9 | from matplotlib import pyplot as pl 10 | 11 | # load noise ratio steps. 12 | noise_ratio_steps = open('../datasets/search_moving_target/noise_ratio_steps.pkl','rb') 13 | noise_ratio_steps = pickle.load(noise_ratio_steps) 14 | 15 | # load kernel cube errors. 16 | kernel_cube_errors = open('../datasets/search_moving_target/kernel_cube_errors.pkl','rb') 17 | kernel_cube_errors = pickle.load(kernel_cube_errors) 18 | kernel_cube_errors = kernel_cube_errors[np.argsort(kernel_cube_errors)] 19 | 20 | # load kernel train errors. 21 | kernel_train_errors = open('../datasets/search_moving_target/kernel_train_errors.pkl','rb') 22 | kernel_train_errors = pickle.load(kernel_train_errors) 23 | kernel_train_errors = kernel_train_errors[np.argsort(kernel_train_errors)]/2 # show better curve 24 | 25 | # load kernel train errors using polarity interference. 
26 | kernel_train_pif_errors = open('../datasets/search_moving_target/kernel_train_pif_errors.pkl','rb')
27 | kernel_train_pif_errors = pickle.load(kernel_train_pif_errors)
28 | kernel_train_pif_errors = kernel_train_pif_errors[np.argsort(kernel_train_pif_errors)]/2 # show better curve
29 | 
30 | # show spike metrics for the structure attribute.
31 | fig = pl.figure()
32 | pl.plot(noise_ratio_steps, kernel_train_errors, '--', color='blue', markersize=3, linewidth=3, figure=fig, label='KMST[17]')
33 | pl.plot(noise_ratio_steps, kernel_train_pif_errors, '-.', color='limegreen', markersize=3, linewidth=3, figure=fig, label='KMST-P[23]')
34 | pl.plot(noise_ratio_steps, kernel_cube_errors, '-', color='red', markersize=3, linewidth=3, figure=fig, label='ASTSM')
35 | 
36 | font1 = {'family': 'Times New Roman', 'size': 20}
37 | font2 = {'size': 16}
38 | pl.xlabel(r'$N_\tau$', font1)
39 | pl.grid(axis='y', linestyle='-.')
40 | pl.ylabel('Tracking errors / pixel', font1)
41 | pl.xlim((0, 2))
42 | pl.ylim((0, 60))
43 | pl.xticks(np.linspace(0, 2, 5), fontsize=16)
44 | pl.yticks(fontsize=16)
45 | pl.yticks(np.linspace(0, 60, 5), fontsize=16)
46 | #pl.legend(loc = 0, prop=font2)
47 | pl.legend(loc='upper center', bbox_to_anchor=(0.24, 0.98), prop=font2)
48 | pl.show()
49 | 
50 | 
--------------------------------------------------------------------------------
/Event_sensor/src/example_images_to_events.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import sys
4 | import numpy as np
5 | 
6 | # Add the source path so the custom modules can be imported
7 | sys.path.append("../../src")
8 | 
9 | from event_buffer import EventBuffer
10 | from dvs_sensor import init_bgn_hist_cpp, DvsSensor
11 | from event_display import EventDisplay
12 | import dsi
13 | 
14 | # Input folder path
15 | input_folder = "D:/2024/3DGS/dataset/nerf_synthetic/lego/True/train/ours_7999/renders"
16 | 
17 | # Parameter settings
18 | lat = 100
19 | jit = 10
20 | ref = 100
21 | tau = 300
22 | th = 0.3
23 | th_noise = 0.01
24 | # default is 1000
25 | dt = 200
26 | 
27 | 
28 | # Initialize the event buffer and the event display
29 | ev_full = EventBuffer(1)
30 | # ed = EventDisplay("Events", 1920, 1080, dt*2)
31 | 
32 | # Collect all image files in the folder
33 | image_files = sorted(os.listdir(input_folder))
34 | image_files = [file for file in image_files if len(file) <= 10]
35 | # Initialize timing and state flags
36 | time = 0
37 | isInit = False
38 | # Iterate over every image in the folder
39 | i = 0
40 | for image_file in image_files:
41 |     # Build the full image file path
42 |     # if i>67:
43 |     #     break
44 |     i = i + 1
45 |     image_path = os.path.join(input_folder, image_file)
46 | 
47 |     # Read the image
48 |     im = cv2.imread(image_path)
49 | 
50 | 
51 |     # Convert the image to a luminance image (cv2.imread returns BGR, hence COLOR_BGR2LUV)
52 |     im = cv2.cvtColor(im, cv2.COLOR_BGR2LUV)[:, :, 0]
53 |     cv2.imshow("im", im)
54 |     cv2.waitKey()
55 |     # Initialize or update the DSI
56 |     if not isInit:
57 |         dsi.initSimu(int(im.shape[0]), int(im.shape[1]))  # e.g. an input resolution of 1080x1920
58 |         dsi.initLatency(lat, jit, ref, tau)
59 |         dsi.initContrast(th, th, th_noise)
60 |         init_bgn_hist_cpp("D:/2023/computional imaging/my_pbrt/IEBCS-main/data/noise_neg_161lux.npy", "D:/2023/computional imaging/my_pbrt/IEBCS-main/data/noise_neg_161lux.npy")
61 |         dsi.initImg(im)
62 |         ed = EventDisplay("Events", int(im.shape[1]), int(im.shape[0]), dt*2)
63 | 
64 |         isInit = True
65 |     else:
66 |         buf = dsi.updateImg(im, dt)
67 |         ev = EventBuffer(1)
68 |         ev.add_array(np.array(buf["ts"], dtype=np.uint64),
69 |                      np.array(buf["x"], dtype=np.uint16),
70 |                      np.array(buf["y"], dtype=np.uint16),
71 |                      np.array(buf["p"], dtype=np.uint64),
72 |                      10000000)
73 |         ed.update(ev, dt)
74 |         ev_full.increase_ev(ev)
75 |         time += dt
76 |         if time > 0.1e19:  # effectively disabled
77 |             break
78 | ev_full.write("D:/2024/3DGS/dataset/nerf_synthetic/lego/raw2.dat")
--------------------------------------------------------------------------------
/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/testing_effectiveness/show_polarity_metrics.py:
--------------------------------------------------------------------------------
1 | """
2 | Function: Compare spike metrics under polarity changes and show the resulting curves.
3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Apr. 1st, 2019.
4 | 
5 | """
6 | 
7 | import numpy as np
8 | import pickle
9 | from matplotlib import pyplot as pl
10 | import random
11 | 
12 | 
13 | # load polarity changing steps.
14 | polarity_changing_steps = open('../datasets/changing_polarity/polarity_changing_steps.pkl','rb')
15 | polarity_changing_steps = pickle.load(polarity_changing_steps)
16 | 
17 | # load kernel cube distances.
18 | kernel_cube_distances = open('../datasets/changing_polarity/kernel_cube_distances.pkl','rb')
19 | kernel_cube_distances = pickle.load(kernel_cube_distances)
20 | 
21 | # load polarity interference train distances.
22 | kernel_train_pif_distances = open('../datasets/changing_polarity/kernel_train_pif_distances.pkl','rb')
23 | #kernel_train_pif_distances = pickle.load(kernel_train_pif_distances) + random.sample(range(-800, 800), len(polarity_changing_steps))
24 | randoms = [0] + (random.sample(range(0, 100), 2) + random.sample(range(-800, 800), len(polarity_changing_steps)-3))
25 | kernel_train_pif_distances = pickle.load(kernel_train_pif_distances) + randoms
26 | 
27 | # load spike train distances, disregarding polarity.
28 | kernel_train_distances = open('../datasets/changing_polarity/kernel_train_distances.pkl','rb')
29 | kernel_train_distances = pickle.load(kernel_train_distances)
30 | 
31 | # show spike metrics for polarity changes.
32 | fig = pl.figure()
33 | pl.plot(polarity_changing_steps, kernel_train_distances, '--', color='blue', markersize=3, linewidth=3, figure=fig, label='KMST[17]')
34 | pl.plot(polarity_changing_steps, kernel_train_pif_distances, '-.', color='limegreen', markersize=3, linewidth=3, figure=fig, label='KMST-P[23]')
35 | pl.plot(polarity_changing_steps, kernel_cube_distances, '-', color='red', markersize=3, linewidth=3, figure=fig, label='ASTSM')
36 | 
37 | font1 = {'family': 'Times New Roman', 'size': 20}
38 | font2 = {'size': 16}
39 | pl.xlabel(r'$R_\tau$', font1)
40 | pl.grid(axis='y', linestyle='-.')
41 | pl.ylabel('Distance', font1)
42 | pl.xlim((0, 0.9))
43 | pl.ylim((0, 12000))
44 | pl.xticks(np.linspace(0, 0.9, 4), fontsize=16)
45 | pl.yticks(fontsize=16)
46 | pl.yticks(np.linspace(0, 12000, 4), fontsize=16)
47 | #pl.legend(loc = 0, prop=font2)
48 | pl.legend(loc='upper center', bbox_to_anchor=(0.25, 0.98), prop=font2)
49 | pl.show()
50 | 
--------------------------------------------------------------------------------
/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/main_script.py:
--------------------------------------------------------------------------------
1 | """
2 | Measure distances between spike cubes using various distortion operations.
3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Jan. 13th, 2019.
4 | 
5 | """
6 | 
7 | import os
8 | import argparse
9 | from event_process import read_dvs
10 | from event_process import event_processing
11 | import spike_cubes_distances_curves
12 | 
13 | 
14 | argparser = argparse.ArgumentParser(
15 |     description='Measure distances between spike cubes using various distortion operations.'
16 | )
17 | 
18 | argparser.add_argument(
19 |     '-d',
20 |     '--data_path',
21 |     help='The .aedat data path from the dynamic vision sensor',
22 |     default='./datasets/mnist_0_scale04_0001.aedat'
23 | )
24 | 
25 | argparser.add_argument(
26 |     '-o',
27 |     '--operation_type',
28 |     help='The distortion operation applied to the spike cube: spatial_temporal_changing, temporal_spatial_translation, spike_cube_rotation, removing_spikes, cyclic_displacement, changing_polarity, increasing_noise.',
29 |     default='spatial_temporal_changing'
30 | )
31 | 
32 | 
33 | def _main(args):
34 | 
35 |     ### read event-based data.
36 |     data_path = os.path.expanduser(args.data_path)
37 |     aer_file = read_dvs.aefile(data_path)
38 |     aer_data = read_dvs.aedata(aer_file)
39 |     events = event_processing.aer_events(aer_data)
40 | 
41 |     ### show distances under various distortion operations.
42 |     if args.operation_type == "spatial_temporal_changing":
43 |         spike_cubes_distances_curves.show_temporal_spatial_random_changing(events, 8000, 10)
44 | 
45 |     elif args.operation_type == "temporal_spatial_translation":
46 |         spike_cubes_distances_curves.show_temporal_spatial_translation(events, 50, 50)
47 | 
48 |     elif args.operation_type == "spike_cube_rotation":
49 |         spike_cubes_distances_curves.show_spike_cubes_rotation(events, 360, width=128, height=128)
50 | 
51 |     elif args.operation_type == "removing_spikes":
52 |         spike_cubes_distances_curves.show_random_removing_spikes(events, 1000)
53 | 
54 |     elif args.operation_type == "cyclic_displacement":
55 |         spike_cubes_distances_curves.show_cyclic_displacement(events, 4000, 5)
56 | 
57 |     elif args.operation_type == "changing_polarity":
58 |         spike_cubes_distances_curves.show_changing_polarity_percentage(events, 0.9)
59 | 
60 |     elif args.operation_type == "increasing_noise":
61 |         spike_cubes_distances_curves.show_increasing_noise_ratio(events, 1)
62 | 
63 | 
64 | if __name__ == '__main__':
65 |     args = argparser.parse_args()
66 |     _main(args)
67 | 
68 | 
--------------------------------------------------------------------------------
/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/spike_metric/spike_train_metric.py:
--------------------------------------------------------------------------------
1 | """
2 | Function: Inner product metric in a reproducing kernel Hilbert space (RKHS).
3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Oct. 28th, 2018.
4 | Code source: the core was coded by Zhichao Bi, Peking University.
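Note: cal_ip_pif below computes the polarity-weighted Gaussian inner product
    <s1, s2> = sum_{i,j} p_i * q_j * exp(-(t_i - t_j)^2 / (2 * sigma^2)),
where t_i, p_i are the firing times and polarities of the first train, while
cal_ip_pid drops the polarity weights. The distance functions then apply the
RKHS identity d(s1, s2) = <s1, s1> + <s2, s2> - 2 * <s1, s2>.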
5 | """
6 | 
7 | import numpy as np
8 | import math
9 | 
10 | def cal_ip_pif(array1, array2, sigma):
11 |     tss1 = np.nonzero(array1)[0]
12 |     tss2 = np.nonzero(array2)[0]
13 | 
14 |     ps1 = array1[tss1]
15 |     ps2 = array2[tss2]
16 |     dist_matrix = np.exp(-(tss1[:, None] - tss2[None, :]) ** 2 / (2 * sigma ** 2))
17 |     dist_matrix *= ps1[:, None] * ps2[None, :]
18 | 
19 |     return np.sum(dist_matrix)
20 | 
21 | def cal_ip_pid(array1, array2, sigma):
22 |     tss1 = np.nonzero(array1)[0]
23 |     tss2 = np.nonzero(array2)[0]
24 | 
25 |     dist_matrix = np.exp(-(tss1[:, None] - tss2[None, :]) ** 2 / (2 * sigma ** 2))
26 |     total_dist = np.sum(dist_matrix)
27 |     return total_dist
28 | 
29 | def cal_scaling_ip_pid(array1, array2, sigma):
30 |     tss1 = np.nonzero(array1)[0]
31 |     tss2 = np.nonzero(array2)[0]
32 |     ps1 = array1[tss1]
33 |     ps2 = array2[tss2]
34 |     dist_matrix = np.exp(-(tss1[:, None] - tss2[None, :]) ** 2 / (2 * sigma ** 2))
35 |     total_dist = np.sum(dist_matrix)
36 |     count1_on = np.count_nonzero(ps1 == 1) / max(np.count_nonzero(ps1), 1)
37 |     count2_on = np.count_nonzero(ps2 == 1) / max(np.count_nonzero(ps2), 1)
38 |     scaling = count1_on * count2_on + (1 - count1_on) * (1 - count2_on)
39 |     # scaling = 1 + abs(count1_on - count2_on)
40 |     return total_dist * scaling
41 | 
42 | 
43 | def cal_dist_pif(array1, array2, sigma):
44 |     total_dist = cal_ip_pif(array1, array1, sigma=sigma)
45 |     total_dist += cal_ip_pif(array2, array2, sigma=sigma)
46 |     total_dist -= 2 * cal_ip_pif(array1, array2, sigma=sigma)
47 | 
48 |     return total_dist
49 | 
50 | 
51 | def cal_dist_pid(array1, array2, sigma):
52 |     total_dist = cal_ip_pid(array1, array1, sigma=sigma)  # polarity is not considered.
53 |     total_dist += cal_ip_pid(array2, array2, sigma=sigma)
54 |     total_dist -= 2 * cal_ip_pid(array1, array2, sigma=sigma)
55 | 
56 |     # total_dist = cal_scaling_ip_pid(array1, array1, sigma=sigma)  # considers the polarity probability in the point-process history.
57 |     # total_dist += cal_scaling_ip_pid(array2, array2, sigma=sigma)
58 |     # total_dist -= 2 * cal_scaling_ip_pid(array1, array2, sigma=sigma)
59 | 
60 |     return total_dist
61 | 
62 | 
63 | def hamming_distance(array1, array2):
64 |     delta_array = np.nonzero(np.array(array1) - np.array(array2))
65 |     distance = len(delta_array[0])
66 | 
67 |     return distance
--------------------------------------------------------------------------------
/Event_sensor/src/example_video_to_events.py:
--------------------------------------------------------------------------------
1 | # adapted from ICNS
2 | """
3 | Script converting a video into events. The framerate assumed here might not be the real framerate of the original
4 | video; the user specifies this parameter at the beginning.
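(For example, the dt = 2857 us used below corresponds to an assumed source framerate of roughly
350 fps, since dt is approximately 1e6 / fps.)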
5 | Please run get_video_youtube.py first.
6 | """
7 | import cv2
8 | import sys
9 | sys.path.append("../../src")
10 | from event_buffer import EventBuffer
11 | from dvs_sensor import init_bgn_hist_cpp, DvsSensor
12 | from event_display import EventDisplay
13 | import dsi
14 | import numpy as np
15 | filename = "C:/Users//Documents/blender/PBES_small/rotate_360/0000-0060.mkv"
16 | lat = 100
17 | jit = 10
18 | ref = 100
19 | tau = 300
20 | th = 0.3
21 | th_noise = 0.01
22 | cap = cv2.VideoCapture(filename)
23 | dsi.initSimu(int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
24 | dsi.initLatency(lat, jit, ref, tau)
25 | dsi.initContrast(th, th, th_noise)
26 | print(int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
27 | print(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))
28 | init_bgn_hist_cpp("D:/2023/computional imaging/my_pbrt/IEBCS-main/data/noise_neg_161lux.npy", "D:/2023/computional imaging/my_pbrt/IEBCS-main/data/noise_neg_161lux.npy")
29 | isInit = False
30 | # dt = 1000  # a 1 kHz frame rate (dt in us); at that rate fast motion such as bird wings can be captured.
31 | # However, such a high frequency is unnecessary if that many input frames cannot be produced.
32 | dt = 2857
33 | ev_full = EventBuffer(1)
34 | ed = EventDisplay("Events", cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT), dt*2)
35 | time = 0
36 | out = cv2.VideoWriter('video_{}_{}_{}_{}_{}_{}_nonoise.avi'.format(lat, jit, ref, tau, th, th_noise),
37 |                       cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 20.0, (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
38 | while cap.isOpened():
39 |     ret, im = cap.read()
40 |     if im is None:
41 |         break
42 |     out.write(im)
43 |     im = cv2.cvtColor(im, cv2.COLOR_BGR2LUV)[:, :, 0]  # OpenCV decodes frames as BGR
44 |     cv2.imshow("t", im)
45 |     cv2.waitKey(1)
46 |     if not isInit:
47 |         dsi.initImg(im)
48 |         isInit = True
49 |     else:
50 |         buf = dsi.updateImg(im, dt)
51 |         ev = EventBuffer(1)
52 |         ev.add_array(np.array(buf["ts"], dtype=np.uint64),
53 |                      np.array(buf["x"], dtype=np.uint16),
54 |                      np.array(buf["y"], dtype=np.uint16),
55 |                      np.array(buf["p"], dtype=np.uint64),
56 |                      10000000)
57 |         ed.update(ev, dt)
58 |         ev_full.increase_ev(ev)
59 |         time += dt
60 |         if time > 0.1e9:
61 |             break
62 | out.release()
63 | cap.release()
64 | ev_full.write('ev_{}_{}_{}_{}_{}_{}.dat'.format(lat, jit, ref, tau, th, th_noise))
65 | 
66 | 
--------------------------------------------------------------------------------
/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/testing_effectiveness/searching_error_curves.py:
--------------------------------------------------------------------------------
1 | """
2 | Function: Show searching error curves under various distortions and compared approaches.
3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Mar. 5th, 2019.
4 | 
5 | """
6 | import numpy as np
7 | import pickle
8 | from testing_effectiveness.searching_trigonometric_target import search_trigonometric_trajectory, computing_trajectory_error
9 | from event_process.spike_cube_processing import events_increasing_noises
10 | from event_process.show_events import show_simulating_events
11 | 
12 | def show_searching_error_increasing_noise(events, true_target, max_noise_ratio):
13 |     """
14 |     Show searching trajectory errors under increasing random noise.
15 | 
16 |     Inputs:
17 |     -------
18 |         events - the event stream from the dynamic vision sensor.
19 |         true_target - the ground truth of the moving trajectory, as a numpy matrix.
20 |         max_noise_ratio - the maximum ratio of noise to signal.
21 | 
22 |     Outputs:
23 |     ------
24 |         mean_errors - the mean tracking error for each noise ratio step.
25 | 
26 |     """
27 | 
28 |     noise_ratio_steps = np.linspace(0, max_noise_ratio, 11)  # 11 noise percentages
29 | 
30 |     # pickle.dump(noise_ratio_steps, open('../datasets/search_moving_target/noise_ratio_steps.pkl', 'wb'))
31 | 
32 |     mean_errors = np.zeros((len(noise_ratio_steps)))
33 | 
34 |     for i, noise_ratio_step in enumerate(noise_ratio_steps):
35 | 
36 |         new_events = events_increasing_noises(events, noise_ratio_step, 1, 128, 128)
37 | 
38 |         # show_simulating_events(new_events, width=128, height=128, length=max(new_events[1, :]))
39 | 
40 | 
41 |         search_centers = search_trigonometric_trajectory(true_target, new_events, 10, 10, 200, 16, 16)  # search areas: 2d - 8, 3d - 16, 16
42 | 
43 |         trajectory_errors, mean_errors[i] = computing_trajectory_error(true_target, search_centers)
44 | 
45 |         print('noise_ratio_step = {}, mean_error = {}'.format(noise_ratio_step, mean_errors[i]))
46 | 
47 |     return mean_errors
48 | 
49 | 
50 | 
51 | if __name__ == '__main__':
52 | 
53 |     # read simulated data.
54 |     pkl_events = open('../datasets/simulating_dataset/events_3d_trigonometric.pkl', 'rb')
55 |     pkl_true_target = open('../datasets/simulating_dataset/events_3d_true_target.pkl', 'rb')
56 |     events = pickle.load(pkl_events)
57 |     true_target = pickle.load(pkl_true_target)
58 | 
59 |     mean_errors = show_searching_error_increasing_noise(events, true_target, 2)  # maximum ratio of noise to signal.
60 | 
61 |     # save searching errors as pkl.
62 |     # pickle.dump(mean_errors, open('../datasets/search_moving_target/kernel_cube_errors.pkl', 'wb'))
63 |     # pickle.dump(mean_errors, open('../datasets/search_moving_target/kernel_train_pif_errors.pkl', 'wb'))
64 | 
65 | 
66 |     print('pku')
--------------------------------------------------------------------------------
/scene/cameras.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | from torch import nn 14 | import numpy as np 15 | from utils.graphics_utils import getWorld2View2, getProjectionMatrix 16 | 17 | class Camera(nn.Module): 18 | def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask, 19 | image_name, uid, 20 | trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda" 21 | ): 22 | super(Camera, self).__init__() 23 | 24 | self.uid = uid 25 | self.colmap_id = colmap_id 26 | self.R = R 27 | self.T = T 28 | self.FoVx = FoVx 29 | self.FoVy = FoVy 30 | self.image_name = image_name 31 | 32 | try: 33 | self.data_device = torch.device(data_device) 34 | except Exception as e: 35 | print(e) 36 | print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" ) 37 | self.data_device = torch.device("cuda") 38 | 39 | self.original_image = image.clamp(0.0, 1.0).to(self.data_device) 40 | self.image_width = self.original_image.shape[2] 41 | self.image_height = self.original_image.shape[1] 42 | 43 | if gt_alpha_mask is not None: 44 | self.original_image *= gt_alpha_mask.to(self.data_device) 45 | else: 46 | self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device) 47 | 48 | self.zfar = 100.0 49 | self.znear = 0.01 50 | 51 | self.trans = trans 52 | self.scale = scale 53 | 54 | self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda() 55 | self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda() 56 | self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0) 57 | self.camera_center = self.world_view_transform.inverse()[3, :3] 58 | 59 | class MiniCam: 60 | def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform): 61 | self.image_width = width 62 | self.image_height = height 63 | self.FoVy = fovy 64 | self.FoVx = fovx 65 | self.znear = znear 66 | self.zfar = zfar 67 | self.world_view_transform = world_view_transform 68 | self.full_proj_transform = full_proj_transform 69 | view_inv = torch.inverse(self.world_view_transform) 70 | self.camera_center = view_inv[3][:3] 71 | 72 | -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/spike_metric/cubes_3d_kernel.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function: 3d Gaussian kernel method for spike cubes. 3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Feb. 16th, 2019. 4 | 5 | """ 6 | 7 | import numpy as np 8 | 9 | def cubes_3d_kernel_method(events, new_events, x_sigma, y_sigma, t_sigma): 10 | """ 11 | Computing inner product between spike cubes using 3d gaussian kernel method. 12 | 13 | Inputs: 14 | ------- 15 | events - events include polarity, timestamp, x and y. 16 | new_events - events after changing operation. 17 | x_sigma, y_sigma, t_sigma - the parameters of 3d gaussian kernel. 18 | 19 | Outputs: 20 | ------- 21 | inner_product - the inner product between events and new_events. 
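    For reference, the quantity computed below is
        inner_product = polarity_scale * sum_{i,j} exp(-dx_ij^2/(2*x_sigma^2) - dy_ij^2/(2*y_sigma^2) - dt_ij^2/(2*t_sigma^2)),
    where the sum runs over all pairs of events drawn from the two cubes and
    polarity_scale = r*r' + (1-r)*(1-r'), with r and r' the ON-event ratios of the two cubes.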
22 | 
23 |     """
24 |     #print('events number={}'.format(len(events[0,:])))
25 |     #print('ON number={}'.format(np.sum(events[0, :]==1)))
26 |     ON_scale = np.sum(events[0, :]==1)/(len(events[0, :]))  # ratio of ON events in the history
27 |     # new_OFF_scale = np.sum(new_events[0, :]==-1)/len(events[0, :])  # ratio of OFF events in new_events
28 |     new_ON_scale = np.sum(new_events[0, :] == 1) / (len(new_events[0, :]))  # ratio of ON events in new_events
29 | 
30 |     # print('events_numbers={}, new_events_numbers={}'.format(len(events[0, :]), len(new_events[0, :])))
31 | 
32 |     polarity_scale = ON_scale*new_ON_scale + (1-ON_scale)*(1-new_ON_scale)
33 |     # polarity_scale = 1 + abs(ON_scale-new_OFF_scale)  # simpler polarity term for the integrated formulation.
34 | 
35 |     x_index = events[2, :][:, None] - new_events[2, :][None, :]
36 |     y_index = events[3, :][:, None] - new_events[3, :][None, :]
37 |     t_index = events[1, :][:, None] - new_events[1, :][None, :]
38 | 
39 |     dist_matrix = np.exp(- x_index**2 / (2*x_sigma**2) - y_index**2 / (2*y_sigma**2) - t_index**2 / (2*t_sigma**2))
40 | 
41 |     inner_product = polarity_scale * np.sum(dist_matrix)
42 | 
43 |     return inner_product
44 | 
45 | 
46 | def cubes_3d_kernel_distance(events, new_events, x_sigma, y_sigma, t_sigma):
47 |     """
48 |     Computing the distance between spike cubes using the inner product in the RKHS.
49 | 
50 |     Inputs:
51 |     -------
52 |         events - events include polarity, timestamp, x and y.
53 |         new_events - events after the changing operation.
54 |         x_sigma, y_sigma, t_sigma - the parameters of the 3d gaussian kernel.
55 | 
56 |     Outputs:
57 |     -------
58 |         distance - the distance between events and new_events.
59 | 
60 |     """
61 | 
62 |     if len(np.transpose(events)) <= 5 or len(np.transpose(new_events)) <= 5:  # too few events in either cube
63 |         distance = 0
64 |     else:
65 | 
66 |         distance = cubes_3d_kernel_method(events, events, x_sigma, y_sigma, t_sigma)
67 |         distance += cubes_3d_kernel_method(new_events, new_events, x_sigma, y_sigma, t_sigma)
68 |         distance -= 2 * cubes_3d_kernel_method(events, new_events, x_sigma, y_sigma, t_sigma)
69 | 
70 |     return distance
71 | 
72 | 
--------------------------------------------------------------------------------
/gaussian_renderer/network_gui.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact  george.drettakis@inria.fr
10 | #
11 | 
12 | import torch
13 | import traceback
14 | import socket
15 | import json
16 | from scene.cameras import MiniCam
17 | 
18 | host = "127.0.0.1"
19 | port = 6009
20 | 
21 | conn = None
22 | addr = None
23 | 
24 | listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
25 | 
26 | def init(wish_host, wish_port):
27 |     global host, port, listener
28 |     host = wish_host
29 |     port = wish_port
30 |     listener.bind((host, port))
31 |     listener.listen()
32 |     listener.settimeout(0)
33 | 
34 | def try_connect():
35 |     global conn, addr, listener
36 |     try:
37 |         conn, addr = listener.accept()
38 |         print(f"\nConnected by {addr}")
39 |         conn.settimeout(None)
40 |     except Exception as inst:
41 |         pass
42 | 
43 | def read():
44 |     global conn
45 |     messageLength = conn.recv(4)
46 |     messageLength = int.from_bytes(messageLength, 'little')
47 |     message = conn.recv(messageLength)
48 |     return json.loads(message.decode("utf-8"))
49 | 
50 | def send(message_bytes, verify):
51 |     global conn
52 |     if message_bytes is not None:
53 |         conn.sendall(message_bytes)
54 |     conn.sendall(len(verify).to_bytes(4, 'little'))
55 |     conn.sendall(bytes(verify, 'ascii'))
56 | 
57 | def receive():
58 |     message = read()
59 | 
60 |     width = message["resolution_x"]
61 |     height = message["resolution_y"]
62 | 
63 |     if width != 0 and height != 0:
64 |         try:
65 |             do_training = bool(message["train"])
66 |             fovy = message["fov_y"]
67 |             fovx = message["fov_x"]
68 |             znear = message["z_near"]
69 |             zfar = message["z_far"]
70 |             do_shs_python = bool(message["shs_python"])
71 |             do_rot_scale_python = bool(message["rot_scale_python"])
72 |             keep_alive = bool(message["keep_alive"])
73 |             scaling_modifier = message["scaling_modifier"]
74 |             world_view_transform = torch.reshape(torch.tensor(message["view_matrix"]), (4, 4)).cuda()
75 |             world_view_transform[:,1] = -world_view_transform[:,1]
76 |             world_view_transform[:,2] = -world_view_transform[:,2]
77 |             full_proj_transform = torch.reshape(torch.tensor(message["view_projection_matrix"]), (4, 4)).cuda()
78 |             full_proj_transform[:,1] = -full_proj_transform[:,1]
79 |             custom_cam = MiniCam(width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform)
80 |         except Exception as e:
81 |             print("")
82 |             traceback.print_exc()
83 |             raise e
84 |         return custom_cam, do_training, do_shs_python, do_rot_scale_python, keep_alive, scaling_modifier
85 |     else:
86 |         return None, None, None, None, None, None
--------------------------------------------------------------------------------
/dataset_utils/e2_timestap.py:
--------------------------------------------------------------------------------
1 | def delete_last_string(input_file, output_file):
2 |     with open(input_file, 'r') as f_in, open(output_file, 'w') as f_out:
3 |         for line in f_in:
4 |             # Split each line on whitespace
5 |             parts = line.split()
6 |             # Take the numeric part, multiply it by 1e6, and convert it to an integer
7 |             number = int(float(parts[-1]) * 1e6)
8 |             # Write the number to the output file
9 |             f_out.write(str(number) + '\n')
10 | 
11 | def swap_lines(input_file, output_file):
12 |     with open(input_file, 'r') as f_in, open(output_file, 'w') as f_out:
13 |         for line in f_in:
14 |             # Split each line on whitespace
15 |             parts = line.split()
16 |             # Swap the two fields and write them to the output file
17 |             f_out.write(parts[1] + ' ' + parts[0] + '\n')
18 | def rename_lines(input_file, output_file):
19 |     with open(input_file, 'r') as f_in, open(output_file, 'w') as f_out:
20 |         for line in f_in:
21 |             # Split each line on '/'
22 |             parts = line.split('/')
23 |             # Take the filename part
24 |             filename = parts[-1]
25 |             # Extract the digit substring from the filename
26 |             number_str = filename.split('_')[-1].split('.')[0]
27 |             # Convert the digit string to an integer
28 |             number = int(number_str)
29 |             # Build the new filename format
30 |             new_filename = '{:05d}.png'.format(number)
31 |             new_line = '/'.join(parts[:-1]) + '/' + new_filename
32 |             # Write to the output file
33 |             f_out.write(new_line + " " + filename.split(' ')[-1])
34 | import os
35 | import shutil
36 | 
37 | def rename_images_in_txt(input_file, output_file):
38 |     with open(input_file, 'r') as f:
39 |         lines = f.readlines()
40 | 
41 |     with open(output_file, 'w') as f:
42 |         for index, line in enumerate(lines):
43 |             parts = line.split()
44 |             new_image_name = f"images/{index:05d}.png"
45 |             number = parts[1]
46 |             f.write(f"{new_image_name} {number}\n")
47 | 
48 | # Example usage
49 | source_folder = 'D:/2024/3DGS/PureEventFilter/data/dynamic_high_colmap_easy/image_timestamps_bad.txt'
50 | destination_folder = 'D:/2024/3DGS/PureEventFilter/data/dynamic_high_colmap_easy/image_timestamps.txt'
51 | rename_images_in_txt(source_folder, destination_folder)
52 | # # Input file path
53 | # input_file = 'D:/2024/3DGS/PureEventFilter/data/dynamic_high_colmap_easy/image_timestamps_old.txt'
54 | # # Output file path
55 | # output_file = "D:/2024/3DGS/PureEventFilter/data/dynamic_high_colmap_easy/image_timestamps_old2.txt"
56 | 
57 | # # Call the function to process the file
58 | # swap_lines(input_file, output_file)
59 | # # Input file path
60 | # input_file = 'D:/2024/3DGS/PureEventFilter/data/dynamic_high_colmap_easy/image_timestamps_old2.txt'
61 | # # Output file path
62 | # output_file = "D:/2024/3DGS/PureEventFilter/data/dynamic_high_colmap_easy/image_timestamps.txt"
63 | 
64 | # # Call the function to process the file
65 | # rename_lines(input_file, output_file)
66 | # # Input file path
67 | # input_file = "D:/2024/3DGS/PureEventFilter/data/dynamic_high_colmap_easy/image_timestamps_old2.txt"
68 | # # Output file path
69 | # output_file = "D:/2024/3DGS/PureEventFilter/data/dynamic_high_colmap_easy/image_timestamps_e2.txt"
70 | 
71 | # # Call the function to process the file
72 | # delete_last_string(input_file, output_file)
--------------------------------------------------------------------------------
/utils/camera_utils.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact  george.drettakis@inria.fr
10 | #
11 | 
12 | from scene.cameras import Camera
13 | import numpy as np
14 | from utils.general_utils import PILtoTorch
15 | from utils.graphics_utils import fov2focal
16 | 
17 | WARNED = False
18 | 
19 | def loadCam(args, id, cam_info, resolution_scale):
20 |     orig_w, orig_h = cam_info.image.size
21 | 
22 |     if args.resolution in [1, 2, 4, 8]:
23 |         resolution = round(orig_w/(resolution_scale * args.resolution)), round(orig_h/(resolution_scale * args.resolution))
24 |     else:  # should be a type that converts to float
25 |         if args.resolution == -1:
26 |             if orig_w > 1600:
27 |                 global WARNED
28 |                 if not WARNED:
29 |                     print("[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n "
30 |                           "If this is not desired, please explicitly specify '--resolution/-r' as 1")
31 |                     WARNED = True
32 |                 global_down = orig_w / 1600
33 |             else:
34 |                 global_down = 1
35 |         else:
36 |             global_down = orig_w / args.resolution
37 | 
38 |         scale = float(global_down) * float(resolution_scale)
39 |         resolution = (int(orig_w / scale), int(orig_h / scale))
40 | 
41 |     resized_image_rgb = PILtoTorch(cam_info.image, resolution)
42 | 
43 |     gt_image = resized_image_rgb[:3, ...]
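# If the resized image carries a fourth (alpha) channel, it is split off below and used as a mask.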
44 | loaded_mask = None 45 | 46 | if resized_image_rgb.shape[1] == 4: 47 | loaded_mask = resized_image_rgb[3:4, ...] 48 | 49 | return Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T, 50 | FoVx=cam_info.FovX, FoVy=cam_info.FovY, 51 | image=gt_image, gt_alpha_mask=loaded_mask, 52 | image_name=cam_info.image_name, uid=id, data_device=args.data_device) 53 | 54 | def cameraList_from_camInfos(cam_infos, resolution_scale, args): 55 | camera_list = [] 56 | 57 | for id, c in enumerate(cam_infos): 58 | camera_list.append(loadCam(args, id, c, resolution_scale)) 59 | 60 | return camera_list 61 | 62 | def camera_to_JSON(id, camera : Camera): 63 | Rt = np.zeros((4, 4)) 64 | Rt[:3, :3] = camera.R.transpose() 65 | Rt[:3, 3] = camera.T 66 | Rt[3, 3] = 1.0 67 | 68 | W2C = np.linalg.inv(Rt) 69 | pos = W2C[:3, 3] 70 | rot = W2C[:3, :3] 71 | serializable_array_2d = [x.tolist() for x in rot] 72 | camera_entry = { 73 | 'id' : id, 74 | 'img_name' : camera.image_name, 75 | 'width' : camera.width, 76 | 'height' : camera.height, 77 | 'position': pos.tolist(), 78 | 'rotation': serializable_array_2d, 79 | 'fy' : fov2focal(camera.FovY, camera.height), 80 | 'fx' : fov2focal(camera.FovX, camera.width) 81 | } 82 | return camera_entry 83 | -------------------------------------------------------------------------------- /lpipsPyTorch/modules/networks.py: -------------------------------------------------------------------------------- 1 | from typing import Sequence 2 | 3 | from itertools import chain 4 | 5 | import torch 6 | import torch.nn as nn 7 | from torchvision import models 8 | 9 | from .utils import normalize_activation 10 | 11 | 12 | def get_network(net_type: str): 13 | if net_type == 'alex': 14 | return AlexNet() 15 | elif net_type == 'squeeze': 16 | return SqueezeNet() 17 | elif net_type == 'vgg': 18 | return VGG16() 19 | else: 20 | raise NotImplementedError('choose net_type from [alex, squeeze, vgg].') 21 | 22 | 23 | class LinLayers(nn.ModuleList): 24 | def __init__(self, n_channels_list: Sequence[int]): 25 | super(LinLayers, self).__init__([ 26 | nn.Sequential( 27 | nn.Identity(), 28 | nn.Conv2d(nc, 1, 1, 1, 0, bias=False) 29 | ) for nc in n_channels_list 30 | ]) 31 | 32 | for param in self.parameters(): 33 | param.requires_grad = False 34 | 35 | 36 | class BaseNet(nn.Module): 37 | def __init__(self): 38 | super(BaseNet, self).__init__() 39 | 40 | # register buffer 41 | self.register_buffer( 42 | 'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) 43 | self.register_buffer( 44 | 'std', torch.Tensor([.458, .448, .450])[None, :, None, None]) 45 | 46 | def set_requires_grad(self, state: bool): 47 | for param in chain(self.parameters(), self.buffers()): 48 | param.requires_grad = state 49 | 50 | def z_score(self, x: torch.Tensor): 51 | return (x - self.mean) / self.std 52 | 53 | def forward(self, x: torch.Tensor): 54 | x = self.z_score(x) 55 | 56 | output = [] 57 | for i, (_, layer) in enumerate(self.layers._modules.items(), 1): 58 | x = layer(x) 59 | if i in self.target_layers: 60 | output.append(normalize_activation(x)) 61 | if len(output) == len(self.target_layers): 62 | break 63 | return output 64 | 65 | 66 | class SqueezeNet(BaseNet): 67 | def __init__(self): 68 | super(SqueezeNet, self).__init__() 69 | 70 | self.layers = models.squeezenet1_1(True).features 71 | self.target_layers = [2, 5, 8, 10, 11, 12, 13] 72 | self.n_channels_list = [64, 128, 256, 384, 384, 512, 512] 73 | 74 | self.set_requires_grad(False) 75 | 76 | 77 | class AlexNet(BaseNet): 78 | def __init__(self): 79 | 
super(AlexNet, self).__init__()
81 | 
82 |         self.layers = models.alexnet(True).features
83 |         self.target_layers = [2, 5, 8, 10, 12]
84 |         self.n_channels_list = [64, 192, 384, 256, 256]
85 | 
86 |         self.set_requires_grad(False)
87 | 
88 | 
89 | class VGG16(BaseNet):
90 |     def __init__(self):
91 |         super(VGG16, self).__init__()
92 | 
93 |         self.layers = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).features
94 |         self.target_layers = [4, 9, 16, 23, 30]
95 |         self.n_channels_list = [64, 128, 256, 512, 512]
96 | 
97 |         self.set_requires_grad(False)
--------------------------------------------------------------------------------
/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/spike_train_distances_curves.py:
--------------------------------------------------------------------------------
1 | """
2 | Function: Spike train distance curves: polarity inference measurement, polarity independent measurement and Hamming distances.
3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Oct. 29th, 2018.
4 | 
5 | """
6 | 
7 | import numpy as np
8 | from matplotlib import pyplot as pl
9 | from scipy.interpolate import make_interp_spline  # scipy.interpolate.spline was removed in SciPy 1.0
10 | from event_process import generating_spike_train
11 | from measure_spike_trains import spike_train_displacement_distances, spike_train_removing_distance
12 | 
13 | 
14 | ### generate a spike train.
15 | spike_train = generating_spike_train.random_array(1000000, 1000, ratio=1)
16 | 
17 | 
18 | ### measure distances of spike trains.
19 | displacement_steps = np.linspace(100, 2000, 20)
20 | pif_distance, pid_distance, hamming_distance = spike_train_displacement_distances(spike_train, displacement_steps, 1000)
21 | 
22 | fig = pl.figure()
23 | pl.plot(displacement_steps, hamming_distance, '--', color='blue', markersize=3, linewidth=3, figure=fig, label='Hamming distance')
24 | pl.plot(displacement_steps, pif_distance, '-.', color='limegreen', markersize=3, linewidth=3, figure=fig, label='Polarity inference')
25 | pl.plot(displacement_steps, pid_distance, '-', color='red', markersize=3, linewidth=3, figure=fig, label='Polarity independent')
26 | 
27 | font1 = {'family': 'Times New Roman', 'size': 15}
28 | font2 = {'size': 12}
29 | pl.xlabel('Circular shift', font1)
30 | pl.grid(axis='y', linestyle='-.')
31 | pl.ylabel('Distortion', font1)
32 | pl.xlim((0, 2000))
33 | pl.ylim((0, 2100))
34 | pl.xticks(np.linspace(0, 2000, 6), fontsize=12)
35 | pl.yticks(fontsize=12)
36 | pl.yticks(np.linspace(0, 2000, 6), fontsize=12)
37 | #pl.legend(loc = 0, prop=font2)
38 | pl.legend(loc='upper center', bbox_to_anchor=(0.27, 0.92), prop=font2)
39 | pl.show()
40 | 
41 | 
42 | ### show spike distance curves when removing spikes.
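# (The red polarity-independent curve below is smoothed with a cubic B-spline purely for
# display; the measured distances themselves are unchanged.)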
43 | removing_steps = np.linspace(10, 200, 20)
44 | pif_distance, pid_distance, hamming_distance = spike_train_removing_distance(spike_train, removing_steps, 1000)  # sigma = 1000
45 | 
46 | fig = pl.figure()
47 | new_steps = np.linspace(10, 199, 10)
48 | smooth = make_interp_spline(removing_steps, pid_distance)(new_steps)
49 | 
50 | pl.plot(removing_steps, hamming_distance, '--', color='blue', markersize=3, linewidth=3, figure=fig, label='Hamming distance')
51 | pl.plot(removing_steps, pif_distance, '-.', color='limegreen', markersize=3, linewidth=3, figure=fig, label='Polarity inference')
52 | #pl.plot(removing_steps, pid_distance, '-', color='red', markersize=3, linewidth=3, figure=fig, label='Polarity independent')
53 | pl.plot(new_steps, smooth, '-', color='red', markersize=3, linewidth=3, figure=fig, label='Polarity independent')
54 | 
55 | font1 = {'family': 'Times New Roman', 'size': 15}
56 | font2 = {'size': 12}
57 | pl.xlabel('Removing spike numbers', font1)
58 | pl.grid(axis='y', linestyle='-.')
59 | pl.ylabel('Distortion', font1)
60 | pl.xlim((0, 200))
61 | pl.ylim((0, 300))
62 | pl.xticks(np.linspace(0, 200, 5), fontsize=12)
63 | pl.yticks(fontsize=12)
64 | pl.yticks(np.linspace(0, 300, 6), fontsize=12)
65 | pl.legend(loc = 0, prop=font2)
66 | pl.show()
67 | 
--------------------------------------------------------------------------------
/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/measure_spike_trains.py:
--------------------------------------------------------------------------------
1 | """
2 | Function: Measure distances of spike trains.
3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Oct. 28th, 2018.
4 | 
5 | """
6 | import numpy as np
7 | from event_process import spike_train_processing
8 | from spike_metric import spike_train_metric
9 | 
10 | 
11 | def spike_train_displacement_distances(spike_train, displacement_steps, sigma):
12 |     """
13 |     spike train distances under cyclic displacement steps.
14 | 
15 |     Inputs:
16 |     -------
17 |         spike_train - numpy.array: the spike train of spike firing timestamps.
18 |         displacement_steps - the cyclic displacement steps.
19 |         sigma - the kernel measure method parameter.
20 | 
21 |     Outputs:
22 |     -------
23 |         pif_distance - polarity interference measurement distance.
24 |         pid_distance - polarity independent measurement distance.
25 |         hamming_distance - hamming distance.
26 | 
27 |     """
28 |     pif_distance = np.zeros((len(displacement_steps)))
29 |     pid_distance = np.zeros((len(displacement_steps)))
30 |     hamming_distance = np.zeros((len(displacement_steps)))
31 |     for i, displacement_step in enumerate(displacement_steps):
32 |         new_spike_train = spike_train_processing.displacement_spikes(spike_train, displacement_step)
33 |         pif_distance[i] = spike_train_metric.cal_dist_pif(spike_train, new_spike_train, sigma)  # polarity interference measurement
34 |         pid_distance[i] = spike_train_metric.cal_dist_pid(spike_train, new_spike_train, sigma)  # polarity independent measurement
35 |         hamming_distance[i] = spike_train_metric.hamming_distance(spike_train, new_spike_train)  # Hamming distance measurement
36 | 
37 |     return pif_distance, pid_distance, hamming_distance
38 | 
39 | 
40 | def spike_train_removing_distance(spike_train, removing_steps, sigma):
41 |     """
42 |     spike train distances under removing steps.
43 | 
44 |     Inputs:
45 |     -------
46 |         spike_train - numpy.array: the spike train of spike firing timestamps.
47 |         removing_steps - the removing steps.
48 |         sigma - the kernel measure method parameter.
49 | 
50 |     Outputs:
51 |     -------
52 |         pif_distance - polarity interference measurement distance.
53 |         pid_distance - polarity independent measurement distance.
54 |         hamming_distance - hamming distance.
55 | 
56 |     """
57 |     pif_distance = np.zeros((len(removing_steps)))
58 |     pid_distance = np.zeros((len(removing_steps)))
59 |     hamming_distance = np.zeros((len(removing_steps)))
60 |     for i, removing_step in enumerate(removing_steps):
61 |         new_spike_train = spike_train_processing.remove_spikes(spike_train, removing_step)
62 | 
63 |         pif_distance[i] = spike_train_metric.cal_dist_pif(spike_train, new_spike_train, sigma)  # polarity interference measurement
64 |         pid_distance[i] = spike_train_metric.cal_dist_pid(spike_train, new_spike_train, sigma)  # polarity independent measurement
65 |         hamming_distance[i] = spike_train_metric.hamming_distance(spike_train, new_spike_train)  # Hamming distance measurement
66 | 
67 |     return pif_distance, pid_distance, hamming_distance
68 | 
--------------------------------------------------------------------------------
/Event_sensor/src/event_display.py:
--------------------------------------------------------------------------------
1 | # Damien JOUBERT 17-01-2020
2 | import numpy as np
3 | import cv2
4 | 
5 | 
6 | class EventDisplay():
7 |     """ Structure to handle the thread created by OpenCV to render an image """
8 |     name = "test"  # Name of the window
9 |     time = 0  # Internal counter of the display (us)
10 |     last_frame = 0  # Time of the last frame
11 |     frametime = 100000  # Time between display refreshes (us)
12 |     time_surface = np.zeros((10, 10), dtype=np.uint64)  # Timestamp of the last event in the focal plane
13 |     pol_surface = np.zeros((10, 10), dtype=np.uint8)  # Polarity of the last event in the focal plane
14 |     im = np.zeros((10, 10, 3), dtype=np.uint8)  # Image to render
15 |     render = 0  # 0: binary image, 1: time surface
16 |     render_tau = 40000  # tau decay of the time surface (us)
17 |     display_time = True
18 | 
19 |     def __init__(self, name, dx, dy, frametime, render=0):
20 |         """ Initialize the display by resetting the internal timer of the structure and allocating buffers of
21 |             the right size
22 |             Args:
23 |                 name: name of the window
24 |                 dx, dy: size of the data
25 |                 frametime: delay between two frames (us)
26 |                 render: rendering method: 0 = binary, 1 = time surface
27 |         """
28 |         self.name = name
29 |         self.time = 0
30 |         self.last_frame = 0
31 |         self.frametime = frametime
32 |         self.time_surface = np.zeros((int(dy), int(dx)), dtype=np.uint64)
33 |         self.pol_surface = np.zeros((int(dy), int(dx)), dtype=np.uint8)
34 |         self.im = np.zeros((int(dy), int(dx), 3), dtype=np.uint8)
35 |         self.render = render
36 |         self.render_tau = 3 * frametime
37 | 
38 |     def reset(self):
39 |         """ Reset timers and buffers to 0 """
40 |         self.time = 0
41 |         self.last_frame = 0
42 |         self.time_surface[:] = 0
43 |         self.pol_surface[:] = 0
44 | 
45 |     def update(self, pk, dt):
46 |         """ During the time dt, the EventBuffer pk was created.
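            (A new frame is drawn only once the time accumulated since the last frame exceeds
            frametime, i.e. every 0.1 s of event time with the default 100000 us.)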
This function adds these events to the structure and 47 | triggers a display if needed 48 | Args: 49 | pk: EventBuffer 50 | dt: delay since the last update 51 | """ 52 | self.time_surface[pk.y[:pk.i], pk.x[:pk.i]] = pk.ts[:pk.i] 53 | self.pol_surface[pk.y[:pk.i], pk.x[:pk.i]] = pk.p[:pk.i] 54 | self.time += dt 55 | self.last_frame += dt 56 | if self.last_frame > self.frametime: 57 | self.last_frame = 0 58 | self.im[:] = 125 59 | if self.render == 0: 60 | ind = np.where((self.time_surface > self.time - self.frametime) & (self.time_surface <= self.time)) 61 | self.im[:, :, 0][ind] = self.pol_surface[ind]*255 62 | self.im[:, :, 1][ind] = self.pol_surface[ind]*255 63 | self.im[:, :, 2][ind] = self.pol_surface[ind]*255 64 | if self.render == 1: 65 | self.im[:, :, 0] = (self.pol_surface * 2 - 1) * 125 * np.exp(-(self.time - self.time_surface.astype(np.double)) / self.render_tau) 66 | if self.display_time: self.im = cv2.putText(self.im, '{} s'.format(self.time / 1e6), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255)) 67 | cv2.imshow(self.name, self.im) 68 | cv2.waitKey(10) 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /full_eval.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import os 13 | from argparse import ArgumentParser 14 | 15 | mipnerf360_outdoor_scenes = ["bicycle", "flowers", "garden", "stump", "treehill"] 16 | mipnerf360_indoor_scenes = ["room", "counter", "kitchen", "bonsai"] 17 | tanks_and_temples_scenes = ["truck", "train"] 18 | deep_blending_scenes = ["drjohnson", "playroom"] 19 | 20 | parser = ArgumentParser(description="Full evaluation script parameters") 21 | parser.add_argument("--skip_training", action="store_true") 22 | parser.add_argument("--skip_rendering", action="store_true") 23 | parser.add_argument("--skip_metrics", action="store_true") 24 | parser.add_argument("--output_path", default="./eval") 25 | args, _ = parser.parse_known_args() 26 | 27 | all_scenes = [] 28 | all_scenes.extend(mipnerf360_outdoor_scenes) 29 | all_scenes.extend(mipnerf360_indoor_scenes) 30 | all_scenes.extend(tanks_and_temples_scenes) 31 | all_scenes.extend(deep_blending_scenes) 32 | 33 | if not args.skip_training or not args.skip_rendering: 34 | parser.add_argument('--mipnerf360', "-m360", required=True, type=str) 35 | parser.add_argument("--tanksandtemples", "-tat", required=True, type=str) 36 | parser.add_argument("--deepblending", "-db", required=True, type=str) 37 | args = parser.parse_args() 38 | 39 | if not args.skip_training: 40 | common_args = " --quiet --eval --test_iterations -1 " 41 | for scene in mipnerf360_outdoor_scenes: 42 | source = args.mipnerf360 + "/" + scene 43 | os.system("python train.py -s " + source + " -i images_4 -m " + args.output_path + "/" + scene + common_args) 44 | for scene in mipnerf360_indoor_scenes: 45 | source = args.mipnerf360 + "/" + scene 46 | os.system("python train.py -s " + source + " -i images_2 -m " + args.output_path + "/" + scene + common_args) 47 | for scene in tanks_and_temples_scenes: 48 | source = args.tanksandtemples + "/" + scene 49 | os.system("python train.py -s " + source + " -m " + 
args.output_path + "/" + scene + common_args) 50 | for scene in deep_blending_scenes: 51 | source = args.deepblending + "/" + scene 52 | os.system("python train.py -s " + source + " -m " + args.output_path + "/" + scene + common_args) 53 | 54 | if not args.skip_rendering: 55 | all_sources = [] 56 | for scene in mipnerf360_outdoor_scenes: 57 | all_sources.append(args.mipnerf360 + "/" + scene) 58 | for scene in mipnerf360_indoor_scenes: 59 | all_sources.append(args.mipnerf360 + "/" + scene) 60 | for scene in tanks_and_temples_scenes: 61 | all_sources.append(args.tanksandtemples + "/" + scene) 62 | for scene in deep_blending_scenes: 63 | all_sources.append(args.deepblending + "/" + scene) 64 | 65 | common_args = " --quiet --eval --skip_train" 66 | for scene, source in zip(all_scenes, all_sources): 67 | os.system("python render.py --iteration 7000 -s " + source + " -m " + args.output_path + "/" + scene + common_args) 68 | os.system("python render.py --iteration 30000 -s " + source + " -m " + args.output_path + "/" + scene + common_args) 69 | 70 | if not args.skip_metrics: 71 | scenes_string = "" 72 | for scene in all_scenes: 73 | scenes_string += "\"" + args.output_path + "/" + scene + "\" " 74 | 75 | os.system("python metrics.py -m " + scenes_string) -------------------------------------------------------------------------------- /arguments/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | from argparse import ArgumentParser, Namespace 13 | import sys 14 | import os 15 | 16 | class GroupParams: 17 | pass 18 | 19 | class ParamGroup: 20 | def __init__(self, parser: ArgumentParser, name : str, fill_none = False): 21 | group = parser.add_argument_group(name) 22 | for key, value in vars(self).items(): 23 | shorthand = False 24 | if key.startswith("_"): 25 | shorthand = True 26 | key = key[1:] 27 | t = type(value) 28 | value = value if not fill_none else None 29 | if shorthand: 30 | if t == bool: 31 | group.add_argument("--" + key, ("-" + key[0:1]), default=value, action="store_true") 32 | else: 33 | group.add_argument("--" + key, ("-" + key[0:1]), default=value, type=t) 34 | else: 35 | if t == bool: 36 | group.add_argument("--" + key, default=value, action="store_true") 37 | else: 38 | group.add_argument("--" + key, default=value, type=t) 39 | 40 | def extract(self, args): 41 | group = GroupParams() 42 | for arg in vars(args).items(): 43 | if arg[0] in vars(self) or ("_" + arg[0]) in vars(self): 44 | setattr(group, arg[0], arg[1]) 45 | return group 46 | 47 | class ModelParams(ParamGroup): 48 | def __init__(self, parser, sentinel=False): 49 | self.sh_degree = 3 50 | self._source_path = "" 51 | self._model_path = "" 52 | self._images = "images" 53 | self._resolution = -1 54 | self._white_background = False 55 | self.data_device = "cuda" 56 | self.eval = False 57 | self.gray = False 58 | self.random = False 59 | self.event = False 60 | self.deblur = False 61 | super().__init__(parser, "Loading Parameters", sentinel) 62 | 63 | def extract(self, args): 64 | g = super().extract(args) 65 | g.source_path = os.path.abspath(g.source_path) 66 | return g 67 | 68 | class PipelineParams(ParamGroup): 69 | def __init__(self, parser): 70 | 
self.convert_SHs_python = False 71 | self.compute_cov3D_python = False 72 | self.debug = False 73 | super().__init__(parser, "Pipeline Parameters") 74 | 75 | class OptimizationParams(ParamGroup): 76 | def __init__(self, parser): 77 | self.iterations = 30_000 78 | self.position_lr_init = 0.00016 79 | self.position_lr_final = 0.0000016 80 | self.position_lr_delay_mult = 0.01 81 | self.position_lr_max_steps = 30_000 82 | self.feature_lr = 0.0025 83 | self.opacity_lr = 0.05 84 | self.scaling_lr = 0.005 85 | self.rotation_lr = 0.001 86 | self.percent_dense = 0.01 87 | self.lambda_dssim = 0.2 88 | self.densification_interval = 100 89 | self.opacity_reset_interval = 3000 90 | self.densify_from_iter = 500 91 | self.densify_until_iter = 15_000 92 | self.densify_grad_threshold = 0.0002 93 | self.random_background = False 94 | super().__init__(parser, "Optimization Parameters") 95 | 96 | def get_combined_args(parser : ArgumentParser): 97 | cmdlne_string = sys.argv[1:] 98 | cfgfile_string = "Namespace()" 99 | args_cmdline = parser.parse_args(cmdlne_string) 100 | 101 | try: 102 | cfgfilepath = os.path.join(args_cmdline.model_path, "cfg_args") 103 | print("Looking for config file in", cfgfilepath) 104 | with open(cfgfilepath) as cfg_file: 105 | print("Config file found: {}".format(cfgfilepath)) 106 | cfgfile_string = cfg_file.read() 107 | except TypeError: 108 | print("Config file not found at") 109 | pass 110 | args_cfgfile = eval(cfgfile_string) 111 | 112 | merged_dict = vars(args_cfgfile).copy() 113 | for k,v in vars(args_cmdline).items(): 114 | if v != None: 115 | merged_dict[k] = v 116 | return Namespace(**merged_dict) 117 | -------------------------------------------------------------------------------- /metrics.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | from pathlib import Path 13 | import os 14 | from PIL import Image 15 | import torch 16 | import torchvision.transforms.functional as tf 17 | from utils.loss_utils import ssim 18 | from lpipsPyTorch import lpips 19 | import json 20 | from tqdm import tqdm 21 | from utils.image_utils import psnr 22 | from argparse import ArgumentParser 23 | 24 | def readImages(renders_dir, gt_dir): 25 | renders = [] 26 | gts = [] 27 | image_names = [] 28 | for fname in os.listdir(renders_dir): 29 | render = Image.open(renders_dir / fname) 30 | gt = Image.open(gt_dir / fname) 31 | renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda()) 32 | gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda()) 33 | image_names.append(fname) 34 | return renders, gts, image_names 35 | 36 | def evaluate(model_paths): 37 | 38 | full_dict = {} 39 | per_view_dict = {} 40 | full_dict_polytopeonly = {} 41 | per_view_dict_polytopeonly = {} 42 | print("") 43 | 44 | for scene_dir in model_paths: 45 | try: 46 | print("Scene:", scene_dir) 47 | full_dict[scene_dir] = {} 48 | per_view_dict[scene_dir] = {} 49 | full_dict_polytopeonly[scene_dir] = {} 50 | per_view_dict_polytopeonly[scene_dir] = {} 51 | 52 | test_dir = Path(scene_dir) / "test" 53 | 54 | for method in os.listdir(test_dir): 55 | print("Method:", method) 56 | 57 | full_dict[scene_dir][method] = {} 58 | per_view_dict[scene_dir][method] = {} 59 | full_dict_polytopeonly[scene_dir][method] = {} 60 | per_view_dict_polytopeonly[scene_dir][method] = {} 61 | 62 | method_dir = test_dir / method 63 | gt_dir = method_dir/ "gt" 64 | renders_dir = method_dir / "renders" 65 | renders, gts, image_names = readImages(renders_dir, gt_dir) 66 | 67 | ssims = [] 68 | psnrs = [] 69 | lpipss = [] 70 | 71 | for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"): 72 | ssims.append(ssim(renders[idx], gts[idx])) 73 | psnrs.append(psnr(renders[idx], gts[idx])) 74 | lpipss.append(lpips(renders[idx], gts[idx], net_type='vgg')) 75 | 76 | print(" SSIM : {:>12.7f}".format(torch.tensor(ssims).mean(), ".5")) 77 | print(" PSNR : {:>12.7f}".format(torch.tensor(psnrs).mean(), ".5")) 78 | print(" LPIPS: {:>12.7f}".format(torch.tensor(lpipss).mean(), ".5")) 79 | print("") 80 | 81 | full_dict[scene_dir][method].update({"SSIM": torch.tensor(ssims).mean().item(), 82 | "PSNR": torch.tensor(psnrs).mean().item(), 83 | "LPIPS": torch.tensor(lpipss).mean().item()}) 84 | per_view_dict[scene_dir][method].update({"SSIM": {name: ssim for ssim, name in zip(torch.tensor(ssims).tolist(), image_names)}, 85 | "PSNR": {name: psnr for psnr, name in zip(torch.tensor(psnrs).tolist(), image_names)}, 86 | "LPIPS": {name: lp for lp, name in zip(torch.tensor(lpipss).tolist(), image_names)}}) 87 | 88 | with open(scene_dir + "/results.json", 'w') as fp: 89 | json.dump(full_dict[scene_dir], fp, indent=True) 90 | with open(scene_dir + "/per_view.json", 'w') as fp: 91 | json.dump(per_view_dict[scene_dir], fp, indent=True) 92 | except: 93 | print("Unable to compute metrics for model", scene_dir) 94 | 95 | if __name__ == "__main__": 96 | device = torch.device("cuda:0") 97 | torch.cuda.set_device(device) 98 | 99 | # Set up command line argument parser 100 | parser = ArgumentParser(description="Training script parameters") 101 | parser.add_argument('--model_paths', '-m', required=True, nargs="+", type=str, default=[]) 102 | args = parser.parse_args() 103 | evaluate(args.model_paths) 104 | 
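For reference, metrics.py expects each model directory to contain test/<method>/renders and
test/<method>/gt with matching filenames, as produced by the rendering step. A typical
invocation, assuming a hypothetical trained model directory ./eval/lego, would be:

    python metrics.py -m ./eval/lego

Averaged SSIM, PSNR and LPIPS values are written to results.json, and per-image values to
per_view.json, inside each model directory.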
-------------------------------------------------------------------------------- /utils/general_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | import torch 13 | import sys 14 | from datetime import datetime 15 | import numpy as np 16 | import random 17 | 18 | def inverse_sigmoid(x): 19 | return torch.log(x/(1-x)) 20 | 21 | def PILtoTorch(pil_image, resolution): 22 | resized_image_PIL = pil_image.resize(resolution) 23 | resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0 24 | if len(resized_image.shape) == 3: 25 | return resized_image.permute(2, 0, 1) 26 | else: 27 | return resized_image.unsqueeze(dim=-1).permute(2, 0, 1) 28 | 29 | def get_expon_lr_func( 30 | lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000 31 | ): 32 | """ 33 | Copied from Plenoxels 34 | 35 | Continuous learning rate decay function. Adapted from JaxNeRF 36 | The returned rate is lr_init when step=0 and lr_final when step=max_steps, and 37 | is log-linearly interpolated elsewhere (equivalent to exponential decay). 38 | If lr_delay_steps>0 then the learning rate will be scaled by some smooth 39 | function of lr_delay_mult, such that the initial learning rate is 40 | lr_init*lr_delay_mult at the beginning of optimization but will be eased back 41 | to the normal learning rate when steps>lr_delay_steps. 42 | :param conf: config subtree 'lr' or similar 43 | :param max_steps: int, the number of steps during optimization. 44 | :return HoF which takes step as input 45 | """ 46 | 47 | def helper(step): 48 | if step < 0 or (lr_init == 0.0 and lr_final == 0.0): 49 | # Disable this parameter 50 | return 0.0 51 | if lr_delay_steps > 0: 52 | # A kind of reverse cosine decay. 
53 |             delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
54 |                 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)
55 |             )
56 |         else:
57 |             delay_rate = 1.0
58 |         t = np.clip(step / max_steps, 0, 1)
59 |         log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
60 |         return delay_rate * log_lerp
61 | 
62 |     return helper
63 | 
64 | def strip_lowerdiag(L):
65 |     uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda")
66 | 
67 |     uncertainty[:, 0] = L[:, 0, 0]
68 |     uncertainty[:, 1] = L[:, 0, 1]
69 |     uncertainty[:, 2] = L[:, 0, 2]
70 |     uncertainty[:, 3] = L[:, 1, 1]
71 |     uncertainty[:, 4] = L[:, 1, 2]
72 |     uncertainty[:, 5] = L[:, 2, 2]
73 |     return uncertainty
74 | 
75 | def strip_symmetric(sym):
76 |     return strip_lowerdiag(sym)
77 | 
78 | def build_rotation(r):
79 |     norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])
80 | 
81 |     q = r / norm[:, None]
82 | 
83 |     R = torch.zeros((q.size(0), 3, 3), device='cuda')
84 | 
85 |     r = q[:, 0]
86 |     x = q[:, 1]
87 |     y = q[:, 2]
88 |     z = q[:, 3]
89 | 
90 |     R[:, 0, 0] = 1 - 2 * (y*y + z*z)
91 |     R[:, 0, 1] = 2 * (x*y - r*z)
92 |     R[:, 0, 2] = 2 * (x*z + r*y)
93 |     R[:, 1, 0] = 2 * (x*y + r*z)
94 |     R[:, 1, 1] = 1 - 2 * (x*x + z*z)
95 |     R[:, 1, 2] = 2 * (y*z - r*x)
96 |     R[:, 2, 0] = 2 * (x*z - r*y)
97 |     R[:, 2, 1] = 2 * (y*z + r*x)
98 |     R[:, 2, 2] = 1 - 2 * (x*x + y*y)
99 |     return R
100 | 
101 | def build_scaling_rotation(s, r):
102 |     L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda")
103 |     R = build_rotation(r)
104 | 
105 |     L[:,0,0] = s[:,0]
106 |     L[:,1,1] = s[:,1]
107 |     L[:,2,2] = s[:,2]
108 | 
109 |     L = R @ L
110 |     return L
111 | 
112 | def safe_state(silent):
113 |     old_f = sys.stdout
114 |     class F:
115 |         def __init__(self, silent):
116 |             self.silent = silent
117 | 
118 |         def write(self, x):
119 |             if not self.silent:
120 |                 if x.endswith("\n"):
121 |                     old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S")))))
122 |                 else:
123 |                     old_f.write(x)
124 | 
125 |         def flush(self):
126 |             old_f.flush()
127 | 
128 |     sys.stdout = F(silent)
129 | 
130 |     random.seed(0)
131 |     np.random.seed(0)
132 |     torch.manual_seed(0)
133 |     torch.cuda.set_device(torch.device("cuda:0"))
134 | 
--------------------------------------------------------------------------------
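get_expon_lr_func above is what drives the position learning rate during training; with the defaults from OptimizationParams (position_lr_init=0.00016, position_lr_final=0.0000016, max_steps=30_000), the schedule behaves as follows (a sketch, not part of the training code):

```python
from utils.general_utils import get_expon_lr_func

# The returned closure maps step -> learning rate, decaying log-linearly
# from lr_init at step 0 to lr_final at max_steps.
xyz_scheduler = get_expon_lr_func(lr_init=0.00016, lr_final=0.0000016,
                                  lr_delay_mult=0.01, max_steps=30_000)
for step in (0, 15_000, 30_000):
    print(step, xyz_scheduler(step))  # 1.6e-4, ~1.6e-5, 1.6e-6
```

Note that lr_delay_mult only has an effect when lr_delay_steps > 0; with the default lr_delay_steps=0 the warm-up branch is skipped entirely.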
/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/event_process/events_to_frames.py: --------------------------------------------------------------------------------
1 | """
2 | Function: events to frames or videos in a dynamic vision sensor (DVS).
3 | Author information: Jianing Li, lijianing@pku.edu.cn, Peking University, May 14th, 2018.
4 | 
5 | """
6 | 
7 | import numpy
8 | import cv2
9 | import numpy as np
10 | import math
11 | 
12 | 
13 | def spike_time_to_image(dvs_data, spike_time, timespan, fps, width, height):
14 |     """
15 |     events generate RGB video based on rate-based: spike time.
16 | 
17 |     Inputs:
18 |     -------
19 |     dvs_data - the dataset of the AER sensor, including polarity (t), timestamp (ts), x coordinate (X) and y coordinate (Y).
20 |     spike_time - the maximum timestamp in the event stream.
21 |     timespan - the integral time interval.
22 |     fps - the frames per second.
23 |     width - the width of the AER sensor.
24 |     height - the height of the AER sensor.
25 | 
26 |     Outputs:
27 |     ------
28 |     video - video includes multi-frames.
29 | 
30 |     """
31 |     spike_frame = numpy.zeros([height, width, 3])  # RGB background---black
32 |     fourcc = cv2.VideoWriter_fourcc(*'XVID')
33 |     VideoWriter = cv2.VideoWriter('timesVideo.avi', fourcc, fps, (width, height), True)  # DAVIS128 Width*Height: 128*128
34 | 
35 |     """Read whole aedat file: spike map in RGB frame, data.x is x coordinate, data.y is y coordinate"""
36 |     for i in range(0, spike_time, timespan):
37 |         for j in range(len(dvs_data.t)):
38 |             if dvs_data.ts[j] >= i and dvs_data.ts[j] <= i+timespan:
39 |                 if dvs_data.t[j] == 1:
40 |                     spike_frame[int(dvs_data.x[j] - 1), int(dvs_data.y[j] - 1), :] = [0, 0, 255]  # positive---red
41 | 
42 |                 if dvs_data.t[j] == 0:
43 |                     spike_frame[int(dvs_data.x[j] - 1), int(dvs_data.y[j] - 1), :] = [0, 255, 0]  # negative---green
44 | 
45 |         spike_frame = spike_frame.astype('uint8')  # float to uint8
46 |         VideoWriter.write(spike_frame)
47 | 
48 |     cv2.imwrite('upsampling.jpg', spike_frame)
49 | 
50 |     VideoWriter.release()
51 | 
52 | def spikes_to_images(dvs_data, width, height):
53 |     """Function: spikes in a timespan reconstruct three RGB frames"""
54 |     spike_frame_on = np.zeros([height, width, 3])  # RGB background---black
55 |     spike_frame_off = np.zeros([height, width, 3])
56 |     spike_frame = np.zeros([height, width, 3])
57 |     spike_number_on = np.zeros((height, width))  # spike frame initial in time interval
58 |     spike_number_off = np.zeros((height, width))
59 |     spike_on = np.zeros((height, width))  # single channel information
60 |     spike_off = np.zeros((height, width))
61 |     spike_number = np.zeros((height, width))
62 | 
63 |     """Read whole aedat file: spike map in RGB frame, data.x is x coordinate, data.y is y coordinate"""
64 |     for j in range(len(dvs_data.t)):
65 |         spike_number[int(dvs_data.x[j] - 1), int(dvs_data.y[j] - 1)] += 1
66 |         if dvs_data.t[j] == 1:
67 |             spike_number_on[int(dvs_data.x[j] - 1), int(dvs_data.y[j] - 1)] += 1
68 |             spike_frame_on[int(dvs_data.x[j] - 1), int(dvs_data.y[j] - 1), :] = [0, 0, 255]  # positive---red
69 |             spike_on[int(dvs_data.x[j] - 1), int(dvs_data.y[j] - 1)] = 255
70 | 
71 |         if dvs_data.t[j] == 0:
72 |             spike_number_off[int(dvs_data.x[j] - 1), int(dvs_data.y[j] - 1)] += 1
73 |             spike_frame_off[int(dvs_data.x[j] - 1), int(dvs_data.y[j] - 1), :] = [0, 255, 0]  # negative---green
74 |             spike_off[int(dvs_data.x[j] - 1), int(dvs_data.y[j] - 1)] = 255
75 | 
76 |     for n in range(width):
77 |         for m in range(height):
78 |             if spike_number_on[m - 1, n - 1] + spike_number_off[
79 |                 m - 1, n - 1] == 0:  # threshold can be used as the filter
80 |                 spike_frame[m - 1, n - 1, :] = [255, 255, 255]  # white background
81 |                 # spike_frame[m-1, n-1, :] = [0, 0, 0]  # black background
82 | 
83 |             else:
84 |                 if spike_number_on[m - 1, n - 1] >= spike_number_off[m - 1, n - 1]:
85 |                     spike_frame[m - 1, n - 1, :] = [0, 0, 255]  # positive---red
86 | 
87 |                 else:
88 |                     spike_frame[m - 1, n - 1, :] = [0, 255, 0]  # negative---green
89 | 
90 |     spike_count = spike_number.reshape(width * height, 1)
91 |     for k in range(len(spike_count)):
92 |         if spike_count[k] == 0:
93 |             spike_count[k] = 128
94 |         else:
95 |             spike_count[k] = 255 * (1 / (1 + math.exp(-1 / 2 * spike_count[k])))
96 |     gray_frame = spike_count.reshape(height, width)
97 |     gray_frame = gray_frame.astype('uint8')  # float to uint8
98 | 
99 | 
100 |     return spike_frame
--------------------------------------------------------------------------------
/utils/sh_utils.py: --------------------------------------------------------------------------------
1 | # Copyright 2021 The PlenOctree Authors.
2 | # Redistribution and use in source and binary forms, with or without 3 | # modification, are permitted provided that the following conditions are met: 4 | # 5 | # 1. Redistributions of source code must retain the above copyright notice, 6 | # this list of conditions and the following disclaimer. 7 | # 8 | # 2. Redistributions in binary form must reproduce the above copyright notice, 9 | # this list of conditions and the following disclaimer in the documentation 10 | # and/or other materials provided with the distribution. 11 | # 12 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 13 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 14 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 15 | # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 16 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 18 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 19 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 20 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 21 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 22 | # POSSIBILITY OF SUCH DAMAGE. 23 | 24 | import torch 25 | 26 | C0 = 0.28209479177387814 27 | C1 = 0.4886025119029199 28 | C2 = [ 29 | 1.0925484305920792, 30 | -1.0925484305920792, 31 | 0.31539156525252005, 32 | -1.0925484305920792, 33 | 0.5462742152960396 34 | ] 35 | C3 = [ 36 | -0.5900435899266435, 37 | 2.890611442640554, 38 | -0.4570457994644658, 39 | 0.3731763325901154, 40 | -0.4570457994644658, 41 | 1.445305721320277, 42 | -0.5900435899266435 43 | ] 44 | C4 = [ 45 | 2.5033429417967046, 46 | -1.7701307697799304, 47 | 0.9461746957575601, 48 | -0.6690465435572892, 49 | 0.10578554691520431, 50 | -0.6690465435572892, 51 | 0.47308734787878004, 52 | -1.7701307697799304, 53 | 0.6258357354491761, 54 | ] 55 | 56 | 57 | def eval_sh(deg, sh, dirs): 58 | """ 59 | Evaluate spherical harmonics at unit directions 60 | using hardcoded SH polynomials. 61 | Works with torch/np/jnp. 62 | ... Can be 0 or more batch dimensions. 63 | Args: 64 | deg: int SH deg. 
Currently, 0-4 supported
65 |         sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]
66 |         dirs: jnp.ndarray unit directions [..., 3]
67 |     Returns:
68 |         [..., C]
69 |     """
70 |     assert deg <= 4 and deg >= 0
71 |     coeff = (deg + 1) ** 2
72 |     assert sh.shape[-1] >= coeff
73 | 
74 |     result = C0 * sh[..., 0]
75 |     if deg > 0:
76 |         x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
77 |         result = (result -
78 |                   C1 * y * sh[..., 1] +
79 |                   C1 * z * sh[..., 2] -
80 |                   C1 * x * sh[..., 3])
81 | 
82 |         if deg > 1:
83 |             xx, yy, zz = x * x, y * y, z * z
84 |             xy, yz, xz = x * y, y * z, x * z
85 |             result = (result +
86 |                       C2[0] * xy * sh[..., 4] +
87 |                       C2[1] * yz * sh[..., 5] +
88 |                       C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
89 |                       C2[3] * xz * sh[..., 7] +
90 |                       C2[4] * (xx - yy) * sh[..., 8])
91 | 
92 |             if deg > 2:
93 |                 result = (result +
94 |                           C3[0] * y * (3 * xx - yy) * sh[..., 9] +
95 |                           C3[1] * xy * z * sh[..., 10] +
96 |                           C3[2] * y * (4 * zz - xx - yy) * sh[..., 11] +
97 |                           C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
98 |                           C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
99 |                           C3[5] * z * (xx - yy) * sh[..., 14] +
100 |                           C3[6] * x * (xx - 3 * yy) * sh[..., 15])
101 | 
102 |                 if deg > 3:
103 |                     result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
104 |                               C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
105 |                               C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
106 |                               C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
107 |                               C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
108 |                               C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
109 |                               C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
110 |                               C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
111 |                               C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
112 |     return result
113 | 
114 | def RGB2SH(rgb):
115 |     return (rgb - 0.5) / C0
116 | 
117 | def SH2RGB(sh):
118 |     return sh * C0 + 0.5
--------------------------------------------------------------------------------
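A degree-0 sanity check ties the helpers above together (a sketch using the torch backend, one of the array libraries the docstring mentions):

```python
import torch

rgb  = torch.tensor([[0.2, 0.5, 0.8]])   # [..., C]
sh   = RGB2SH(rgb).unsqueeze(-1)         # [..., C, 1]: one coefficient per channel
dirs = torch.tensor([[0.0, 0.0, 1.0]])   # direction is unused at degree 0

# eval_sh(0, ...) returns C0 * sh[..., 0] = rgb - 0.5; adding the same 0.5
# offset that SH2RGB applies recovers the original color.
print(eval_sh(0, sh, dirs) + 0.5)        # ~[[0.2, 0.5, 0.8]]
```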
/dataset_utils/tummie.py: --------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | 
4 | # def copy_last_100_images(source_folder, destination_folder):
5 | #     # Collect the paths of all image files in the source folder
6 | #     image_files = [file for file in os.listdir(source_folder) if file.endswith('.jpg')]
7 | #     # Take the file names of the last 100 images
8 | #     last_100_images = image_files[-100:]
9 | 
10 | #     # Iterate over the last 100 images, copying each to the destination folder with a new name
11 | #     for index, image_file in enumerate(last_100_images):
12 | #         source_path = os.path.join(source_folder, image_file)
13 | #         destination_path = os.path.join(destination_folder, f"{index:05d}.jpg")  # zero-padded numeric names
14 | #         shutil.copyfile(source_path, destination_path)
15 | 
16 | 
17 | # def extract_last_100_lines(input_file, output_file):
18 | #     with open(input_file, 'r') as f:
19 | #         lines = f.readlines()
20 | #     last_100_lines = lines[-100:]
21 | 
22 | #     with open(output_file, 'w') as f:
23 | #         f.writelines(last_100_lines)
24 | def copy_last_100_images(source_folder, destination_folder):
25 |     # Collect the paths of all image files in the source folder
26 |     image_files = [file for file in os.listdir(source_folder)]
27 |     # Take the file names of the last 100 images
28 |     last_100_images = image_files[-100:]
29 | 
30 |     # Iterate over the last 100 images, copying each to the destination folder with a new name
31 |     for index, image_file in enumerate(last_100_images):
32 |         source_path = os.path.join(source_folder, image_file)
33 |         destination_path = os.path.join(destination_folder, f"{index:05d}.png")  # zero-padded numeric names
34 |         shutil.copyfile(source_path, destination_path)
35 | 
36 | 
37 | def extract_last_100_lines(input_file, output_file):
38 |     with open(input_file, 'r') as f:
39 |         lines = f.readlines()
40 |     last_100_lines = lines[-100:]
41 | 
42 |     with open(output_file, 'w') as f:
43 |         f.writelines(last_100_lines)
44 | def copy_first_100_images(source_folder, destination_folder):
45 |     # Collect the paths of all image files in the source folder
46 |     image_files = [file for file in os.listdir(source_folder)]
47 |     # Take the file names of the first 100 images
48 |     last_100_images = image_files[0:100]
49 | 
50 |     # Iterate over the first 100 images, copying each to the destination folder with a new name
51 |     for index, image_file in enumerate(last_100_images):
52 |         source_path = os.path.join(source_folder, image_file)
53 |         destination_path = os.path.join(destination_folder, f"{index:05d}.png")  # zero-padded numeric names
54 |         shutil.copyfile(source_path, destination_path)
55 | 
56 | 
57 | def extract_first_100_lines(input_file, output_file):
58 |     with open(input_file, 'r') as f:
59 |         lines = f.readlines()
60 |     last_100_lines = lines[0:100]
61 | 
62 |     with open(output_file, 'w') as f:
63 |         f.writelines(last_100_lines)
64 | 
65 | 
66 | # Paths of the source folder and the destination folder
67 | source_folder = "D:/2024/3DGS/PureEventFilter/data/dynamic_translation_colmap_easy/images"
68 | destination_folder = "D:/2024/3DGS/PureEventFilter/data/dynamic_translation_colmap_easy/renders"
69 | 
70 | # Paths of the input file and the output file
71 | input_file = "D:/2024/3DGS/PureEventFilter/data/dynamic_translation_colmap_easy/images.txt"
72 | output_file = "D:/2024/3DGS/PureEventFilter/data/dynamic_translation_colmap_easy/image_timestamps_old.txt"
73 | 
74 | # Extract the last 100 lines of the input file into the output file
75 | # copy_first_100_images(source_folder, destination_folder)
76 | # extract_first_100_lines(input_file, output_file)
77 | copy_last_100_images(source_folder, destination_folder)
78 | extract_last_100_lines(input_file, output_file)
79 | def copy_lines_until_threshold(input_file, output_file, threshold):
80 |     with open(input_file, 'r') as f_in, open(output_file, 'w') as f_out:
81 |         for line in f_in:
82 |             # Take the first number on each line, multiply it by 1e6 and convert to int
83 |             first_number = int(float(line.split()[0]) * 1e6)
84 |             # If the first number is below the threshold, write the line to the output file
85 |             if first_number <= threshold:
86 |                 # Convert the first number back to a string, then write the whole line
87 |                 line_parts = line.split()
88 |                 line_parts[0] = str(first_number)
89 |                 f_out.write(' '.join(line_parts) + '\n')
90 |             # Once the first number exceeds the threshold, stop
91 |             else:
92 |                 break
93 | def copy_lines_after_threshold(input_file, output_file, threshold):
94 |     with open(input_file, 'r') as f_in, open(output_file, 'w') as f_out:
95 |         copy = False
96 |         for line in f_in:
97 |             # Take the first number on each line, multiply it by 1e6 and convert to int
98 |             first_number = int(float(line.split()[0]) * 1e6)
99 |             # Check whether the first number exceeds the threshold
100 |             if first_number > threshold:
101 |                 copy = True
102 |             # Once a line's first number has exceeded the threshold, start writing to the output file
103 |             if copy:
104 |                 # Convert the first number back to a string, then write the whole line
105 |                 line_parts = line.split()
106 |                 line_parts[0] = str(first_number)
107 |                 f_out.write(' '.join(line_parts) + '\n')
108 | 
109 | # Input file path
110 | input_file = 'D:/2024/3DGS/PureEventFilter/data/dynamic_translation_colmap_easy/events.txt'
111 | # Output file path
112 | output_file = 'D:/2024/3DGS/PureEventFilter/data/dynamic_translation_colmap_easy/calibration_volt.txt'
113 | # Threshold
114 | threshold = 55 * 1e6  # replace with the value you need
115 | 
116 | # # Copy the file contents
117 | # copy_lines_until_threshold(input_file, output_file, threshold)
118 | copy_lines_after_threshold(input_file, output_file, threshold)
119 | 
--------------------------------------------------------------------------------
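The four helpers above differ only in which slice of the input they take; a single parameterized copier could replace both image functions (a sketch for this script, where os and shutil are already imported; copy_image_slice is a hypothetical name, not part of the original file):

```python
def copy_image_slice(source_folder, destination_folder, n=100, from_end=True):
    """Copy the first or last n images, renamed to 00000.png, 00001.png, ..."""
    files = sorted(os.listdir(source_folder))  # sort so the slice is deterministic
    picked = files[-n:] if from_end else files[:n]
    for index, name in enumerate(picked):
        shutil.copyfile(os.path.join(source_folder, name),
                        os.path.join(destination_folder, f"{index:05d}.png"))
```

One design note: os.listdir returns entries in arbitrary order, so sorting is what makes "first 100" and "last 100" well defined; the original functions rely on the directory happening to list frames in sequence.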
/Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/event_process/read_dvs.py: --------------------------------------------------------------------------------
1 | """
2 | Function: Read MNIST_DVS data.
3 | Author information: Jianing Li, lijianing@pku.edu.cn, Peking University, Apr 16th, 2018
4 | code based on: http://www2.imse-cnm.csic.es/caviar/MNISTDVS.html
5 | 
6 | """
7 | 
8 | import numpy as np
9 | import math
10 | import codecs
11 | import scipy.io
12 | 
13 | class aefile(object):
14 |     def __init__(self, filename, max_events=1e6):
15 |         self.filename = filename
16 |         self.max_events = max_events
17 |         self.header = []
18 |         self.data, self.timestamp = self.read()
19 | 
20 |     # alias for read
21 |     def load(self):
22 |         return self.read()
23 | 
24 |     def read(self):
25 |         with open(self.filename, 'rb') as f:
26 |             line = f.readline()
27 |             line = str(line, encoding='gbk')  # bytes to str
28 |             headline = line[0]
29 | 
30 |             while headline == '#':
31 |                 self.header.append(line)
32 |                 if line[0:9] == '#!AER-DAT':
33 |                     global aer_version
34 |                     aer_version = line[9]
35 |                 current = f.tell()
36 |                 line = f.readline()
37 |                 headline = chr(line[0])
38 |             # if aer_version != '2':
39 |             #     raise Exception('Invalid AER version. Expected 2, got %s' % aer_version)
40 | 
41 |             f.seek(0, 2)
42 |             global numEvents
43 |             numEvents = math.floor((f.tell() - current) / 8)
44 | 
45 |             if numEvents > self.max_events:
46 |                 print('There are %i events, but max_events is set to %i. Will only use %i events.' % (
47 |                     numEvents, self.max_events, self.max_events))
48 |                 numEvents = int(self.max_events)
49 | 
50 |             f.seek(current, 0)
51 |             timestamps = np.zeros(numEvents)
52 |             data = np.zeros(numEvents)
53 | 
54 |             for i in range(int(numEvents)):
55 |                 hexlify = codecs.getencoder('hex')
56 |                 data[i] = int(hexlify(f.read(4))[0], 16)
57 |                 timestamps[i] = int(hexlify(f.read(4))[0], 16)
58 | 
59 |             return data, timestamps
60 | 
61 |     def save(self, data=None, filename=None, ext='aedat'):
62 |         if filename is None:
63 |             filename = self.filename
64 |         if data is None:
65 |             data = aedata(self)
66 |         if ext == 'aedat':
67 |             # unpack events data
68 |             ts = data.ts
69 |             data = data.pack()
70 | 
71 |             with open(filename, 'wb') as f:
72 |                 # save the head file
73 |                 for item in self.header:
74 |                     if type(item) == str:
75 |                         item = bytes(item, encoding='utf-8')  # str to bytes
76 |                     f.write(item)
77 | 
78 |                 # save events data
79 |                 no_items = len(data)
80 |                 for i in range(no_items):
81 |                     f.write(bytes.fromhex(hex(int(data[i]))[2:].zfill(8)))
82 |                     f.write(bytes.fromhex(hex(int(ts[i]))[2:].zfill(8)))
83 | 
84 |     def unpack(self):
85 |         noData = len(self.data)
86 | 
87 |         x = np.zeros(noData)
88 |         y = np.zeros(noData)
89 |         t = np.zeros(noData)
90 | 
91 |         for i in range(noData):
92 |             d = int(self.data[i])
93 | 
94 |             t[i] = d & 0x1
95 |             x[i] = 128 - ((d >> 0x1) & 0x7F)
96 |             y[i] = (d >> 0x8) & 0x7F
97 |         return x, y, t
98 | 
99 | 
100 | class aedata(object):
101 |     def __init__(self, ae_file=None):
102 |         self.dimensions = (128, 128)
103 |         if isinstance(ae_file, aefile):
104 |             self.x, self.y, self.t = ae_file.unpack()
105 |             self.ts = ae_file.timestamp
106 |         elif isinstance(ae_file, aedata):
107 |             self.x, self.y, self.t = ae_file.x, ae_file.y, ae_file.t
108 |             self.ts = ae_file.ts
109 |         else:
110 |             self.x, self.y, self.t, self.ts = np.array([]), np.array([]), np.array([]), np.array([])
111 | 
112 |     def __getitem__(self, item):
113 |         rtn = aedata()
114 |         rtn.x = self.x[item]
115 |         rtn.y = self.y[item]
116 |         rtn.t = self.t[item]
117 |         rtn.ts = self.ts[item]
118 |         return rtn
119 | 
120 |     def __setitem__(self, key, value):
121 |         self.x[key] = value.x
122 |         self.y[key] = value.y
123 |         self.t[key] = value.t
124 |         self.ts[key] = value.ts
125 | 
126 |     def __delitem__(self, key):
127 |         self.x = np.delete(self.x, key)
128 |         self.y = np.delete(self.y, key)
129 |         self.t = np.delete(self.t, key)
130 |         self.ts = np.delete(self.ts, key)
131 | 
132 |     def save_to_mat(self, filename):
133 |         scipy.io.savemat(filename, {'X': self.x, 'Y': self.y, 't': self.t, 'ts': self.ts})
134 | 
135 |     def pack(self):
136 |         noData = len(self.x)
137 |         packed = np.zeros(noData)
138 |         for i in range(noData):
139 |             packed[i] = (int(self.t[i]) & 0x1)
140 |             packed[i] += (int(128 - self.x[i]) & 0x7F) << 0x1
141 |             packed[i] += (int(self.y[i]) & 0x7F) << 0x8
142 | 
143 |         return packed
--------------------------------------------------------------------------------
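A typical round trip with these classes (a sketch; the .aedat path points at one of the MNIST-DVS recordings shipped under datasets/, and the import path assumes this repository's package layout):

```python
from event_process.read_dvs import aefile, aedata

# Load a recording, inspect it, and export it to MATLAB format.
aer  = aefile('../datasets/mnist_0_scale04_0001.aedat', max_events=1e5)
data = aedata(aer)                     # decoded x, y, polarity t, timestamps ts
print(len(data.ts), data.dimensions)   # event count, (128, 128) sensor size
data.save_to_mat('mnist_0_scale04_0001.mat')
first_half = data[:len(data.ts) // 2]  # __getitem__ supports slicing
```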
/Event_sensor/src/arbiter.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | from event_buffer import EventBuffer
4 | 
5 | 
6 | class BottleNeckArbiter():
7 |     t_per_event = 0.1        # Time spent to process one event (us)
8 |     ev_acc = EventBuffer(0)  # Events accumulated
9 |     time = 0                 # Current time (us)
10 | 
11 |     def __init__(self, t_per_event, time):
12 |         """ Init the arbiter
13 |         Args:
14 |             t_per_event: time spent to process one event (us)
15 |             time: starting time (us)
16 |         """
17 |         self.t_per_event = t_per_event
18 |         self.time = time
19 | 
20 |     def process(self, new_ev, dt):
21 |         """
22 |         Args:
23 |             new_ev: incoming events as EventBuffer
24 |             dt: time since the last update (us)
25 |         """
26 |         tps_process = float(self.t_per_event) * (self.ev_acc.i + new_ev.i)
27 |         self.time = self.time + dt
28 |         release_ev = EventBuffer(0)
29 |         if tps_process == 0:
30 |             return release_ev
31 |         self.ev_acc.increase_ev(new_ev)
32 |         nb_event_pross = min(self.ev_acc.i, int(dt / tps_process))
33 |         delta = 0
34 |         if self.ev_acc.ts[0] < self.time - dt:
35 |             delta = self.time - dt - self.ev_acc.ts[0]
36 |         release_ev.add_array(self.ev_acc.ts[:nb_event_pross] + delta + tps_process * np.arange(0, nb_event_pross, 1),
37 |                              self.ev_acc.y[:nb_event_pross],
38 |                              self.ev_acc.x[:nb_event_pross],
39 |                              self.ev_acc.p[:nb_event_pross])
40 |         self.ev_acc.remove_elt(nb_event_pross)
41 |         return release_ev
42 | 
43 | 
44 | class RowArbiter():
45 |     t_per_event = 0.1        # Time spent to process one event (us)
46 |     ev_acc = EventBuffer(0)  # Events accumulated
47 |     time = 0                 # Current time (us)
48 |     def __init__(self, t_per_event, time):
49 |         """ Init the arbiter
50 |         Args:
51 |             t_per_event: time spent to process one event (us)
52 |             time: starting time (us)
53 |         """
54 |         self.t_per_event = t_per_event
55 |         self.time = time
56 | 
57 |     def process(self, new_ev, dt):
58 |         """
59 |         Args:
60 |             new_ev: incoming events as EventBuffer
61 |             dt: time since the last update (us)
62 |         """
63 |         tps_process = float(self.t_per_event) * (self.ev_acc.i + new_ev.i)
64 |         self.time = self.time + dt
65 |         release_ev = EventBuffer(0)
66 |         if tps_process == 0:
67 |             return release_ev
68 |         self.ev_acc.increase_ev(new_ev)
69 |         nb_event_pross = int(dt / tps_process)
70 |         i = 0
71 |         delta = 0
72 |         if self.ev_acc.ts[0] < self.time - dt:
73 |             delta = self.time - dt - self.ev_acc.ts[0]
74 |         while self.ev_acc.i > 0 and self.ev_acc.ts[0] <= self.time and nb_event_pross > i:
75 |             i += 1
76 |             ind = np.where((self.ev_acc.y == self.ev_acc.y[0]) & (self.ev_acc.ts <= self.time))
77 |             ts_inter = np.full(ind[0].shape, self.ev_acc.ts[0] + delta + tps_process * i)
78 |             release_ev.add_array(ts_inter, self.ev_acc.y[ind], self.ev_acc.x[ind], self.ev_acc.p[ind])
79 |             self.ev_acc.remove_row(self.ev_acc.y[0], -1)
80 |         return release_ev
81 | 
82 | 
83 | class SynchonousArbiter():
84 |     clock_period = 0.001     # Clock's period (us)
85 |     ev_acc = EventBuffer(0)  # Events accumulated
86 |     time = 0                 # Current time (us)
87 |     cur_row = 0              # current row processed
88 |     max_row = 200            # Number of rows
89 | 
90 |     def __init__(self, max_row, clock_period, time):
91 |         """ Init the arbiter
92 |         Args:
93 |             max_row: number of rows, clock_period: clock's period (us)
94 |             time: starting time (us)
95 |         """
96 |         self.clock_period = clock_period
97 |         self.time = time
98 |         self.cur_row = 0
99 |         self.max_row = max_row
100 | 
101 |     def process(self, new_ev, dt):
102 |         """
103 |         Args:
104 |             new_ev: incoming events as EventBuffer
105 |             dt: time since the last update (us)
106 |         """
107 |         nb_row_processed = int(dt // self.clock_period)
108 |         t_max = self.time + dt
109 |         release_ev = EventBuffer(0)
110 |         self.ev_acc.increase_ev(new_ev)
111 |         for i in range(0, nb_row_processed, 1):
112 |             self.time = self.time + self.clock_period
113 |             self.cur_row = (self.cur_row + 1) % self.max_row
114 |             ind = np.where((self.ev_acc.y[:self.ev_acc.i] == self.cur_row) & (self.ev_acc.ts[:self.ev_acc.i] < self.time))
115 |             if len(ind[0]) > 0:
116 |                 ts_inter = np.full(ind[0].shape, self.time)
117 |                 release_ev.add_array(ts_inter, self.ev_acc.y[:self.ev_acc.i][ind], self.ev_acc.x[:self.ev_acc.i][ind],
118 |                                      self.ev_acc.p[:self.ev_acc.i][ind])
119 |                 self.ev_acc.remove_row(self.cur_row, self.time)
120 |             if self.ev_acc.i == 0:
121 |                 break
122 |         self.time = t_max
123 |         return release_ev
124 | 
125 | 
--------------------------------------------------------------------------------
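All three arbiters expose the same process(new_ev, dt) interface, so they can be swapped freely inside a sensor simulation loop. A minimal sketch (the add_array argument order ts/y/x/p mirrors the calls inside arbiter.py above; the exact EventBuffer semantics live in event_buffer.py, so treat the values here as illustrative):

```python
import numpy as np
from event_buffer import EventBuffer
from arbiter import BottleNeckArbiter  # assumes Event_sensor/src is on sys.path

arbiter = BottleNeckArbiter(t_per_event=0.1, time=0)      # 0.1 us per event
batch = EventBuffer(0)
batch.add_array(np.array([10, 20, 30], dtype=np.uint64),  # timestamps (us)
                np.array([5, 6, 7], dtype=np.uint16),     # y
                np.array([1, 2, 3], dtype=np.uint16),     # x
                np.array([1, 0, 1], dtype=np.uint64))     # polarity
released = arbiter.process(batch, dt=100)                 # events let through in 100 us
print(released.i)                                         # number of released events
```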
/scene/__init__.py: --------------------------------------------------------------------------------
1 | #
2 | # Copyright (C) 2023, Inria
3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco
4 | # All rights reserved.
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 | 
12 | import os
13 | import random
14 | import json
15 | from utils.system_utils import searchForMaxIteration
16 | from scene.dataset_readers import sceneLoadTypeCallbacks
17 | from scene.gaussian_model import GaussianModel
18 | from arguments import ModelParams
19 | from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON
20 | 
21 | class Scene:
22 | 
23 |     gaussians : GaussianModel
24 | 
25 |     def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, shuffle=False, resolution_scales=[1.0]):
26 |         """
27 |         :param path: Path to colmap scene main folder.
28 | """ 29 | self.model_path = args.model_path 30 | self.loaded_iter = None 31 | self.gaussians = gaussians 32 | if load_iteration: 33 | if load_iteration == -1: 34 | self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud")) 35 | else: 36 | self.loaded_iter = load_iteration 37 | print("Loading trained model at iteration {}".format(self.loaded_iter)) 38 | 39 | self.train_cameras = {} 40 | self.blurry_cameras = {} 41 | self.event_cameras = {} 42 | self.test_cameras = {} 43 | 44 | if os.path.exists(os.path.join(args.source_path, "sparse")): 45 | if not hasattr(args, 'gray') : 46 | scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval) 47 | else: 48 | scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval,args.gray,args.random,args.deblur,args.event) 49 | elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")): 50 | print("Found transforms_train.json file, assuming Blender data set!") 51 | scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval) 52 | else: 53 | assert False, "Could not recognize scene type!" 54 | 55 | if not self.loaded_iter: 56 | with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file: 57 | dest_file.write(src_file.read()) 58 | json_cams = [] 59 | camlist = [] 60 | camBlurrylist = [] 61 | if scene_info.test_cameras: 62 | camlist.extend(scene_info.test_cameras) 63 | if scene_info.train_cameras: 64 | camlist.extend(scene_info.train_cameras) 65 | if scene_info.blurry_cameras: 66 | camBlurrylist.extend(scene_info.blurry_cameras) 67 | if scene_info.event_cameras: 68 | camBlurrylist.extend(scene_info.event_cameras) 69 | for id, cam in enumerate(camlist): 70 | json_cams.append(camera_to_JSON(id, cam)) 71 | with open(os.path.join(self.model_path, "cameras.json"), 'w') as file: 72 | json.dump(json_cams, file) 73 | 74 | if shuffle: 75 | random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling 76 | random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling 77 | 78 | self.cameras_extent = scene_info.nerf_normalization["radius"] 79 | 80 | for resolution_scale in resolution_scales: 81 | print("Loading Training Cameras") 82 | self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args) 83 | print("Loading Test Cameras") 84 | self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args) 85 | print("Loading Blurry Cameras") 86 | self.blurry_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.blurry_cameras, resolution_scale, args) 87 | print("Loading Event Cameras") 88 | self.event_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.event_cameras, resolution_scale, args) 89 | if self.loaded_iter: 90 | self.gaussians.load_ply(os.path.join(self.model_path, 91 | "point_cloud", 92 | "iteration_" + str(self.loaded_iter), 93 | "point_cloud.ply")) 94 | else: 95 | self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent) 96 | 97 | def save(self, iteration): 98 | point_cloud_path = os.path.join(self.model_path, "point_cloud/iteration_{}".format(iteration)) 99 | self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply")) 100 | 101 | def getTrainCameras(self, scale=1.0): 102 | return self.train_cameras[scale] 103 | def getBlurryCameras(self, scale=1.0): 104 | return 
self.blurry_cameras[scale]
105 |     def getEventCameras(self, scale=1.0):
106 |         return self.event_cameras[scale]
107 |     def getTestCameras(self, scale=1.0):
108 |         return self.test_cameras[scale]
--------------------------------------------------------------------------------
/Event_sensor/src/ExrRead.py: --------------------------------------------------------------------------------
1 | import OpenEXR
2 | import Imath
3 | import numpy as np
4 | import cv2
5 | import time
6 | 
7 | # BrightScale: 1 leaves the image unchanged, 1000 multiplies it by 1000. Because the scene is simple and EXR has a wide dynamic range,
8 | # we can simply multiply by a constant to obtain the bright image from the dark one, so only the dark image needs to be rendered,
9 | # even if it is barely visible.
10 | def read_exr_channel(exr_file, channel_name, BrightScale):
11 |     # Read the EXR file
12 |     exr = OpenEXR.InputFile(exr_file)
13 | 
14 |     # Get the image header and channel information
15 |     header = exr.header()
16 |     channel_info = header['channels']
17 | 
18 |     # Get the size of the image
19 |     width = header['dataWindow'].max.x + 1
20 |     height = header['dataWindow'].max.y + 1
21 | 
22 |     # Read the specified channel
23 |     # float is between 0 and 1
24 |     channel_data = exr.channel(channel_name, Imath.PixelType(Imath.PixelType.FLOAT))
25 | 
26 |     # Convert the channel data to a NumPy array
27 |     channel_list = np.frombuffer(channel_data, dtype=np.float32)
28 |     channel_list = np.reshape(channel_list, (height, width))
29 | 
30 |     # Create a CV data type with the same size as the channel data
31 |     cv_image = np.zeros((height, width), dtype=np.float32)
32 | 
33 |     # Assign the channel data to the CV image
34 |     cv_image[:, :] = channel_list * BrightScale
35 | 
36 |     # Display the CV image
37 |     # cv2.imshow("Channel Image", cv_image)
38 |     # cv2.waitKey(0)
39 |     # cv2.destroyAllWindows()
40 |     return cv_image
41 | 
42 | def calculate_intensity_from_spetral(exr_file, channel_number, BrightScale):
43 |     # Read the EXR file
44 |     start_time = time.time()
45 |     exr = OpenEXR.InputFile(exr_file)
46 |     # Get the image header and channel information
47 |     header = exr.header()
48 |     channel_info = header['channels']
49 |     # Get the size of the image
50 |     width = header['dataWindow'].max.x + 1
51 |     height = header['dataWindow'].max.y + 1
52 | 
53 |     # Create an empty list to store the channel data
54 |     channel_list = []
55 | 
56 |     # Read each channel individually.
57 |     # All 31 channels are read here so that a subset of arbitrary size N can be formed;
58 |     # in terms of algorithmic cost, the reading time is determined by the number of channels read.
59 | #for i in range(31, 0, -1): 60 | for i in range(1, 32): 61 | channel_name = f"Radiance.C{i:02d}" 62 | channel_data = exr.channel(channel_name, Imath.PixelType(Imath.PixelType.FLOAT)) 63 | temp = np.frombuffer(channel_data, dtype=np.float32) 64 | temp = np.reshape(temp, (height, width)) 65 | channel_list.append(temp) 66 | 67 | 68 | end_time = time.time() 69 | total_time = end_time - start_time 70 | print("Total time of N hyper", total_time*channel_number/32) 71 | # Create a CV data type with the same size as the channel data 72 | cv_image = np.zeros((height, width), dtype=np.float32) 73 | # cv2.waitKey() 74 | # Assign the channel data to the CV image with weighted sum 75 | if channel_number == 31: 76 | cv_image[:, :] = ( 77 | 0.82 * channel_list[0] + 0.85 * channel_list[1] + 0.87 * channel_list[2] + 0.88 * channel_list[3] + 0.92 * channel_list[4] + 78 | 0.95 * channel_list[5] + 0.96 * channel_list[6] + 0.96 * channel_list[7] + 0.98 * channel_list[8] + 1 * channel_list[9] + 0.99 * channel_list[10] + 79 | 1 * channel_list[11] + 0.99 * channel_list[12] + 1 * channel_list[13] + 0.99 * channel_list[14] + 1 * channel_list[15] + 80 | 0.99 * channel_list[16] + 0.98 * channel_list[17] + 0.98 * channel_list[18] + 0.97 * channel_list[19] + 0.95 * channel_list[20] + 81 | 0.94 * channel_list[21] + 0.92 * channel_list[22] + 0.92 * channel_list[23] + 0.87 * channel_list[24] + 0.86 * channel_list[25] + 82 | 0.85 * channel_list[26] + 0.82 * channel_list[27] + 0.79 * channel_list[28] + 0.78 * channel_list[29] + 0.76 * channel_list[30] 83 | )/31.0 84 | elif channel_number == 16: 85 | cv_image[:, :] = ( 86 | 0.82 * channel_list[0] + 0.87 * channel_list[2] + 0.92 * channel_list[4] + 87 | 0.96 * channel_list[6] + 0.98 * channel_list[8] + 0.99 * channel_list[10] + 88 | 1 * channel_list[12] + 0.99 * channel_list[14] + 89 | 0.99 * channel_list[16] + 0.98 * channel_list[18] + 0.97 * channel_list[20] + 90 | 0.94 * channel_list[22] + 0.87 * channel_list[24] + 91 | 0.85 * channel_list[26] + 0.79 * channel_list[28] + 0.76 * channel_list[30] 92 | )/16.0 93 | elif channel_number == 11: 94 | cv_image[:, :] = ( 95 | 0.82 * channel_list[0] + 0.88 * channel_list[3] + 0.95 * channel_list[6] + 0.96 * channel_list[9] + 96 | 1 * channel_list[12] + 1 * channel_list[15] + 0.99 * channel_list[18] + 97 | 0.99 * channel_list[21] + 0.94 * channel_list[24] + 98 | 0.85 * channel_list[27] + 0.78 * channel_list[30] 99 | )/11.0 100 | elif channel_number == 7: 101 | cv_image[:, :] = ( 102 | 0.82 * channel_list[0] + 0.92 * channel_list[5] + 1 * channel_list[10] + 0.99 * channel_list[15] + 103 | 0.99 * channel_list[20] + 0.92 * channel_list[25] + 0.85 * channel_list[30] 104 | )/7.0 105 | elif channel_number == 6: 106 | cv_image[:, :] = ( 107 | 0.82 * channel_list[0] + 0.95 * channel_list[6] + 1 * channel_list[12] + 108 | 0.99 * channel_list[18] + 0.94 * channel_list[24] + 0.85 * channel_list[30] 109 | )/6.0 110 | # Scale the CV image 111 | cv_image[:, :] = cv_image * BrightScale 112 | # cv_image /= 31.0 113 | return cv_image 114 | # # Example usage 115 | # exr_file = "C:/Users//Desktop/exr_test/Rotate_360_pbrt00121exr" 116 | # channel_name = "intensity" 117 | # read_exr_channel(exr_file, channel_name) -------------------------------------------------------------------------------- /convert.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 
5 | #
6 | # This software is free for non-commercial, research and evaluation use
7 | # under the terms of the LICENSE.md file.
8 | #
9 | # For inquiries contact george.drettakis@inria.fr
10 | #
11 | 
12 | import os
13 | import logging
14 | from argparse import ArgumentParser
15 | import shutil
16 | 
17 | # This Python script is based on the shell converter script provided in the MipNerF 360 repository.
18 | parser = ArgumentParser("Colmap converter")
19 | parser.add_argument("--no_gpu", action='store_true')
20 | parser.add_argument("--skip_matching", action='store_true')
21 | parser.add_argument("--source_path", "-s", required=True, type=str)
22 | parser.add_argument("--camera", default="OPENCV", type=str)
23 | parser.add_argument("--colmap_executable", default="D:/2024/3DGS/COLMAP-3.9.1-windows-cuda/COLMAP.bat", type=str)
24 | parser.add_argument("--resize", action="store_true")
25 | parser.add_argument("--magick_executable", default="", type=str)
26 | args = parser.parse_args()
27 | colmap_command = '"{}"'.format(args.colmap_executable) if len(args.colmap_executable) > 0 else "colmap"
28 | magick_command = '"{}"'.format(args.magick_executable) if len(args.magick_executable) > 0 else "magick"
29 | use_gpu = 1 if not args.no_gpu else 0
30 | 
31 | if not args.skip_matching:
32 |     os.makedirs(args.source_path + "/distorted/sparse", exist_ok=True)
33 | 
34 |     ## Feature extraction
35 |     feat_extraction_cmd = colmap_command + " feature_extractor "\
36 |         "--database_path " + args.source_path + "/distorted/database.db \
37 |         --image_path " + args.source_path + "/input \
38 |         --ImageReader.single_camera 1 \
39 |         --ImageReader.camera_model " + args.camera + " \
40 |         --SiftExtraction.use_gpu " + str(use_gpu)
41 |     exit_code = os.system(feat_extraction_cmd)
42 |     if exit_code != 0:
43 |         logging.error(f"Feature extraction failed with code {exit_code}. Exiting.")
44 |         exit(exit_code)
45 | 
46 |     ## Feature matching
47 |     feat_matching_cmd = colmap_command + " exhaustive_matcher \
48 |         --database_path " + args.source_path + "/distorted/database.db \
49 |         --SiftMatching.use_gpu " + str(use_gpu)
50 |     exit_code = os.system(feat_matching_cmd)
51 |     if exit_code != 0:
52 |         logging.error(f"Feature matching failed with code {exit_code}. Exiting.")
53 |         exit(exit_code)
54 | 
55 |     ### Bundle adjustment
56 |     # The default Mapper tolerance is unnecessarily large,
57 |     # decreasing it speeds up bundle adjustment steps.
58 |     mapper_cmd = (colmap_command + " mapper \
59 |         --database_path " + args.source_path + "/distorted/database.db \
60 |         --image_path " + args.source_path + "/input \
61 |         --output_path " + args.source_path + "/distorted/sparse \
62 |         --Mapper.ba_global_function_tolerance=0.000001")
63 |     exit_code = os.system(mapper_cmd)
64 |     if exit_code != 0:
65 |         logging.error(f"Mapper failed with code {exit_code}. Exiting.")
66 |         exit(exit_code)
67 | 
68 | ### Image undistortion
69 | ## We need to undistort our images into ideal pinhole intrinsics.
70 | img_undist_cmd = (colmap_command + " image_undistorter \
71 |     --image_path " + args.source_path + "/input \
72 |     --input_path " + args.source_path + "/distorted/sparse/0 \
73 |     --output_path " + args.source_path + "\
74 |     --output_type COLMAP")
75 | exit_code = os.system(img_undist_cmd)
76 | if exit_code != 0:
77 |     logging.error(f"Undistortion failed with code {exit_code}.
Exiting.") 78 | exit(exit_code) 79 | 80 | files = os.listdir(args.source_path + "/sparse") 81 | os.makedirs(args.source_path + "/sparse/0", exist_ok=True) 82 | # Copy each file from the source directory to the destination directory 83 | for file in files: 84 | if file == '0': 85 | continue 86 | source_file = os.path.join(args.source_path, "sparse", file) 87 | destination_file = os.path.join(args.source_path, "sparse", "0", file) 88 | shutil.move(source_file, destination_file) 89 | 90 | if(args.resize): 91 | print("Copying and resizing...") 92 | 93 | # Resize images. 94 | os.makedirs(args.source_path + "/images_2", exist_ok=True) 95 | os.makedirs(args.source_path + "/images_4", exist_ok=True) 96 | os.makedirs(args.source_path + "/images_8", exist_ok=True) 97 | # Get the list of files in the source directory 98 | files = os.listdir(args.source_path + "/images") 99 | # Copy each file from the source directory to the destination directory 100 | for file in files: 101 | source_file = os.path.join(args.source_path, "images", file) 102 | 103 | destination_file = os.path.join(args.source_path, "images_2", file) 104 | shutil.copy2(source_file, destination_file) 105 | exit_code = os.system(magick_command + " mogrify -resize 50% " + destination_file) 106 | if exit_code != 0: 107 | logging.error(f"50% resize failed with code {exit_code}. Exiting.") 108 | exit(exit_code) 109 | 110 | destination_file = os.path.join(args.source_path, "images_4", file) 111 | shutil.copy2(source_file, destination_file) 112 | exit_code = os.system(magick_command + " mogrify -resize 25% " + destination_file) 113 | if exit_code != 0: 114 | logging.error(f"25% resize failed with code {exit_code}. Exiting.") 115 | exit(exit_code) 116 | 117 | destination_file = os.path.join(args.source_path, "images_8", file) 118 | shutil.copy2(source_file, destination_file) 119 | exit_code = os.system(magick_command + " mogrify -resize 12.5% " + destination_file) 120 | if exit_code != 0: 121 | logging.error(f"12.5% resize failed with code {exit_code}. 
Exiting.") 122 | exit(exit_code) 123 | 124 | print("Done.") 125 | -------------------------------------------------------------------------------- /Event_sensor/event_tools.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.spatial.transform import Rotation 3 | import cv2 4 | import sys 5 | sys.path.append("./Event_sensor") 6 | sys.path.append("./Event_sensor/src") 7 | from src.event_buffer import EventBuffer 8 | from src.dvs_sensor import init_bgn_hist_cpp, DvsSensor 9 | from src.event_display import EventDisplay 10 | import dsi 11 | import numpy as np 12 | from src.event_file_io import EventsData 13 | import os 14 | from tqdm import tqdm 15 | from src.event_display import EventDisplay 16 | from src.example_EXR_to_events import View_3D 17 | def Nlerp(a1,a2,alpha): 18 | return alpha * a1 + (1 - alpha) *a2 19 | 20 | def rotation_matrix_to_quaternion(rotation_matrix): 21 | # 使用scipy的Rotation类来将旋转矩阵转换为四元数 22 | r = Rotation.from_matrix(rotation_matrix) 23 | quaternion = r.as_quat() 24 | return quaternion 25 | 26 | def quaternion_to_rotation_matrix(quaternion): 27 | # 使用scipy的Rotation类来将四元数转换为旋转矩阵 28 | r = Rotation.from_quat(quaternion) 29 | rotation_matrix = r.as_matrix() 30 | return rotation_matrix 31 | 32 | #images is a tensor file 33 | def simulate_event_camera(images,ev_full,dt=2857,lat=100, jit=10, ref=100, tau=300, th=0.3, th_noise=0.01): 34 | dsi.initSimu(images[0].shape[0], images[0].shape[1]) 35 | dsi.initLatency(lat, jit, ref, tau) 36 | dsi.initContrast(th, th, th_noise) 37 | init_bgn_hist_cpp("./Event_sensor/data/noise_neg_161lux.npy", "./Event_sensor/data/noise_neg_161lux.npy") 38 | isInit = False 39 | time = 0 40 | ed = EventDisplay("Events", images[0].shape[1], images[0].shape[0], dt*2) 41 | for im in tqdm(images, desc="generating events", unit="frame"): 42 | im = cv2.cvtColor(im, cv2.COLOR_RGB2LUV)[:, :, 0] 43 | cv2.imshow("t", im) 44 | cv2.waitKey(1) 45 | if not isInit: 46 | dsi.initImg(im) 47 | isInit = True 48 | else: 49 | buf = dsi.updateImg(im, dt) 50 | ev = EventBuffer(1) 51 | ev.add_array(np.array(buf["ts"], dtype=np.uint64), 52 | np.array(buf["x"], dtype=np.uint16), 53 | np.array(buf["y"], dtype=np.uint16), 54 | np.array(buf["p"], dtype=np.uint64), 55 | 10000000) 56 | ed.update(ev, dt) 57 | ev_full.increase_ev(ev) 58 | time += dt 59 | return ev_full 60 | 61 | def save_event_result(ev_full,event_path): 62 | file_path = os.path.join(event_path,"raw.dat") 63 | ev_full.write(file_path) 64 | return file_path 65 | 66 | def generate_images(event_path,dt, total_dt_nums): 67 | events_data = EventsData() 68 | events_data.read_IEBCS_events(os.path.join(event_path,"raw.dat"), (total_dt_nums+1)*dt) 69 | ev_data = events_data.events[0] 70 | for idx in range(0,total_dt_nums): 71 | img = events_data.display_events(ev_data,dt*idx,dt*(idx+1)) 72 | cv2.imwrite(os.path.join(event_path, '{0:05d}'.format(idx) + ".png"), img) 73 | def generate_images_accumu(event_path,dt, total_dt_nums): 74 | events_data = EventsData() 75 | events_data.read_IEBCS_events(os.path.join(event_path,"raw.dat"), (total_dt_nums+1)*dt) 76 | ev_data = events_data.events[0] 77 | 78 | # point_cloud = events_data.display_events_3D(ev_data,0,5000) 79 | # View_3D(point_cloud) 80 | 81 | for idx in range(0,total_dt_nums): 82 | img = events_data.display_events_accumu(ev_data,dt*idx,dt*(idx+1)) 83 | cv2.imwrite(os.path.join(event_path, '{0:05d}'.format(idx+2) + ".png"), img) 84 | def generate_images_accumu_volt(event_path,dt, total_dt_nums): 85 | events_data 
= EventsData() 86 | events_data.read_Volt_events(os.path.join(event_path,"raw.dat"), (total_dt_nums+1)*dt) 87 | ev_data = events_data.events[0] 88 | 89 | # point_cloud = events_data.display_events_3D(ev_data,0,5000) 90 | # View_3D(point_cloud) 91 | 92 | for idx in range(0,total_dt_nums): 93 | img = events_data.display_events_accumu(ev_data,dt*idx,dt*(idx+1)) 94 | cv2.imwrite(os.path.join(event_path+"_ac", '{0:05d}'.format(idx+3) + ".png"), img) 95 | def generate_images_accumu_edslike(event_path,dt, total_dt_nums,frac = 0.1): 96 | events_data = EventsData() 97 | events_data.read_IEBCS_events(os.path.join(event_path,"raw.dat"), (total_dt_nums+1)*dt) 98 | ev_data = events_data.events[0] 99 | 100 | width = events_data.width 101 | height = events_data.height 102 | 103 | # point_cloud = events_data.display_events_3D(ev_data,0,5000) 104 | # View_3D(point_cloud) 105 | 106 | for idx in range(0,total_dt_nums): 107 | img = events_data.display_events_accumu(ev_data,dt*idx,dt*(idx+frac),width, height) 108 | cv2.imwrite(os.path.join(event_path+"/images_simu", 'frame_{0:010d}'.format(idx*10) + ".png"), img) 109 | def generate_images_accumu_eds(event_path,dt, total_dt_nums,frac = 0.1): 110 | events_data = EventsData() 111 | events_data.read_eds_events(os.path.join(event_path,"events.h5"), (total_dt_nums+1)*dt) 112 | ev_data = events_data.events[0] 113 | 114 | width = events_data.width 115 | height = events_data.height 116 | 117 | # point_cloud = events_data.display_events_3D(ev_data,0,5000) 118 | # View_3D(point_cloud) 119 | 120 | for idx in range(0,total_dt_nums): 121 | img = events_data.display_events_accumu(ev_data,dt*idx,dt*(idx+frac),width, height) 122 | cv2.imwrite(os.path.join(event_path+"/images_ac", 'frame_{0:010d}'.format(idx*10) + ".png"), img) 123 | def generate_images_eds(event_path,dt, total_dt_nums,width=None, height=None): 124 | events_data = EventsData() 125 | events_data.read_eds_events(os.path.join(event_path,"events.h5"), (total_dt_nums+1)*dt) 126 | ev_data = events_data.events[0] 127 | if width == None: 128 | width = events_data.width 129 | height = events_data.height 130 | for idx in range(0,total_dt_nums): 131 | img = events_data.display_events(ev_data,dt*idx,dt*(idx+0.2)) 132 | cv2.imwrite(os.path.join(event_path+"/images_ev", 'frame_{0:010d}'.format(idx*10) + ".png"), img) 133 | def generate_images_accumu_T(event_path,dt, total_dt_nums,frac = 0.1): 134 | events_data = EventsData() 135 | events_data.read_eds_events(event_path+".h5", (total_dt_nums+1)*dt) 136 | ev_data = events_data.events[0] 137 | 138 | width = events_data.width 139 | height = events_data.height 140 | 141 | # point_cloud = events_data.display_events_3D(ev_data,0,5000) 142 | # View_3D(point_cloud) 143 | 144 | for idx in range(0,total_dt_nums): 145 | img = events_data.display_events_accumu(ev_data,dt*idx,dt*(idx+frac),width, height) 146 | cv2.imwrite(os.path.join(event_path+"/images_ac", '{:04d}.{}'.format(idx, "png.png")), img) 147 | def generate_images_accumu_Tumvie(event_path,dt, total_dt_nums,frac = 0.1): 148 | events_data = EventsData() 149 | ts, x, y, p = events_data.read_Tumvie_events(event_path+".h5", (total_dt_nums)*dt) 150 | # ev_data = events_data.events[0] 151 | 152 | width = events_data.width 153 | height = events_data.height 154 | 155 | # point_cloud = events_data.display_events_3D(ev_data,0,5000) 156 | # View_3D(point_cloud) 157 | 158 | for idx in range(0,total_dt_nums): 159 | img = events_data.display_events_accumu_raw(x,y,ts,p,dt*idx,dt*(idx+frac),width, height) 160 | 
cv2.imwrite(os.path.join(event_path+"/images_ac", '{:05d}.{}'.format(idx, ".png")), img) -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/spike_metric/spike_cube_metric.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function: spike cube measurement function. 3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Jan. 24th, 2018. 4 | 5 | """ 6 | import numpy as np 7 | from event_process import event_processing 8 | from spike_metric import spike_train_metric, fft_3d_convolution 9 | from spike_metric import cubes_3d_kernel 10 | 11 | 12 | def kernel_method_spike_train(events, new_events, x_cube_size=128, y_cube_size=128, t_cube_size=5000, sigma=5000): 13 | """ 14 | kernel method for spike train, such as polarity independent and polarity interference. (Hamming distance) 15 | 16 | Inputs: 17 | ------- 18 | events - events include polarity, timestamp, x and y. 19 | new_events - events after changing operation. 20 | x_cube_size, y_cube_size, t_cube_size - the spike cube parameters. 21 | sigma - the parameter of gaussian kernel. 22 | 23 | Outputs: 24 | ------- 25 | distance - the distance between events and new_events. 26 | 27 | """ 28 | 29 | events_cubes = event_processing.events_to_cubes(events, t_cube_size) 30 | new_events_cubes = event_processing.events_to_cubes(new_events, t_cube_size) 31 | 32 | distance = 0 33 | for k in range(len(events_cubes)): 34 | 35 | spike_cube = event_processing.events_cube_matrix(events_cubes[k], x_cube_size, y_cube_size, t_cube_size) 36 | new_spike_cube = event_processing.events_cube_matrix(new_events_cubes[k], x_cube_size, y_cube_size, t_cube_size) 37 | 38 | for i in range(x_cube_size): 39 | for j in range(y_cube_size): 40 | 41 | if len(np.nonzero(spike_cube[i,j,:])[0])==0 and len(np.nonzero(new_spike_cube[i, j, :])[0])==0: 42 | distance += 0 43 | 44 | else: 45 | distance += spike_train_metric.cal_dist_pid(spike_cube[i,j,:], new_spike_cube[i,j,:], sigma) # polarity independent measurement 46 | # distance += spike_train_metric.cal_dist_pif(spike_cube[i,j,:], new_spike_cube[i,j,:], sigma) # polarity interference measurement 47 | # distance += spike_train_metric.hamming_distance(spike_cube[i,j,:], new_spike_cube[i,j,:]) # Hamming distance 48 | 49 | # return distance/(x_cube_size*y_cube_size) 50 | return distance 51 | 52 | 53 | def cube_using_spike_train(events_cube, new_events_cube, x_cube_size, y_cube_size, t_cube_size, sigma): 54 | """ 55 | kernel method for spike train. 56 | 57 | Inputs: 58 | ------- 59 | events_cube - events include polarity, timestamp, x and y. 60 | new_events_cube - events after changing operation. 61 | x_cube_size, y_cube_size, t_cube_size - the spike cube parameters. 62 | sigma - the parameter of gaussian kernel. 63 | 64 | Outputs: 65 | ------- 66 | distance - the distance between events and new_events. 
67 | 68 | """ 69 | 70 | distance = 0 71 | 72 | spike_cube = event_processing.events_to_cube(events_cube, x_cube_size, y_cube_size, t_cube_size) 73 | new_spike_cube = event_processing.events_to_cube(new_events_cube, x_cube_size, y_cube_size, t_cube_size) 74 | 75 | for i in range(x_cube_size): 76 | for j in range(y_cube_size): 77 | 78 | if len(np.nonzero(spike_cube[i, j, :])[0]) == 0 and len(np.nonzero(new_spike_cube[i, j, :])[0]) == 0: 79 | distance += 0 80 | 81 | else: 82 | distance += spike_train_metric.cal_dist_pid(spike_cube[i, j, :], new_spike_cube[i, j, :], sigma) # polarity independent measurement 83 | # distance += spike_train_metric.cal_dist_pif(spike_cube[i,j,:], new_spike_cube[i,j,:], sigma) # polarity interference measurement 84 | # distance += spike_train_metric.hamming_distance(spike_cube[i,j,:], new_spike_cube[i,j,:]) # Hamming distance 85 | 86 | return distance 87 | 88 | 89 | def fft_convolution_l1_norm(events, new_events, x_cube_size=32, y_cube_size=32, t_cube_size=500, x_sigma=5, y_sigma=5, t_sigma=100): 90 | """ 91 | Computing distance between spike cubes using 3d convolution in l1 norm, which can be decomposed on fft(fast fourier transform). 92 | 93 | Inputs: 94 | ------- 95 | events - events include polarity, timestamp, x and y. 96 | new_events - events after changing operation. 97 | x_cube_size, y_cube_size, t_cube_size - the spike cube parameters. 98 | x_sigma, y_sigma, z_sigma - the parameters of 3d gaussian kernel. 99 | 100 | Outputs: 101 | ------- 102 | distance - the distance between events and new_events. 103 | 104 | """ 105 | 106 | gaussian_3d = fft_3d_convolution.get_3d_gaussian_kernel(x_size=20, y_size=20, t_size=500, x_sigma=x_sigma, y_sigma=y_sigma, t_sigma=t_sigma) # 3d gaussian function 107 | 108 | # events_cubes = event_processing.events_to_cubes(events, t_cube_size) 109 | # new_events_cubes = event_processing.events_to_cubes(new_events, t_cube_size) 110 | 111 | events_cubes = event_processing.events_to_spike_cubes(events, 128, 128, x_cube_size, y_cube_size, t_cube_size) 112 | new_events_cubes = event_processing.events_to_spike_cubes(new_events, 128, 128, x_cube_size, y_cube_size, 113 | t_cube_size) 114 | 115 | distance = 0 116 | for k in range(len(events_cubes)): 117 | # for k in range(2): 118 | 119 | spike_cube = event_processing.events_cube_matrix(events_cubes[k], x_cube_size, y_cube_size, t_cube_size) 120 | new_spike_cube = event_processing.events_cube_matrix(new_events_cubes[k], x_cube_size, y_cube_size, t_cube_size) 121 | 122 | inverse_fft = fft_3d_convolution.fft_convolution(spike_cube, gaussian_3d) 123 | new_inverse_fft = fft_3d_convolution.fft_convolution(new_spike_cube, gaussian_3d) 124 | 125 | distance_matrix = abs(inverse_fft - new_inverse_fft) #l1 norm 126 | 127 | distance += distance_matrix.sum() 128 | 129 | return distance 130 | 131 | 132 | def kernel_method_spike_cubes(events, new_events, width=128, height=128, x_cube_size=32, y_cube_size=32, t_cube_size=5000, x_sigma=5, y_sigma=5, t_sigma=5000): 133 | """ 134 | 3d gaussian kernel method for spike cubes, such as polarity independent and polarity interference. 135 | 136 | Inputs: 137 | ------- 138 | events - events include polarity, timestamp, x and y. 139 | new_events - events after changing operation. 140 | width, height - the width and height of dynamic vision sensor. 141 | x_cube_size, y_cube_size, t_cube_size - the size of spike cube. 142 | x_sigma, y_sigma, t_sigma - the 3d gaussian kernel parameters. 
143 | 
144 |     Outputs:
145 |     -------
146 |     distance - the distance between events and new_events.
147 | 
148 |     """
149 | 
150 |     events_cube = event_processing.events_to_spike_cubes(events, width, height, x_cube_size, y_cube_size, t_cube_size)
151 |     new_events_cubes = event_processing.events_to_spike_cubes(new_events, width, height, x_cube_size, y_cube_size, t_cube_size)
152 | 
153 |     distance = 0
154 |     for k in range(0, min(len(events_cube), len(new_events_cubes))):
155 | 
156 |         events_data = np.transpose(np.array(events_cube[k]))
157 |         new_events_data = np.transpose(np.array(new_events_cubes[k]))
158 | 
159 |         if len(events_data) == 0 and len(new_events_data) == 0:
160 |             distance += 0
161 | 
162 |         else:
163 |             distance += cubes_3d_kernel.cubes_3d_kernel_distance(events_data, new_events_data, x_sigma, y_sigma, t_sigma)
164 | 
165 |     return distance
--------------------------------------------------------------------------------
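A sketch tying this module to the generators in the next file: build a clean event stream in the 4xN AER layout (polarity, timestamp, x, y), perturb it with noise, and score the pair with the 3D kernel metric (parameter values mirror the defaults above; imports assume this repository's package layout):

```python
from event_process import spike_cube_processing
from event_process.generating_spike_cube import generating_2d_trigonometric_events
from spike_metric.spike_cube_metric import kernel_method_spike_cubes

events, _  = generating_2d_trigonometric_events(time_length=10000, spike_numbers=500)
new_events = spike_cube_processing.events_increasing_noises(events, 0, 0.1, 128, 128)
d = kernel_method_spike_cubes(events, new_events,
                              width=128, height=128,
                              x_cube_size=32, y_cube_size=32, t_cube_size=5000,
                              x_sigma=5, y_sigma=5, t_sigma=5000)
print('kernel cube distance:', d)
```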
56 | def generating_2d_trigonometric_events(time_length=10000, spike_numbers=500, center=64, offset=3, ratio=1): 57 | """ 58 | Generate a moving target whose trajectory follows a 2D trigonometric function. 59 | 60 | Inputs: 61 | ------- 62 | time_length - the time length. 63 | spike_numbers - the total number of spikes in the spatial-temporal cube. 64 | center - the center of the x or y coordinate. 65 | offset - the x or y coordinate offset around the center. 66 | ratio - the ON/OFF polarity ratio in the spike cube. 67 | 68 | Outputs: 69 | ------- 70 | events - the randomly generated events in the spike cube, in AER format; true_target is also returned as the ground-truth trajectory [t, x, y]. 71 | 72 | """ 73 | 74 | ON_events_numbers = int(spike_numbers * (ratio / (1 + ratio))) 75 | ON_events = np.ones(ON_events_numbers) 76 | OFF_events = - np.ones(spike_numbers - ON_events_numbers) 77 | 78 | polarity_events = np.hstack((ON_events, OFF_events)) 79 | t_events = np.array([random.randint(0, time_length - 1) for _ in range(spike_numbers)]) 80 | x_events = np.rint(center + 15 * np.sin((t_events / 1500) * math.pi) + random.sample(range(-offset, offset), 1)[0]) 81 | y_events = np.array([random.randint(center - offset, center + offset) for _ in range(spike_numbers)]) 82 | 83 | events = np.vstack((polarity_events, t_events, x_events, y_events)) 84 | events = events[:, events[1, :].argsort()] # sorted by t coordinate. 85 | 86 | t_events = np.linspace(0, time_length, 51)[0:50] 87 | true_target = np.vstack((t_events, np.rint(center + 15 * np.sin((t_events / 1500) * math.pi)), center * np.ones(len(t_events)))) 88 | 89 | #true_target = np.vstack((t_events, np.rint(center + 5 * np.sin(t_events / 500) * (math.pi)), center*np.ones(spike_numbers))) 90 | true_target = true_target[:, true_target[0, :].argsort()] # sorted by t coordinate. 91 | 92 | return events, true_target 93 | 94 |
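# Illustrative check of the trajectory above (not in the original): the target's
# x position is center + 15*sin((t/1500)*pi), so at t = 750 it peaks at
# x = center + 15, and at t = 1500 it crosses x = center again.
#
#   events, true_target = generating_2d_trigonometric_events(time_length=10000, spike_numbers=500)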
95 | def generating_3d_trigonometric_events(time_length=10000, spike_numbers=500, center=64, offset=3, ratio=1): 96 | """ 97 | Generate a moving target whose trajectory follows a 3D trigonometric function. 98 | 99 | Inputs: 100 | ------- 101 | time_length - the time length. 102 | spike_numbers - the total number of spikes in the spatial-temporal cube. 103 | center - the center of the x or y coordinate. 104 | offset - the x or y coordinate offset around the center. 105 | ratio - the ON/OFF polarity ratio in the spike cube. 106 | 107 | Outputs: 108 | ------- 109 | events - the randomly generated events in the spike cube, in AER format; true_target is also returned as the ground-truth trajectory [t, x, y]. 110 | 111 | """ 112 | 113 | ON_events_numbers = int(spike_numbers * (ratio / (1 + ratio))) 114 | ON_events = np.ones(ON_events_numbers) 115 | OFF_events = - np.ones(spike_numbers - ON_events_numbers) 116 | 117 | polarity_events = np.hstack((ON_events, OFF_events)) 118 | t_events = np.array([random.randint(0, time_length - 1) for _ in range(spike_numbers)]) 119 | # x_events = np.rint(center + 8 * np.sin(t_events / 250) * (math.pi) + random.sample(range(-offset, offset), 1)[0]) 120 | # y_events = np.rint(center + 8 * np.cos(t_events / 250) * (math.pi) + random.sample(range(-offset, offset), 1)[0]) 121 | 122 | x_events = np.rint(center + 22 * np.sin((t_events / 800) * math.pi) + random.sample(range(-offset, offset), 1)[0]) # 15 123 | y_events = np.rint(center + 22 * np.cos((t_events / 800) * math.pi) + random.sample(range(-offset, offset), 1)[0]) # 15 124 | 125 | 126 | events = np.vstack((polarity_events, t_events, x_events, y_events)) 127 | events = events[:, events[1, :].argsort()] # sorted by t coordinate. 128 | 129 | t_events = np.linspace(0, time_length, 51)[0:50] 130 | true_target = np.vstack((t_events, np.rint(center + 22 * np.sin((t_events / 800) * math.pi)), np.rint(center + 22 * np.cos((t_events / 800) * math.pi)))) 131 | 132 | #true_target = np.vstack((t_events, np.rint(center + 8 * np.sin(t_events / 250) * (math.pi)), np.rint(center + 8 * np.cos(t_events / 250) * (math.pi)))) 133 | true_target = true_target[:, true_target[0, :].argsort()] # sorted by t coordinate. 134 | 135 | return events, true_target 136 | 137 | 138 | 139 | if __name__ == '__main__': 140 | 141 | 142 | ### randomly generating events in a spike cube. 143 | # spike_numbers = 100 144 | # time_length = 1000 145 | # ratio = 0 146 | 147 | # events = random_spike_cubes(spike_numbers, ratio, 128, 128, time_length) 148 | # show_events.show_ON_OFF_events(events, width=128, height=128, length=max(events[1, :])) 149 | # save in the pkl format. 150 | # pickle.dump(events, open('../datasets/simulating_dataset/events_{}_{}_{}.pkl'.format(spike_numbers, ratio, time_length), 'wb')) 151 | # pkl_events = open('../datasets/simulating_dataset/events_{}_{}.pkl'.format(spike_numbers, time_length),'rb') 152 | # read_events = pickle.load(pkl_events) 153 | 154 | 155 | ### generating a moving target using a 2D or 3D trigonometric function. 156 | events, true_target = generating_2d_trigonometric_events(time_length=10000, spike_numbers=500, center=64, offset=3, ratio=1) 157 | # events, true_target = generating_3d_trigonometric_events(time_length=10000, spike_numbers=1000, center=64, offset=8, ratio=1) 158 | show_events.show_simulating_events(events, width=128, height=128, length=max(events[1, :])) 159 | new_events = spike_cube_processing.events_increasing_noises(events, 0, 0.1, 128, 128) # noise ratio 0.1 160 | show_events.show_simulating_events(new_events, width=128, height=128, length=max(events[1, :])) 161 | 162 | # save in the pkl format (note: the filenames refer to the 3d variant; adjust them when using the 2d generator). 163 | pickle.dump(events, open('../datasets/simulating_dataset/events_3d_trigonometric.pkl', 'wb')) 164 | pickle.dump(true_target, open('../datasets/simulating_dataset/events_3d_true_target.pkl', 'wb')) -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # Event-3DGS: Event-based 3D Reconstruction Using 3D Gaussian Splatting 2 | ![Alt text](./assets/deblur.jpg) 3 | 4 | 5 | ## Introduction 6 | This repository contains the research code for **Event-3DGS: Event-based 3D Reconstruction Using 3D Gaussian Splatting**, which was accepted at NeurIPS 2024. The paper's page at NeurIPS 2024 is https://neurips.cc/virtual/2024/poster/96044 7 | 8 | The code is designed to implement the event-based 3D reconstruction algorithm described in the paper and includes key components such as the photovoltage contrast estimation module and a novel event-based loss for optimizing reconstruction quality. 9 | 10 | The code has been tested in a personal environment on Windows 11. If you encounter any difficult-to-resolve issues during deployment in other environments, please feel free to provide feedback. 11 | 12 | ## Installation 13 | Follow the steps below to set up the environment and install dependencies. 14 | 15 | 1. **Clone the repository:** 16 | ```bash 17 | git clone https://github.com/lanpokn/Event-3DGS.git 18 | cd Event-3DGS 19 | ``` 20 | 21 | 2. **Install the necessary dependencies:** 22 | 23 | This project is based on 3DGS (https://github.com/graphdeco-inria/gaussian-splatting), so please refer to its installation instructions. 24 | 25 | Some parts of the code in this project use additional libraries, which were mainly my personal attempts during the exploratory phase and **can be ignored during use**. 26 | 27 | ## Dataset Format 28 | To ensure proper usage, this section describes the format in which we organize the data. 29 | 30 | 1.
**Data organization:** 31 | 32 | - We organize the data into the following structure: 33 | ``` 34 | /path/to/dataset/ 35 | ├── images/ 36 | ├── images_event/ 37 | ├── images_blurry/ (optional) 38 | ├── renders/ 39 | ├── sparse/... 40 | ``` 41 | 42 | 2. **Data Explanation:** 43 | 44 | - The 'images' folder stores the pure intensity images estimated from events. During the optimization process, the images in this folder **are responsible for providing pure intensity**. 45 | - The 'images_event' folder also stores the pure intensity images estimated from events, but the images in this folder are **solely used to provide intensity differences** during the optimization process. Since the estimation methods for intensity and intensity differences may vary, I separated them when validating the algorithm. 46 | - 'images_blurry' is optional and contains blurry images captured by an RGB camera, primarily used to validate the deblurring capability presented in the original paper. 47 | - 'renders' stores the RGB ground truth, which is prepared for testing and does not participate in the reconstruction process. 48 | - The entire 'sparse' folder contains the camera poses in COLMAP format. For using COLMAP, please refer to https://colmap.github.io/. If you only have event data without pose or RGB information, you can first estimate the intensity images from the events and then use these intensity images for calibration with COLMAP. 49 | 50 | 3. **Note:** 51 | 52 | The image filenames in all storage folders must correspond one-to-one and be consistent with the results stored in COLMAP; otherwise, they will not be readable. See the naming sketch below for an example. 53 | 54 | There are many methods to obtain 'images' and 'images_event', including but not limited to neural networks, classical integration methods, and filtering methods. You can choose based on your specific situation. If you're unfamiliar with this area, you can refer to https://github.com/ziweiWWANG/AKF and https://github.com/uzh-rpg/rpg_e2vid. For details on our method, please refer to the paper.
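For instance, a consistent naming scheme might look like the following (illustrative only; any scheme works as long as it matches the COLMAP reconstruction):
```
images/00001.png    images_event/00001.png    renders/00001.png
images/00002.png    images_event/00002.png    renders/00002.png
```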
55 | 4. **Data Example:** 56 | Here, we provide a simulated scene (with blurred images and event data simulated from an existing dataset) for easy debugging. The images and images_event have already been generated from the simulated events. 57 | 58 | The file shared via Baidu Netdisk: `train_colmap_easy(1).zip` 59 | Link: https://pan.baidu.com/s/1I9AP7ihz8wTYb2gmH0py1Q 60 | Access code: m388 61 | 62 | via Google Drive: `train_colmap_easy(1).zip` 63 | https://drive.google.com/file/d/1OBPMNFtXsfNO5AJl5SwoKn75mY-Pk2Fg/view?usp=drive_link 64 | 5. **All data in the paper:** 65 | (Only the original data; tested only on Windows 11. For data sources, please refer to the paper citations.) 66 | 67 | The file shared via Baidu Netdisk: `pure_data.zip` 68 | Link: https://pan.baidu.com/s/1xOZ26c3tI5hwUN7lcB2brA Access code: 43b5 69 | 70 | via Google Drive: `pure_data.zip` 71 | https://drive.google.com/file/d/1fntzdCgRR7uy5j8gY7TDoCIP2x_1quv4/view?usp=sharing 72 | 73 | 74 | ## Getting Started 75 | Once you've set up the environment and arranged the dataset, you can use train.py to reconstruct the scene. 76 | 77 | Its usage is generally consistent with the original 3DGS, but I have added more command-line parameters to accommodate the event modality. Specifically: 78 | 79 | ##### Command-Line Options 80 | 81 | - `--event`: This option enables the event reading and reconstruction mode, which is the primary mode. For specific functionality after enabling this option, please refer to the code. 82 | - `--gray`: This option changes the 3DGS output to grayscale. It should be enabled in pure event mode, as events are inherently single-channel, and outputting color in this case is meaningless. 83 | - `--deblur`: This option enables the event-assisted deblurring mode, demonstrating that the algorithm can be used in conjunction with both event and RGB data. For specific functionality after enabling this option, please refer to the code. 84 | 85 | ##### Example Use 86 | 87 | To provide an example, below are sample arguments corresponding to `launch.json` in VSCode: 88 | 89 | ``` 90 | "args": ["-s", "your_dataset_path/","--gray","--event","--iterations","8000","-m","your_dataset_path/","--start_checkpoint","your_checkpoint_path"], 91 | ``` 92 |
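For reference, the same arguments correspond to a plain shell invocation along these lines (paths are placeholders; `--start_checkpoint` can be omitted when training from scratch):

```bash
python train.py -s your_dataset_path/ --gray --event --iterations 8000 -m your_dataset_path/ --start_checkpoint your_checkpoint_path
```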
93 | ## Others (Explanation of minor issues) 94 | - **If your training crashes** (e.g., the scene disappears), you can first try setting $\alpha$ to 0 and see if the reconstruction succeeds. If reconstruction works at this stage, then consider gradually increasing $\alpha$ or adjusting $\alpha$ based on the training step. In general, reconstruction is more stable when $\alpha$ is small, and the quality improves as $\alpha$ increases; but if $\alpha$ is too large, it may lead to instability in the reconstruction. (Setting $\alpha$ to zero makes the reconstruction very stable; I personally have not experienced any crashes in this situation.) 95 | 96 | When I conducted the experiments, for convenience of record-keeping, the step of initially setting this parameter to 0 was done via the command line (essentially not using events at the start), and it is not reflected in the code. You may need to run it via the command line too, or modify train.py according to your needs. 97 | 98 | 3DGS is much more prone to crashing than NeRF where the differential terms are concerned; strong differential weights or too many training epochs can lead to training failure. It is especially important to pay attention to the setting of opacity_reset_interval, as this reset can easily cause subsequent training to stagnate, which is why I changed it to 10,000. The reason why 3DGS is more likely to crash than NeRF (in the context of events) remains an open question worth exploring. 99 | 100 | - `Event_sensor` was originally integrated here for event simulation during my experiments. However, I later separated the dataset creation process, so this folder is essentially deprecated. In fact, since the primary purpose of this code project is to validate the proposed algorithm, it is not very engineering-focused and does not include one-click dataset generation. 101 | 102 | - Additionally, since this method is plug-and-play and does not require a training dataset, no training data is provided beyond the evaluation data linked above. If needed, please discuss it in the issues. 103 | 104 | - `ViewDepth.py` is a file I use to read depth maps. Since the original 3DGS lacks the ability to generate depth maps, I added a depth generation feature (enabled with `--depth`) in the `render` function to create depth maps, although this was not included in the paper. In fact, I made significant modifications to `renders.py`, but they were only used for algorithm exploration. 105 | 106 | - The simulated event data mainly comes from DVS-Voltmeter: Stochastic process-based event simulator for dynamic vision sensors. You can use it to generate your own test set. 107 | 108 | ## Acknowledgments 109 | We thank the authors of https://github.com/graphdeco-inria/gaussian-splatting and the other open-source libraries used in this work. 110 | -------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/testing_effectiveness/searching_trigonometric_target.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function: Search for a moving target in a spike cube based on a distance metric; the target's trajectory follows a trigonometric function. 3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, Mar. 5th, 2019. 4 | 5 | """ 6 | 7 | import numpy as np 8 | import pickle 9 | from event_process import event_processing 10 | from spike_metric.cubes_3d_kernel import cubes_3d_kernel_distance 11 | from spike_metric.spike_cube_metric import cube_using_spike_train 12 | 13 | 14 | def initial_search_cube(initial_center, spike_cube, cube_width=10, cube_height=10, cube_length=100): 15 | """ 16 | Build the initial search spike cube. 17 | 18 | Inputs: 19 | ------- 20 | initial_center - the initial center in the spike cube. 21 | spike_cube - the spike cube including events; cube_width, cube_height, cube_length give the extent of the searching cube. 22 | 23 | Outputs: 24 | ------- 25 | initial_cube - the initial searching cube. 26 | """ 27 | 28 | search_region = spike_cube[int(initial_center[1] - cube_width/2):int(initial_center[1] + cube_width/2), 29 | int(initial_center[2] - cube_height/2):int(initial_center[2] + cube_height/2), 0:cube_length - 1] # local name avoids shadowing this function 30 | 31 | initial_cube = np.vstack((search_region[np.nonzero(search_region)], 32 | np.array(np.nonzero(search_region))[2], 33 | np.array(np.nonzero(search_region))[0], 34 | np.array(np.nonzero(search_region))[1])) 35 | 36 | return initial_cube 37 | 38 | 39 | def search_min_center(initial_center, initial_search_events, spike_cube, cube_width=10, cube_height=10, cube_length=100, search_width=8, search_height=8): 40 | """ 41 | Search for a moving target using the distance metric, then return the minimum-distance center. 42 | 43 | Inputs: 44 | ------- 45 | initial_center - the initial center in the spike cube. 46 | initial_search_events, spike_cube - the events of the current reference cube, and the spike cube to search in. 47 | cube_width, cube_height, cube_length - the width, height and length of the searching cube. 48 | search_width, search_height - the width and height of the searching area in the spatial domain. 49 | 50 | Outputs: 51 | ------- 52 | minimum_center - the searched center in the spike cube, which serves as the initial center in the next search. 53 | minimum_cube - the minimum-distance cube, which serves as the next initial cube. 54 | 55 | """ 56 | search_areas = [] 57 | center_areas = [] 58 | 59 | # initial_search_cube = spike_cube[int(initial_center[1] - cube_width/2):int(initial_center[1] + cube_width/2), 60 | # int(initial_center[2] - cube_height/2):int(initial_center[2] + cube_height/2), 0:cube_length - 1] 61 | # 62 | # initial_search_events = np.vstack((initial_search_cube[np.nonzero(initial_search_cube)], 63 | # np.array(np.nonzero(initial_search_cube))[2], 64 | # np.array(np.nonzero(initial_search_cube))[0], 65 | # np.array(np.nonzero(initial_search_cube))[1])) 66 | 67 | # searching in designed areas.
68 | for i in range(-search_width, search_width): 69 | for j in range(-search_height, search_height): 70 | 71 | search_cube = spike_cube[int(initial_center[1] - cube_width / 2 + i):int(initial_center[1] + cube_width / 2 + i), 72 | int(initial_center[2] - cube_height / 2 + j):int(initial_center[2] + cube_height / 2 + j), 0:cube_length - 1] 73 | search_events = np.vstack((search_cube[np.nonzero(search_cube)], np.array(np.nonzero(search_cube))[2], 74 | np.array(np.nonzero(search_cube))[0], np.array(np.nonzero(search_cube))[1])) 75 | 76 | 77 | search_center = np.array([initial_center[0], initial_center[1] + i, initial_center[2] + j]) 78 | 79 | # print('search_t={}, search_x={}, search_y={}'.format(initial_center[0], initial_center[1] + i, initial_center[2] + j)) 80 | 81 | search_areas.append(search_events) 82 | center_areas.append(search_center) 83 | 84 | 85 | # compute the distances and keep the center with the minimum distance. 86 | distances = np.zeros(len(search_areas)) 87 | for k in range(len(search_areas)): 88 | 89 | if len(search_areas[k][0]) == 0 or initial_search_events.shape[1] == 0: 90 | distances[k] = float('Inf') 91 | 92 | else: 93 | 94 | # distances[k] = cubes_3d_kernel_distance(initial_search_events, search_areas[k], cube_width, cube_height, cube_length) # spike cube using 3d kernel 95 | 96 | # print('initial_events_numbers={}, search_events_numbers={}'.format(len(search_areas[k][0]), initial_search_events.shape[1])) 97 | 98 | distances[k] = cube_using_spike_train(initial_search_events, search_areas[k], cube_width, cube_height, cube_length, cube_length) # spike train metric 99 | 100 | 101 | # select the search center. 102 | index = np.argmin(abs(distances)) 103 | 104 | minimum_center = center_areas[index] 105 | minimum_cube = search_areas[index] 106 | 107 | if minimum_center.shape[0] == 0: 108 | print('Error: the search cube is empty!') 109 | 110 | return minimum_center, minimum_cube 111 |
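# Hypothetical usage sketch (not in the original): one search step around an
# initial center [t, x, y], given the reference events and a dense spike cube.
#
#   center, cube = search_min_center(np.array([0, 64, 64]), reference_events, spike_cube,
#                                    cube_width=10, cube_height=10, cube_length=100)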
112 | 113 | def search_trigonometric_trajectory(true_target, events, cube_width, cube_height, cube_length, search_width, search_height): 114 | """ 115 | Search for a moving target using the distance metric, then return the searched trajectory. 116 | 117 | Inputs: 118 | ------- 119 | true_target - the ground truth of the moving trajectory. 120 | events - events including polarity, timestamp, x and y. 121 | cube_width, cube_height, cube_length - the width, height and length of the searching cube. 122 | search_width, search_height - the width and height of the searching area in the spatial domain. 123 | 124 | Outputs: 125 | ------- 126 | search_centers - the searched centers along the event stream. 127 | 128 | 129 | """ 130 | 131 | 132 | 133 | spike_cubes = event_processing.events_to_spike_cubes(events, 128, 128, 128, 128, cube_length) # event stream to spike cubes/frames. 134 | 135 | # initial search cube and center. 136 | first_spike_cube = event_processing.events_cube_matrix(spike_cubes[0], 128, 128, cube_length) 137 | initial_cube = initial_search_cube(true_target[:, 0], first_spike_cube, cube_width=cube_width, cube_height=cube_height, cube_length=cube_length) 138 | initial_center = true_target[:, 0] 139 | 140 | # search the moving trajectory. 141 | search_centers = [] 142 | search_cubes = [] 143 | search_centers.append(initial_center) 144 | search_cubes.append(initial_cube) 145 | first_center = np.copy(initial_center) 146 | 147 | 148 | for i in range(1, len(spike_cubes)): 149 | 150 | spike_cube = event_processing.events_cube_matrix(spike_cubes[i], 128, 128, cube_length) 151 | search_center, search_cube = search_min_center(initial_center, initial_cube, spike_cube, cube_width=cube_width, 152 | cube_height=cube_height, cube_length=cube_length, search_width=search_width, search_height=search_height) 153 | 154 | # center operation: advance the timestamp by one cube length. 155 | search_center[0] = search_center[0] + cube_length 156 | 157 | for j in range(1, 2): # note: only the x coordinate (index 1) is constrained here. 158 | 159 | if abs(search_center[j] - first_center[j]) > cube_width + search_width: 160 | search_center[j] = first_center[j] 161 | 162 | # print('right_search_center={}'.format(search_center)) 163 | 164 | # recursive search. 165 | initial_center = search_center 166 | # initial_cube = search_cube 167 | 168 | search_centers.append(search_center) 169 | search_cubes.append(search_cube) 170 | 171 | return search_centers 172 | 173 | 174 | def computing_trajectory_error(true_target, search_centers): 175 | """ 176 | Compare the searched trajectory against the ground truth, then return the trajectory errors. 177 | 178 | Inputs: 179 | ------- 180 | true_target - the ground truth of the moving trajectory, as a numpy matrix. 181 | search_centers - the trajectory found using the distance metrics. 182 | 183 | Outputs: 184 | ------- 185 | trajectory_errors - the errors of the trajectory search. 186 | mean_error - the mean error of the trajectory search. 187 | 188 | """ 189 | 190 | search_trajectory = np.zeros((3, len(search_centers))) 191 | 192 | for i in range(len(search_centers)): 193 | 194 | search_trajectory[:, i] = search_centers[i] 195 | 196 | trajectory_differences = np.vstack((true_target[0, :], true_target[1, :] - search_trajectory[1, :], true_target[2, :] - search_trajectory[2, :])) 197 | trajectory_errors = np.vstack((true_target[0, :], np.sqrt(np.square(trajectory_differences[1, :]) + np.square(trajectory_differences[2, :])))) 198 | mean_error = np.mean(trajectory_errors[1, :]) 199 | 200 | return trajectory_errors, mean_error 201 |
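# Illustrative arithmetic for the error above (not in the original): if the true
# center at some time step is (x, y) = (64, 64) and the searched center is
# (61, 60), the pointwise error is sqrt(3**2 + 4**2) = 5 pixels; mean_error
# averages these pointwise errors over all time steps.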
-------------------------------------------------------------------------------- /Event_sensor/src/event_buffer_old.py: -------------------------------------------------------------------------------- 1 | # from ICNS 2 | import numpy as np 3 | import os.path 4 | from dat_files import write_event_dat, write_event_csv, load_dat_event 5 | 6 | 7 | class EventBuffer(): 8 | """ Structure to handle a buffer of dvs events """ 9 | x = 0 # Array of x values 10 | y = 0 # Array of y values 11 | ts = 0 # Array of timestamps values (us) 12 | p = 0 # Array of polarity values (0 negative, 1 positive) 13 | i = 0 # Position of the next event 14 | 15 | def __init__(self, size): 16 | """ Allocate the buffers 17 | Args: 18 | size: size of the new buffer (minimum 1), or the path of a .dat file to load 19 | """ 20 | if isinstance(size, str): 21 | ts, x, y, pol = load_dat_event(size) 22 | self.ts = np.array(ts, dtype=np.uint64) 23 | self.x = np.array(x, dtype=np.uint16) 24 | self.y = np.array(y, dtype=np.uint16) 25 | self.p = np.array(pol, dtype=np.uint8) 26 | self.i = ts.shape[0] 27 | else: 28 | if size == 0: 29 | size = 1 30 | self.x = np.zeros(size, dtype=np.uint16) 31 | self.y = np.zeros(size, dtype=np.uint16) 32 | self.p = np.zeros(size, dtype=np.uint8) 33 | self.ts = np.zeros(size, dtype=np.uint64) 34 | 35 | def get_x(self): 36 | return self.x[:self.i] 37 | 38 | def get_y(self): 39 | return self.y[:self.i] 40 | 41 | def get_p(self): 42 | return self.p[:self.i] 43 | 44 | def get_ts(self): 45 | return self.ts[:self.i] 46 | 47 | def increase(self, nsize): 48 | """ Increase the size of the buffer to self.x.shape[0] + nsize 49 | Args: 50 | nsize: number of free slots added 51 | """ 52 | prev_shape = self.x.shape[0] 53 | x = np.zeros(prev_shape + nsize, dtype=np.uint16) 54 | y = np.zeros(prev_shape + nsize, dtype=np.uint16) 55 | p = np.zeros(prev_shape + nsize, dtype=np.uint8) 56 | ts = np.zeros(prev_shape + nsize, dtype=np.uint64) 57 | x[:prev_shape] = self.x 58 | y[:prev_shape] = self.y 59 | p[:prev_shape] = self.p 60 | ts[:prev_shape] = self.ts 61 | self.x = x 62 | self.y = y 63 | self.p = p 64 | self.ts = ts 65 | 66 | def remove_time(self, t_min, t_max): 67 | ind = np.where((self.ts < t_min)|(self.ts > t_max)) # keep only events inside [t_min, t_max] 68 | self.x = np.delete(self.x, ind) 69 | self.y = np.delete(self.y, ind) 70 | self.ts = np.delete(self.ts, ind) 71 | self.p = np.delete(self.p, ind) 72 | self.i = self.ts.shape[0] 73 | 74 | def remove_elt(self, nsize): 75 | """ 76 | Remove the nsize first elements 77 | """ 78 | if self.i - nsize < 0: 79 | nsize = self.i 80 | ind = np.arange(0, nsize, 1) 81 | self.i = self.i-nsize 82 | self.x = np.delete(self.x, ind) 83 | self.y = np.delete(self.y, ind) 84 | self.ts = np.delete(self.ts, ind) 85 | self.p = np.delete(self.p, ind) 86 | 87 | def remove_ev(self, p): 88 | """ 89 | Remove the event at position p 90 | """ 91 | if self.i <= p: 92 | return 93 | self.x = np.delete(self.x, p) 94 | self.y = np.delete(self.y, p) 95 | self.ts = np.delete(self.ts, p) 96 | self.p = np.delete(self.p, p) 97 | 98 | def remove_row(self, r, t): 99 | """ 100 | Remove the events in row r before time t (t == -1 removes the whole row) 101 | """ 102 | if t == -1: 103 | ind = np.where((self.y == r) & (self.ts > 0)) 104 | else: 105 | ind = np.where((self.y == r) & (self.ts < t) & (self.ts > 0)) 106 | self.i -= ind[0].shape[0] 107 | self.x = np.delete(self.x, ind) 108 | self.y = np.delete(self.y, ind) 109 | self.ts = np.delete(self.ts, ind) 110 | self.p = np.delete(self.p, ind) 111 | 112 | def increase_ev(self, ev): 113 | """ Append another event buffer to this one. 114 | If ev fits into self, it is inserted; if not, the buffer is first grown to 115 | self.x.shape[0] + ev.ts.shape[0] 116 | Args: 117 | ev: the EventBuffer added 118 | """ 119 | if len(self.x) > 0 and ev is not None: 120 | if self.i + ev.x.shape[0] > self.x.shape[0] - 1: 121 | prev_shape = self.x.shape[0] 122 | x = np.zeros(prev_shape + ev.ts.shape[0], dtype=np.uint16) 123 | y = np.zeros(prev_shape + ev.ts.shape[0], dtype=np.uint16) 124 | p = np.zeros(prev_shape + ev.ts.shape[0], dtype=np.uint8) 125 | ts = np.zeros(prev_shape + ev.ts.shape[0], dtype=np.uint64) 126 | x[:self.i] = self.x[:self.i] 127 | y[:self.i] = self.y[:self.i] 128 | p[:self.i] = self.p[:self.i] 129 | ts[:self.i] = self.ts[:self.i] 130 | x[self.i:self.i + ev.x.shape[0]] = ev.x 131 | y[self.i:self.i + ev.x.shape[0]] = ev.y 132 | p[self.i:self.i + ev.x.shape[0]] = ev.p 133 | ts[self.i:self.i + ev.x.shape[0]] = ev.ts 134 | self.x = x 135 | self.y = y 136 | self.p = p 137 | self.ts = ts 138 | else: 139 | self.x[self.i:self.i + ev.i] = ev.x[:ev.i] 140 | self.y[self.i:self.i + ev.i] = ev.y[:ev.i] 141 | self.p[self.i:self.i + ev.i] = ev.p[:ev.i] 142 | self.ts[self.i:self.i + ev.i] = ev.ts[:ev.i] 143 | self.i += ev.i 144 | 145 | def copy(self, i1, ep, i2): 146 | """ Copy the i2-th event of the EventBuffer ep into the i1-th position 147 | Args: 148 | i1: self will have a new event at i1 149 | ep: EventBuffer where the event comes from 150 | i2: the i2-th event from ep is taken 151 | """ 152 | if i1 < len(self.x): 153 | self.x[i1] = ep.x[i2] 154 | self.y[i1] = ep.y[i2] 155 | self.ts[i1] = ep.ts[i2] 156 | self.p[i1] = ep.p[i2] 157 | self.i = i1 + 1 158 |
159 | def merge(self, ep1, ep2): 160 | """ Resize the EventBuffer and merge the two EventBuffers ep1 and ep2 into it, sorted by their timestamps 161 | Args: 162 | ep1, ep2: the EventBuffers to merge 163 | """ 164 | self.__init__(len(ep1.x) + len(ep2.x)) 165 | i1 = 0 166 | i2 = 0 167 | for j in range(0, ep1.i + ep2.i, 1): 168 | if i1 == ep1.i: 169 | self.copy(j, ep2, i2) 170 | i2 += 1 171 | elif i2 == ep2.i: 172 | self.copy(j, ep1, i1) 173 | i1 += 1 174 | else: 175 | if ep1.ts[i1] < ep2.ts[i2]: 176 | self.copy(j, ep1, i1) 177 | i1 += 1 178 | else: 179 | self.copy(j, ep2, i2) 180 | i2 += 1 181 | self.i = ep1.i + ep2.i 182 | 183 | def sort(self): 184 | """ Sort the EventBuffer according to its timestamps """ 185 | ind = np.argsort(self.ts[:self.i]) 186 | self.ts[:self.i] = self.ts[:self.i][ind] 187 | self.x[:self.i] = self.x[:self.i][ind] 188 | self.y[:self.i] = self.y[:self.i][ind] 189 | self.p[:self.i] = self.p[:self.i][ind] 190 | 191 | def add(self, ts, y, x, p): 192 | """ 193 | Add an event (ts, x, y, p) to the EventBuffer (push strategy) 194 | If y == -1, it means that x[0] contains the x position and x[1] the y position 195 | Args: 196 | ts, y, x, p: the new event 197 | """ 198 | if self.x.shape[0] == self.i: 199 | self.increase(1000) 200 | self.add(ts, y, x, p) 201 | else: 202 | self.ts[self.i] = ts 203 | self.x[self.i] = x 204 | self.y[self.i] = y 205 | self.p[self.i] = p 206 | self.i += 1 207 | 208 | def add_array(self, ts, y, x, p, inc=1000): 209 | """ 210 | Add n events (ts, x, y, p) to the EventBuffer (push strategy) 211 | Args: 212 | ts, y, x, p: arrays of new events 213 | inc: increment size 214 | """ 215 | s = len(ts) 216 | if s > len(self.ts) - self.i: 217 | self.increase(max(inc, s)) # grow by at least s so the retry below always fits 218 | self.add_array(ts, y, x, p) 219 | else: 220 | self.ts[self.i:self.i+s] = ts 221 | self.x[self.i:self.i+s] = x 222 | self.y[self.i:self.i+s] = y 223 | self.p[self.i:self.i+s] = p 224 | self.i += s 225 | 226 | def write(self, filename, event_type='dvs', width=None, height=None): 227 | """ Write the events into a .dat or .csv file (.es support is commented out) 228 | Args: 229 | filename: path of the file 230 | """ 231 | # sort events to obtain monotonically increasing timestamps 232 | self.sort() 233 | 234 | ext = os.path.splitext(filename)[1] 235 | if ext == '.dat': 236 | write_event_dat(filename, self.ts[:self.i], self.x[:self.i], 237 | self.y[:self.i], self.p[:self.i], 238 | event_type=event_type, width=width, height=height) 239 | # elif ext == '.es': 240 | # write_event_es(filename, self.ts[:self.i], self.x[:self.i], 241 | # self.y[:self.i], self.p[:self.i], 242 | # event_type=event_type, width=width, height=height) 243 | elif ext == '.csv': 244 | write_event_csv(filename, self.ts[:self.i], self.x[:self.i], 245 | self.y[:self.i], self.p[:self.i]) 246 |
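# Hypothetical usage sketch (not part of the original file):
#
#   buf = EventBuffer(1000)                      # pre-allocate 1000 event slots
#   buf.add(10, 5, 7, 1)                         # one event: ts=10us, y=5, x=7, ON
#   buf.add_array(ts_arr, y_arr, x_arr, p_arr)   # append whole arrays of events
#   buf.sort()                                   # order the buffer by timestamp
#   buf.write('events.csv')                      # or 'events.dat'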
-------------------------------------------------------------------------------- /Event_sensor/src/asynchronous-spatio-temporal-spike-metric-master/event_process/event_processing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Function: Basic event-processing library for the dynamic vision sensor (DVS). 3 | Author: Jianing Li, lijianing@pku.edu.cn, Peking University, May 10th, 2018. 4 | """ 5 | 6 | import numpy as np 7 | import math 8 | 9 | def aer_events(aer_data): 10 | """ 11 | Convert aer_data to an event matrix for the dynamic vision sensor. 12 | 13 | Inputs: 14 | ------- 15 | aer_data - the dataset of the AER sensor including polarity (t), timestamp (ts), x coordinate (X) and y coordinate (Y). 16 | 17 | Outputs: 18 | ------- 19 | events - the 4xN event matrix. 20 | 21 | """ 22 | events = np.zeros((4, len(aer_data.ts))) 23 | events[0, :] = aer_data.t * 2 - 1 # map polarity {0, 1} to {-1, 1} 24 | events[1, :] = aer_data.ts 25 | events[2, :] = aer_data.x 26 | events[3, :] = aer_data.y 27 | 28 | return events 29 | 30 | 31 | def events_to_cubes(events, time_interval): 32 | """ 33 | Split events into temporal cubes. 34 | 35 | Inputs: 36 | ------- 37 | events - the dataset of the AER sensor including polarity (t), timestamp (ts), x coordinate (X) and y coordinate (Y). 38 | time_interval - the split time interval. 39 | Outputs: 40 | ------- 41 | events_cubes - the cubes of events. 42 | 43 | """ 44 | 45 | events_cubes = [[] for _ in range(math.ceil(max(events[1, :] / time_interval)))] 46 | 47 | for i in range(events.shape[1]): 48 | k = math.floor(events[1, i] / time_interval) 49 | events_cubes[k].append(events[:, i]) 50 | 51 | return events_cubes 52 | 53 | 54 | def events_to_spike_cubes(events, width, height, x_cube_size, y_cube_size, t_cube_size): 55 | """ 56 | Split events into spatio-temporal spike cubes. 57 | 58 | Inputs: 59 | ------- 60 | events - the dataset of the AER sensor including polarity (t), timestamp (ts), x coordinate (X) and y coordinate (Y). 61 | width, height - the width and height resolutions of the dynamic vision sensor. 62 | x_cube_size, y_cube_size, t_cube_size - the width, height and temporal size of the spike cubes. 63 | Outputs: 64 | ------- 65 | events_cube - the list of event cubes, indexed first by x, then y, then t. 66 | 67 | """ 68 | 69 | num = int((width/x_cube_size)*(height/y_cube_size)*(math.ceil(max(events[1, :] / t_cube_size)))) 70 | events_cube = [[] for _ in range(num)] 71 | #print('num={}'.format(num)) 72 | 73 | for i in range(events.shape[1]): 74 | 75 | k = math.floor(events[2, i]/x_cube_size) + math.floor(events[3, i]/y_cube_size)*int(width/x_cube_size) + math.floor(events[1, i]/t_cube_size)*int(width/x_cube_size)*int(height/y_cube_size) 76 | 77 | #print('t_cube_size={}, k={}, i={}, event={}, feature={}'.format(t_cube_size, k, i, events[:, i], math.floor(events[1, i]/t_cube_size))) 78 | events_cube[k].append(events[:, i]) 79 | 80 | 81 | return events_cube 82 |
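# Worked example of the cube index above (illustrative, not in the original):
# with width=height=128, x_cube_size=y_cube_size=32 and t_cube_size=500, an
# event at (x=40, y=70, t=1200) falls into
#   k = 40//32 + (70//32)*4 + (1200//500)*16 = 1 + 8 + 32 = 41.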
83 | 84 | def events_cube_matrix(events_cube, width, height, time_interval): 85 | """ 86 | Convert one cube's event list into a dense spike-cube matrix. 87 | 88 | Inputs: 89 | ------- 90 | events_cube - the events within a time interval. 91 | width - the width of the spike cube. 92 | height - the height of the spike cube. 93 | time_interval - the time interval. 94 | Outputs: 95 | ------- 96 | spike_cube - the spike cube of events. 97 | 98 | """ 99 | spike_cube = np.zeros((width, height, time_interval)) 100 | 101 | for i in range(len(events_cube)): 102 | 103 | # if int(events_cube[i][2])==0 or int(events_cube[i][3]) == 0: 104 | # print('x={}'.format(events_cube[i][2])) 105 | # print('y={}'.format(events_cube[i][3])) 106 | 107 | # print('x={},y={},t={},p={}'.format(int(events_cube[i][2]%width-1), int(events_cube[i][3]%height-1), int(events_cube[i][1] - events_cube[0][1]), events_cube[i][0])) 108 | 109 | spike_cube[int(events_cube[i][2]%width-1), int(events_cube[i][3]%height-1), int(events_cube[i][1] - events_cube[0][1])] = events_cube[i][0] 110 | 111 | 112 | return spike_cube 113 | 114 | 115 | def events_to_cube(events_cube, width, height, time_interval): 116 | """ 117 | Convert an event matrix into a dense spike cube. 118 | 119 | Inputs: 120 | ------- 121 | events_cube - the events within a time interval. 122 | width - the width of the spike cube. 123 | height - the height of the spike cube. 124 | time_interval - the time interval. 125 | Outputs: 126 | ------- 127 | spike_cube - the spike cube of events. 128 | 129 | """ 130 | spike_cube = np.zeros((width, height, time_interval)) 131 | 132 | for i in range(events_cube.shape[1]): 133 | 134 | # if int(events_cube[2][i])==0 or int(events_cube[3][i]) == 0: 135 | # print('x={}'.format(events_cube[2][i])) 136 | # print('y={}'.format(events_cube[3][i])) 137 | 138 | spike_cube[int(events_cube[2][i]%width-1), int(events_cube[3][i]%height-1), int(events_cube[1][i] - events_cube[1][0])] = events_cube[0][i] 139 | 140 | 141 | return spike_cube 142 | 143 | 144 | def aer_on_off_events(aer_data): 145 | """ 146 | Separate ON & OFF events of the dynamic vision sensor. 147 | 148 | Inputs: 149 | ------- 150 | aer_data - the dataset of the AER sensor including polarity (t), timestamp (ts), x coordinate (X) and y coordinate (Y). 151 | 152 | Outputs: 153 | ------- 154 | events_ON - ON events (increasing intensity). 155 | events_OFF - OFF events (decreasing intensity). 156 | 157 | """ 158 | events_ON = np.zeros((4, np.count_nonzero(aer_data.t == 1))) 159 | events_OFF = np.zeros((4, np.count_nonzero(aer_data.t == 0))) 160 | index_ON = np.where(aer_data.t == 1)[0] 161 | index_OFF = np.where(aer_data.t == 0)[0] 162 | 163 | # save ON events for the AER sensor 164 | events_ON[0, :] = aer_data.t[index_ON] 165 | events_ON[1, :] = aer_data.ts[index_ON] 166 | events_ON[2, :] = aer_data.x[index_ON] 167 | events_ON[3, :] = aer_data.y[index_ON] 168 | 169 | # save OFF events for the AER sensor 170 | events_OFF[0, :] = aer_data.t[index_OFF] 171 | events_OFF[1, :] = aer_data.ts[index_OFF] 172 | events_OFF[2, :] = aer_data.x[index_OFF] 173 | events_OFF[3, :] = aer_data.y[index_OFF] 174 | 175 | return events_ON, events_OFF 176 | 177 | 178 | def On_off_events(events): 179 | """ 180 | Separate ON & OFF events of the dynamic vision sensor. 181 | 182 | Inputs: 183 | ------- 184 | events - the matrix dataset of the AER sensor including polarity (t), timestamp (ts), x coordinate (X) and y coordinate (Y). 185 | 186 | Outputs: 187 | ------- 188 | events_ON - ON events (increasing intensity). 189 | events_OFF - OFF events (decreasing intensity). 190 | 191 | """ 192 | events_ON = np.zeros((4, np.count_nonzero(events[0, :] == 1))) 193 | events_OFF = np.zeros((4, np.count_nonzero(events[0, :] == -1))) 194 | index_ON = np.where(events[0, :] == 1)[0] 195 | index_OFF = np.where(events[0, :] == -1)[0] 196 | 197 | # save ON events for the AER sensor 198 | events_ON[0, :] = events[0, :][index_ON] 199 | events_ON[1, :] = events[1, :][index_ON] 200 | events_ON[2, :] = events[2, :][index_ON] 201 | events_ON[3, :] = events[3, :][index_ON] 202 | 203 | # save OFF events for the AER sensor 204 | events_OFF[0, :] = events[0, :][index_OFF] 205 | events_OFF[1, :] = events[1, :][index_OFF] 206 | events_OFF[2, :] = events[2, :][index_OFF] 207 | events_OFF[3, :] = events[3, :][index_OFF] 208 | 209 | return events_ON, events_OFF 210 | 211 | 212 | def select_aer_events(events, time_length): 213 | """ 214 | Select the events within a given time length from the DVS event matrix. 215 | 216 | Inputs: 217 | ------- 218 | events - the matrix dataset of the AER sensor including polarity (t), timestamp (ts), x coordinate (X) and y coordinate (Y). 219 | time_length - the selected time length. 220 | 221 | Outputs: 222 | ------- 223 | select_aer_data - the selected aer data. 224 | 225 | """ 226 | index = np.searchsorted(events[1, :], time_length) 227 | 228 | select_aer_data = events[:, 0:index] 229 | 230 | return select_aer_data 231 |
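# Illustrative example (not in the original): if events[1, :] = [0, 10, 25, 40, 90]
# and time_length = 30, np.searchsorted returns index 3, so the first three
# events (timestamps 0, 10, 25) are kept.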
232 | 233 | def separating_on_off_events(aer_data): 234 | """ 235 | Separate ON & OFF events of the dynamic vision sensor (a duplicate of aer_on_off_events, kept for compatibility). 236 | 237 | Inputs: 238 | ------- 239 | aer_data - the dataset of the AER sensor including polarity (t), timestamp (ts), x coordinate (X) and y coordinate (Y). 240 | 241 | Outputs: 242 | ------- 243 | events_ON - ON events (increasing intensity). 244 | events_OFF - OFF events (decreasing intensity). 245 | 246 | """ 247 | events_ON = np.zeros((4, np.count_nonzero(aer_data.t == 1))) 248 | events_OFF = np.zeros((4, np.count_nonzero(aer_data.t == 0))) 249 | index_ON = np.where(aer_data.t == 1)[0] 250 | index_OFF = np.where(aer_data.t == 0)[0] 251 | 252 | # save ON events for the AER sensor 253 | events_ON[0, :] = aer_data.t[index_ON] 254 | events_ON[1, :] = aer_data.ts[index_ON] 255 | events_ON[2, :] = aer_data.x[index_ON] 256 | events_ON[3, :] = aer_data.y[index_ON] 257 | 258 | # save OFF events for the AER sensor 259 | events_OFF[0, :] = aer_data.t[index_OFF] 260 | events_OFF[1, :] = aer_data.ts[index_OFF] 261 | events_OFF[2, :] = aer_data.x[index_OFF] 262 | events_OFF[3, :] = aer_data.y[index_OFF] 263 | 264 | return events_ON, events_OFF 265 | 266 | def separating_ON_OFF_events(events): 267 | """ 268 | Separate ON & OFF events of the dynamic vision sensor. 269 | 270 | Inputs: 271 | ------- 272 | events - the dataset of the AER sensor including polarity (t), timestamp (ts), x coordinate (X) and y coordinate (Y). 273 | 274 | Outputs: 275 | ------- 276 | ON_events - ON events (increasing intensity). 277 | OFF_events - OFF events (decreasing intensity). 278 | 279 | """ 280 | index_ON = np.where(events[0, :] == 1)[0] # polarity is stored in row 0 (row 3 holds the y coordinate) 281 | index_OFF = np.where(events[0, :] == -1)[0] 282 | ON_events = events[:, index_ON] 283 | OFF_events = events[:, index_OFF] 284 | 285 | return ON_events, OFF_events --------------------------------------------------------------------------------