├── .vscode ├── launch.json ├── settings.json └── tasks.json ├── CMakeLists.txt ├── README.md ├── buffer.cpp ├── buffer.hpp ├── buffer_pool.cpp ├── buffer_pool.hpp ├── cub-1.8.0 ├── .cproject ├── .project ├── .settings │ ├── .gitignore │ ├── org.eclipse.cdt.codan.core.prefs │ ├── org.eclipse.cdt.core.prefs │ ├── org.eclipse.cdt.ui.prefs │ └── org.eclipse.core.runtime.prefs ├── CHANGE_LOG.TXT ├── LICENSE.TXT ├── README.md ├── common.mk ├── cub │ ├── agent │ │ ├── agent_histogram.cuh │ │ ├── agent_radix_sort_downsweep.cuh │ │ ├── agent_radix_sort_upsweep.cuh │ │ ├── agent_reduce.cuh │ │ ├── agent_reduce_by_key.cuh │ │ ├── agent_rle.cuh │ │ ├── agent_scan.cuh │ │ ├── agent_segment_fixup.cuh │ │ ├── agent_select_if.cuh │ │ ├── agent_spmv_orig.cuh │ │ └── single_pass_scan_operators.cuh │ ├── block │ │ ├── block_adjacent_difference.cuh │ │ ├── block_discontinuity.cuh │ │ ├── block_exchange.cuh │ │ ├── block_histogram.cuh │ │ ├── block_load.cuh │ │ ├── block_radix_rank.cuh │ │ ├── block_radix_sort.cuh │ │ ├── block_raking_layout.cuh │ │ ├── block_reduce.cuh │ │ ├── block_scan.cuh │ │ ├── block_shuffle.cuh │ │ ├── block_store.cuh │ │ └── specializations │ │ │ ├── block_histogram_atomic.cuh │ │ │ ├── block_histogram_sort.cuh │ │ │ ├── block_reduce_raking.cuh │ │ │ ├── block_reduce_raking_commutative_only.cuh │ │ │ ├── block_reduce_warp_reductions.cuh │ │ │ ├── block_scan_raking.cuh │ │ │ ├── block_scan_warp_scans.cuh │ │ │ ├── block_scan_warp_scans2.cuh │ │ │ └── block_scan_warp_scans3.cuh │ ├── cub.cuh │ ├── device │ │ ├── device_histogram.cuh │ │ ├── device_partition.cuh │ │ ├── device_radix_sort.cuh │ │ ├── device_reduce.cuh │ │ ├── device_run_length_encode.cuh │ │ ├── device_scan.cuh │ │ ├── device_segmented_radix_sort.cuh │ │ ├── device_segmented_reduce.cuh │ │ ├── device_select.cuh │ │ ├── device_spmv.cuh │ │ └── dispatch │ │ │ ├── dispatch_histogram.cuh │ │ │ ├── dispatch_radix_sort.cuh │ │ │ ├── dispatch_reduce.cuh │ │ │ ├── dispatch_reduce_by_key.cuh │ │ │ ├── 
dispatch_rle.cuh │ │ │ ├── dispatch_scan.cuh │ │ │ ├── dispatch_select_if.cuh │ │ │ └── dispatch_spmv_orig.cuh │ ├── grid │ │ ├── grid_barrier.cuh │ │ ├── grid_even_share.cuh │ │ ├── grid_mapping.cuh │ │ └── grid_queue.cuh │ ├── host │ │ └── mutex.cuh │ ├── iterator │ │ ├── arg_index_input_iterator.cuh │ │ ├── cache_modified_input_iterator.cuh │ │ ├── cache_modified_output_iterator.cuh │ │ ├── constant_input_iterator.cuh │ │ ├── counting_input_iterator.cuh │ │ ├── discard_output_iterator.cuh │ │ ├── tex_obj_input_iterator.cuh │ │ ├── tex_ref_input_iterator.cuh │ │ └── transform_input_iterator.cuh │ ├── thread │ │ ├── thread_load.cuh │ │ ├── thread_operators.cuh │ │ ├── thread_reduce.cuh │ │ ├── thread_scan.cuh │ │ ├── thread_search.cuh │ │ └── thread_store.cuh │ ├── util_allocator.cuh │ ├── util_arch.cuh │ ├── util_debug.cuh │ ├── util_device.cuh │ ├── util_macro.cuh │ ├── util_namespace.cuh │ ├── util_ptx.cuh │ ├── util_type.cuh │ └── warp │ │ ├── specializations │ │ ├── warp_reduce_shfl.cuh │ │ ├── warp_reduce_smem.cuh │ │ ├── warp_scan_shfl.cuh │ │ └── warp_scan_smem.cuh │ │ ├── warp_reduce.cuh │ │ └── warp_scan.cuh ├── eclipse code style profile.xml ├── examples │ ├── block │ │ ├── .gitignore │ │ ├── Makefile │ │ ├── example_block_radix_sort.cu │ │ ├── example_block_reduce.cu │ │ ├── example_block_scan.cu │ │ └── reduce_by_key.cu │ └── device │ │ ├── .gitignore │ │ ├── Makefile │ │ ├── example_device_partition_flagged.cu │ │ ├── example_device_partition_if.cu │ │ ├── example_device_radix_sort.cu │ │ ├── example_device_reduce.cu │ │ ├── example_device_scan.cu │ │ ├── example_device_select_flagged.cu │ │ ├── example_device_select_if.cu │ │ ├── example_device_select_unique.cu │ │ └── example_device_sort_find_non_trivial_runs.cu ├── experimental │ ├── .gitignore │ ├── Makefile │ ├── defunct │ │ ├── example_coo_spmv.cu │ │ └── test_device_seg_reduce.cu │ ├── histogram │ │ ├── histogram_cub.h │ │ ├── histogram_gmem_atomics.h │ │ └── histogram_smem_atomics.h │ ├── 
histogram_compare.cu │ ├── sparse_matrix.h │ ├── spmv_compare.cu │ └── spmv_script.sh ├── test │ ├── .gitignore │ ├── Makefile │ ├── half.h │ ├── link_a.cu │ ├── link_b.cu │ ├── link_main.cpp │ ├── mersenne.h │ ├── test_allocator.cu │ ├── test_block_histogram.cu │ ├── test_block_load_store.cu │ ├── test_block_radix_sort.cu │ ├── test_block_reduce.cu │ ├── test_block_scan.cu │ ├── test_device_histogram.cu │ ├── test_device_radix_sort.cu │ ├── test_device_reduce.cu │ ├── test_device_reduce_by_key.cu │ ├── test_device_run_length_encode.cu │ ├── test_device_scan.cu │ ├── test_device_select_if.cu │ ├── test_device_select_unique.cu │ ├── test_grid_barrier.cu │ ├── test_iterator.cu │ ├── test_util.h │ ├── test_warp_reduce.cu │ └── test_warp_scan.cu └── tune │ ├── .gitignore │ ├── Makefile │ └── tune_device_reduce.cu ├── cuda_impl ├── convert_cuda_impl.cu ├── convert_cuda_impl.hpp ├── nonzero_cuda_impl.cu └── nonzero_cuda_impl.hpp ├── cuda_runtime.cpp ├── cuda_runtime.hpp ├── example ├── alexnet │ ├── generate_alexnet_onnx.py │ └── onnx_to_tensorrt.sh ├── alexnet_example.cpp ├── hfnet_example.cpp ├── lenet │ ├── __pycache__ │ │ └── lenet5.cpython-37.pyc │ ├── generate_lenet5_onnx.py │ ├── lenet.onnx │ ├── lenet5.py │ ├── lenet_simplify.onnx │ ├── net.engine │ ├── net_graph.json │ ├── net_inference.json │ ├── net_weights.bin │ └── onnx_to_tensorrt.sh ├── lenet_example.cpp ├── mobilenet_v2 │ ├── generate_mobilenet_onnx.py │ └── onnx_to_tensorrt.sh ├── mobilenet_v2_example.cpp ├── resnet │ ├── generate_resnet18_onnx.py │ ├── generate_resnet50_onnx.py │ └── onnx_to_tensorrt.sh ├── resnet_example.cpp ├── squeezenet │ ├── generate_squeezenet_onnx.py │ └── onnx_to_tensorrt.sh ├── squeezenet_example.cpp ├── vgg │ ├── generate_vgg_onnx.py │ └── onnx_to_tensorrt.sh ├── vgg_example.cpp ├── yolov3 │ ├── README.md │ ├── bus.jpg │ ├── detect_result.jpg │ ├── net_inference.json │ ├── onnx_to_tensorrt.sh │ ├── simplify_yolov3_onnx.py │ ├── simplify_yolov3_spp_onnx.py │ └── 
simplify_yolov3_tiny_onnx.py ├── yolov3_example.cpp ├── yolov4 │ ├── README.md │ ├── models.py │ ├── onnx_to_tensorrt.sh │ └── simplify_yolov4_onnx.py ├── yolov4_example.cpp ├── yolov5 │ ├── README.md │ ├── onnx_to_tensorrt.sh │ ├── simplify_yolov5l_onnx.py │ ├── simplify_yolov5m_onnx.py │ ├── simplify_yolov5s_onnx.py │ └── simplify_yolov5x_onnx.py └── yolov5_example.cpp ├── execution_info ├── dataformat_convert_execution_info.cu ├── dataformat_convert_execution_info.hpp ├── datatype_convert_execution_info.cu ├── datatype_convert_execution_info.hpp ├── execution_info.cpp ├── execution_info.hpp ├── normalization_execution_info.cu ├── normalization_execution_info.hpp ├── onnx_model_execution_info.cpp ├── onnx_model_execution_info.hpp ├── reshape_execution_info.cu ├── reshape_execution_info.hpp ├── transpose_execution_info.cu ├── transpose_execution_info.hpp ├── yolo_nms_execution_info.cu └── yolo_nms_execution_info.hpp ├── execution_parse.cpp ├── execution_parse.hpp ├── jsoncpp-00.11.0 ├── .clang-format ├── .clang-tidy ├── .gitattributes ├── .github │ └── ISSUE_TEMPLATE │ │ ├── bug_report.md │ │ └── feature_request.md ├── .gitignore ├── .travis.yml ├── .travis_scripts │ ├── cmake_builder.sh │ ├── meson_builder.sh │ ├── run-clang-format.py │ ├── run-clang-format.sh │ ├── travis.before_install.linux.sh │ ├── travis.before_install.osx.sh │ ├── travis.install.linux.sh │ └── travis.install.osx.sh ├── AUTHORS ├── CMakeLists.txt ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── amalgamate.py ├── appveyor.yml ├── dev.makefile ├── devtools │ ├── __init__.py │ ├── agent_vmw7.json │ ├── agent_vmxp.json │ ├── antglob.py │ ├── batchbuild.py │ ├── fixeol.py │ ├── licenseupdater.py │ └── tarball.py ├── doc │ ├── doxyfile.in │ ├── footer.html │ ├── header.html │ ├── jsoncpp.dox │ ├── readme.txt │ ├── roadmap.dox │ └── web_doxyfile.in ├── doxybuild.py ├── example │ ├── CMakeLists.txt │ ├── README.md │ ├── readFromStream │ │ ├── errorFormat.json │ │ ├── readFromStream.cpp │ │ └── 
withComment.json │ ├── readFromString │ │ └── readFromString.cpp │ ├── streamWrite │ │ └── streamWrite.cpp │ └── stringWrite │ │ └── stringWrite.cpp ├── include │ ├── CMakeLists.txt │ └── json │ │ ├── allocator.h │ │ ├── assertions.h │ │ ├── config.h │ │ ├── forwards.h │ │ ├── json.h │ │ ├── json_features.h │ │ ├── reader.h │ │ ├── value.h │ │ ├── version.h │ │ └── writer.h ├── meson.build ├── meson_options.txt ├── pkg-config │ └── jsoncpp.pc.in ├── src │ ├── CMakeLists.txt │ ├── jsontestrunner │ │ ├── CMakeLists.txt │ │ └── main.cpp │ ├── lib_json │ │ ├── CMakeLists.txt │ │ ├── json_reader.cpp │ │ ├── json_tool.h │ │ ├── json_value.cpp │ │ ├── json_valueiterator.inl │ │ └── json_writer.cpp │ └── test_lib_json │ │ ├── CMakeLists.txt │ │ ├── fuzz.cpp │ │ ├── fuzz.dict │ │ ├── fuzz.h │ │ ├── jsontest.cpp │ │ ├── jsontest.h │ │ └── main.cpp ├── test │ ├── cleantests.py │ ├── data │ │ ├── fail_test_array_01.json │ │ ├── fail_test_array_02.json │ │ ├── fail_test_object_01.json │ │ ├── fail_test_stack_limit.json │ │ ├── legacy_test_array_01.expected │ │ ├── legacy_test_array_01.json │ │ ├── legacy_test_array_02.expected │ │ ├── legacy_test_array_02.json │ │ ├── legacy_test_array_03.expected │ │ ├── legacy_test_array_03.json │ │ ├── legacy_test_array_04.expected │ │ ├── legacy_test_array_04.json │ │ ├── legacy_test_array_05.expected │ │ ├── legacy_test_array_05.json │ │ ├── legacy_test_array_06.expected │ │ ├── legacy_test_array_06.json │ │ ├── legacy_test_array_07.expected │ │ ├── legacy_test_array_07.json │ │ ├── legacy_test_basic_01.expected │ │ ├── legacy_test_basic_01.json │ │ ├── legacy_test_basic_02.expected │ │ ├── legacy_test_basic_02.json │ │ ├── legacy_test_basic_03.expected │ │ ├── legacy_test_basic_03.json │ │ ├── legacy_test_basic_04.expected │ │ ├── legacy_test_basic_04.json │ │ ├── legacy_test_basic_05.expected │ │ ├── legacy_test_basic_05.json │ │ ├── legacy_test_basic_06.expected │ │ ├── legacy_test_basic_06.json │ │ ├── legacy_test_basic_07.expected │ │ 
├── legacy_test_basic_07.json │ │ ├── legacy_test_basic_08.expected │ │ ├── legacy_test_basic_08.json │ │ ├── legacy_test_basic_09.expected │ │ ├── legacy_test_basic_09.json │ │ ├── legacy_test_comment_00.expected │ │ ├── legacy_test_comment_00.json │ │ ├── legacy_test_comment_01.expected │ │ ├── legacy_test_comment_01.json │ │ ├── legacy_test_comment_02.expected │ │ ├── legacy_test_comment_02.json │ │ ├── legacy_test_complex_01.expected │ │ ├── legacy_test_complex_01.json │ │ ├── legacy_test_integer_01.expected │ │ ├── legacy_test_integer_01.json │ │ ├── legacy_test_integer_02.expected │ │ ├── legacy_test_integer_02.json │ │ ├── legacy_test_integer_03.expected │ │ ├── legacy_test_integer_03.json │ │ ├── legacy_test_integer_04.expected │ │ ├── legacy_test_integer_04.json │ │ ├── legacy_test_integer_05.expected │ │ ├── legacy_test_integer_05.json │ │ ├── legacy_test_integer_06_64bits.expected │ │ ├── legacy_test_integer_06_64bits.json │ │ ├── legacy_test_integer_07_64bits.expected │ │ ├── legacy_test_integer_07_64bits.json │ │ ├── legacy_test_integer_08_64bits.expected │ │ ├── legacy_test_integer_08_64bits.json │ │ ├── legacy_test_large_01.expected │ │ ├── legacy_test_large_01.json │ │ ├── legacy_test_object_01.expected │ │ ├── legacy_test_object_01.json │ │ ├── legacy_test_object_02.expected │ │ ├── legacy_test_object_02.json │ │ ├── legacy_test_object_03.expected │ │ ├── legacy_test_object_03.json │ │ ├── legacy_test_object_04.expected │ │ ├── legacy_test_object_04.json │ │ ├── legacy_test_preserve_comment_01.expected │ │ ├── legacy_test_preserve_comment_01.json │ │ ├── legacy_test_real_01.expected │ │ ├── legacy_test_real_01.json │ │ ├── legacy_test_real_02.expected │ │ ├── legacy_test_real_02.json │ │ ├── legacy_test_real_03.expected │ │ ├── legacy_test_real_03.json │ │ ├── legacy_test_real_04.expected │ │ ├── legacy_test_real_04.json │ │ ├── legacy_test_real_05.expected │ │ ├── legacy_test_real_05.json │ │ ├── legacy_test_real_06.expected │ │ ├── 
legacy_test_real_06.json │ │ ├── legacy_test_real_07.expected │ │ ├── legacy_test_real_07.json │ │ ├── legacy_test_real_08.expected │ │ ├── legacy_test_real_08.json │ │ ├── legacy_test_real_09.expected │ │ ├── legacy_test_real_09.json │ │ ├── legacy_test_real_10.expected │ │ ├── legacy_test_real_10.json │ │ ├── legacy_test_real_11.expected │ │ ├── legacy_test_real_11.json │ │ ├── legacy_test_real_12.expected │ │ ├── legacy_test_real_12.json │ │ ├── legacy_test_string_01.expected │ │ ├── legacy_test_string_01.json │ │ ├── legacy_test_string_02.expected │ │ ├── legacy_test_string_02.json │ │ ├── legacy_test_string_03.expected │ │ ├── legacy_test_string_03.json │ │ ├── legacy_test_string_04.expected │ │ ├── legacy_test_string_04.json │ │ ├── legacy_test_string_05.expected │ │ ├── legacy_test_string_05.json │ │ ├── legacy_test_string_unicode_01.expected │ │ ├── legacy_test_string_unicode_01.json │ │ ├── legacy_test_string_unicode_02.expected │ │ ├── legacy_test_string_unicode_02.json │ │ ├── legacy_test_string_unicode_03.expected │ │ ├── legacy_test_string_unicode_03.json │ │ ├── legacy_test_string_unicode_04.expected │ │ ├── legacy_test_string_unicode_04.json │ │ ├── legacy_test_string_unicode_05.expected │ │ ├── legacy_test_string_unicode_05.json │ │ ├── test_array_08.expected │ │ ├── test_array_08.json │ │ ├── test_object_05.expected │ │ └── test_object_05.json │ ├── generate_expected.py │ ├── jsonchecker │ │ ├── fail1.json │ │ ├── fail10.json │ │ ├── fail11.json │ │ ├── fail12.json │ │ ├── fail13.json │ │ ├── fail14.json │ │ ├── fail15.json │ │ ├── fail16.json │ │ ├── fail17.json │ │ ├── fail18.json │ │ ├── fail19.json │ │ ├── fail2.json │ │ ├── fail20.json │ │ ├── fail21.json │ │ ├── fail22.json │ │ ├── fail23.json │ │ ├── fail24.json │ │ ├── fail25.json │ │ ├── fail26.json │ │ ├── fail27.json │ │ ├── fail28.json │ │ ├── fail29.json │ │ ├── fail3.json │ │ ├── fail30.json │ │ ├── fail31.json │ │ ├── fail32.json │ │ ├── fail33.json │ │ ├── fail4.json │ │ ├── 
fail5.json │ │ ├── fail6.json │ │ ├── fail7.json │ │ ├── fail8.json │ │ ├── fail9.json │ │ ├── pass1.json │ │ ├── pass2.json │ │ ├── pass3.json │ │ └── readme.txt │ ├── pyjsontestrunner.py │ ├── runjsontests.py │ └── rununittests.py └── version.in ├── node_create ├── common.cpp ├── common.hpp ├── create_activation_node.cpp ├── create_activation_node.hpp ├── create_batchnormalization_node.cpp ├── create_batchnormalization_node.hpp ├── create_concatenation_node.cpp ├── create_concatenation_node.hpp ├── create_conv2d_node.cpp ├── create_conv2d_node.hpp ├── create_elementwise_node.cpp ├── create_elementwise_node.hpp ├── create_gather_node.cpp ├── create_gather_node.hpp ├── create_gemm_node.cpp ├── create_gemm_node.hpp ├── create_identity_node.cpp ├── create_identity_node.hpp ├── create_node.cpp ├── create_node.hpp ├── create_nonzero_node.cpp ├── create_nonzero_node.hpp ├── create_padding_node.cpp ├── create_padding_node.hpp ├── create_pooling_node.cpp ├── create_pooling_node.hpp ├── create_reduce_node.cpp ├── create_reduce_node.hpp ├── create_resize_node.cpp ├── create_resize_node.hpp ├── create_shape_node.cpp ├── create_shape_node.hpp ├── create_shuffle_node.cpp ├── create_shuffle_node.hpp ├── create_slice_node.cpp ├── create_slice_node.hpp ├── create_softmax_node.cpp ├── create_softmax_node.hpp ├── create_unary_node.cpp ├── create_unary_node.hpp ├── create_unsqueeze_node.cpp ├── create_unsqueeze_node.hpp └── plugin │ ├── nonzero_plugin.cu │ └── nonzero_plugin.hpp ├── node_info ├── activation_node_info.cpp ├── activation_node_info.hpp ├── batchnormalization_node_info.cpp ├── batchnormalization_node_info.hpp ├── concatenation_node_info.cpp ├── concatenation_node_info.hpp ├── conv2d_node_info.cpp ├── conv2d_node_info.hpp ├── elementwise_node_info.cpp ├── elementwise_node_info.hpp ├── gather_node_info.cpp ├── gather_node_info.hpp ├── gemm_node_info.cpp ├── gemm_node_info.hpp ├── identity_node_info.cpp ├── identity_node_info.hpp ├── node_info.cpp ├── node_info.hpp ├── 
nonzero_node_info.cpp ├── nonzero_node_info.hpp ├── padding_node_info.cpp ├── padding_node_info.hpp ├── pooling_node_info.cpp ├── pooling_node_info.hpp ├── reduce_node_info.cpp ├── reduce_node_info.hpp ├── resize_node_info.cpp ├── resize_node_info.hpp ├── shape_node_info.cpp ├── shape_node_info.hpp ├── shuffle_node_info.cpp ├── shuffle_node_info.hpp ├── slice_node_info.cpp ├── slice_node_info.hpp ├── softmax_node_info.cpp ├── softmax_node_info.hpp ├── unary_node_info.cpp ├── unary_node_info.hpp ├── unsqueeze_node_info.cpp └── unsqueeze_node_info.hpp ├── onnx_tensorrt_wrapper.cpp ├── onnx_tensorrt_wrapper.hpp ├── python_scripts ├── .vscode │ ├── launch.json │ └── settings.json ├── Readme ├── convert_fp32_to_fp16.py ├── execution.py ├── onnx_edit.py ├── parse_onnx_model.py ├── parse_onnx_model_new.py ├── shufflenetv2_simplify.py ├── simplfy_and_infer_shape.py ├── test_hfnet_onnx_model_with_execution.py ├── test_lenet_onnx_model_with_execution.py ├── test_onnx_model.py └── test_yolov3_onnx_model_with_execution.py ├── tensorrt_engine.cpp ├── tensorrt_engine.hpp ├── utils.cpp ├── utils.hpp ├── weights_graph_parse.cpp └── weights_graph_parse.hpp /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "g++ - 生成和调试活动文件", 9 | "type": "cppdbg", 10 | "request": "launch", 11 | "program": "/home/xj-zjd/tensorrt_wrapper_github/tensorrt_wrapper_for_onnx/build/lenet_example", 12 | "args": [], 13 | "stopAtEntry": false, 14 | "cwd": "${workspaceFolder}", 15 | "environment": [], 16 | "externalConsole": false, 17 | "MIMode": "gdb", 18 | "setupCommands": [ 19 | { 20 | "description": "为 gdb 启用整齐打印", 21 | "text": "-enable-pretty-printing", 22 | "ignoreFailures": true 23 | } 24 | ], 25 | // "preLaunchTask": "C/C++: g++ build active file", 26 | "miDebuggerPath": "/usr/bin/gdb" 27 | } 28 | ] 29 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "*.py": "python", 4 | "iostream": "cpp", 5 | "functional": "cpp", 6 | "istream": "cpp", 7 | "tuple": "cpp", 8 | "cctype": "cpp", 9 | "clocale": "cpp", 10 | "cmath": "cpp", 11 | "cstdarg": "cpp", 12 | "cstddef": "cpp", 13 | "cstdio": "cpp", 14 | "cstdlib": "cpp", 15 | "cstring": "cpp", 16 | "ctime": "cpp", 17 | "cwchar": "cpp", 18 | "cwctype": "cpp", 19 | "array": "cpp", 20 | "atomic": "cpp", 21 | "strstream": "cpp", 22 | "*.tcc": "cpp", 23 | "bitset": "cpp", 24 | "chrono": "cpp", 25 | "complex": "cpp", 26 | "condition_variable": "cpp", 27 | "cstdint": "cpp", 28 | "deque": "cpp", 29 | "list": "cpp", 30 | "unordered_map": "cpp", 31 | "unordered_set": "cpp", 32 | "vector": "cpp", 33 | "exception": "cpp", 34 | "algorithm": "cpp", 35 | "ratio": "cpp", 36 | "system_error": "cpp", 37 | "type_traits": "cpp", 38 | "fstream": "cpp", 39 | "initializer_list": "cpp", 40 | "iomanip": "cpp", 41 | "iosfwd": "cpp", 42 | "limits": "cpp", 43 | "memory": "cpp", 44 | "mutex": "cpp", 45 | "new": "cpp", 46 | "ostream": "cpp", 47 | "numeric": 
"cpp", 48 | "sstream": "cpp", 49 | "stdexcept": "cpp", 50 | "streambuf": "cpp", 51 | "thread": "cpp", 52 | "cfenv": "cpp", 53 | "cinttypes": "cpp", 54 | "utility": "cpp", 55 | "typeindex": "cpp", 56 | "typeinfo": "cpp", 57 | "string": "cpp", 58 | "*.cuh": "cpp", 59 | "*.ipp": "cpp", 60 | "random": "cpp", 61 | "csignal": "cpp" 62 | }, 63 | "python.pythonPath": "/home/xj-zjd/anaconda3/envs/tensorrt_wrapper/bin/python" 64 | } -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | "tasks": [ 3 | { 4 | "type": "shell", 5 | "label": "C/C++: g++ build active file", 6 | "command": "/usr/bin/g++", 7 | "args": [ 8 | "-g", 9 | "${file}", 10 | "-o", 11 | "${fileDirname}/${fileBasenameNoExtension}" 12 | ], 13 | "options": { 14 | "cwd": "${workspaceFolder}" 15 | }, 16 | "problemMatcher": [ 17 | "$gcc" 18 | ], 19 | "group": { 20 | "kind": "build", 21 | "isDefault": true 22 | } 23 | } 24 | ], 25 | "version": "2.0.0" 26 | } -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project(test_json) 2 | cmake_minimum_required(VERSION 3.0) 3 | find_package(OpenCV REQUIRED) 4 | find_package(CUDA) 5 | set(TENSORRT_DIR /home/xj-zjd/TensorRT-7.0.0.11) 6 | include_directories(/usr/local/cuda-10.0/include) 7 | include_directories(${OpenCV_INCLUDE_DIRS}) 8 | include_directories(${TENSORRT_DIR}/include) 9 | include_directories(${PROJECT_BINARY_DIR}/../ ${PROJECT_BINARY_DIR}/../jsoncpp-00.11.0/include) 10 | include_directories(${PROJECT_BINARY_DIR}/../node_info) 11 | include_directories(${PROJECT_BINARY_DIR}/../node_create) 12 | # include_directories(${PROJECT_BINARY_DIR}/../cuda_impl) 13 | include_directories(${PROJECT_BINARY_DIR}/../node_info/plugin) 14 | # include_directories(${PROJECT_BINARY_DIR}/../execution) 15 | 
include_directories(${PROJECT_BINARY_DIR}/../execution_info) 16 | include_directories(${PROJECT_BINARY_DIR}/../cub-1.8.0) 17 | 18 | link_directories(${TENSORRT_DIR}/lib) 19 | link_directories(/usr/local/cuda-10.0/lib64) 20 | link_directories(${PROJECT_BINARY_DIR}/../jsoncpp-00.11.0/build-shared/) 21 | add_compile_options(-std=c++11 -w) 22 | set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11 -w" ) 23 | # file(GLOB CUDA_SRC_FILES ${PROJECT_BINARY_DIR}/../cuda_impl/*.cu) 24 | file(GLOB EXAMPLE_FILES ${PROJECT_BINARY_DIR}/../example/*.cpp) 25 | file(GLOB COMMON_FILES ${PROJECT_BINARY_DIR}/../*.cpp) 26 | file(GLOB NODE_CREATE_FILES ${PROJECT_BINARY_DIR}/../node_create/*.cpp) 27 | file(GLOB NODE_INFO_FILES ${PROJECT_BINARY_DIR}/../node_info/*.cpp) 28 | # file(GLOB EXECUTION_FILES ${PROJECT_BINARY_DIR}/../execution/*.c*) 29 | file(GLOB EXECUTION_FILES ${PROJECT_BINARY_DIR}/../execution_info/*.c*) 30 | 31 | cuda_add_library(tensorrtWrapper SHARED ${COMMON_FILES} ${NODE_CREATE_FILES} ${NODE_INFO_FILES} ${CUDA_SRC_FILES} ${EXECUTION_FILES}) 32 | 33 | foreach(example_file IN LISTS EXAMPLE_FILES) 34 | get_filename_component(target_file ${example_file} NAME_WE) 35 | cuda_add_executable(${target_file} ${example_file}) 36 | target_link_libraries(${target_file} tensorrtWrapper) 37 | target_link_libraries(${target_file} jsoncpp) 38 | target_link_libraries(${target_file} nvinfer) 39 | target_link_libraries(${target_file} cudart) 40 | target_link_libraries(${target_file} cusolver) 41 | target_link_libraries(${target_file} ${OpenCV_LIBS}) 42 | endforeach(example_file) 43 | 44 | -------------------------------------------------------------------------------- /buffer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __BUFFER_HPP__ 2 | #define __BUFFER_HPP__ 3 | 4 | #include 5 | #include 6 | #include "utils.hpp" 7 | using namespace std; 8 | 9 | namespace tensorrtInference { 10 | 11 | /** backend buffer storage type */ 12 | enum StorageType 
{ 13 | /** 14 | use NOT reusable memory. 15 | - allocates memory when `onAcquireBuffer` is called. 16 | - releases memory when `onReleaseBuffer` is called or when the backend is deleted. 17 | - do NOTHING when `onClearBuffer` is called. 18 | */ 19 | STATIC, 20 | /** 21 | use reusable memory. 22 | - allocates or reuses memory when `onAcquireBuffer` is called. prefers reusing. 23 | - collects memory for reuse when `onReleaseBuffer` is called. 24 | - releases memory when `onClearBuffer` is called or when the backend is deleted. 25 | */ 26 | DYNAMIC, 27 | /** 28 | * */ 29 | UNDEFINED_STORAGE_TYPE, 30 | }; 31 | class Buffer 32 | { 33 | public: 34 | Buffer(std::vector shape, OnnxDataType dataType, bool mallocFlag = false); 35 | Buffer(int size, OnnxDataType dataType, bool mallocFlag = false); 36 | ~Buffer(); 37 | Buffer(const Buffer&) = delete; 38 | Buffer(const Buffer&&) = delete; 39 | Buffer& operator=(const Buffer&) = delete; 40 | Buffer& operator=(const Buffer&&) = delete; 41 | static Buffer* create(std::vector shape, OnnxDataType dataType, void* userData); 42 | std::vector getShape(); 43 | OnnxDataType getDataType(); 44 | int getSize(); 45 | int getElementCount(); 46 | void setDevice(void* ptr); 47 | void setHost(void* ptr); 48 | template 49 | T* host() { return (T*)hostPtr;} 50 | template 51 | T* device() { return (T*)devicePtr;} 52 | void setStorageType(StorageType type) {storageType = type;} 53 | StorageType getStorageType() { return storageType; } 54 | 55 | private: 56 | OnnxDataType dataType; 57 | void* hostPtr; 58 | void* devicePtr; 59 | std::vector bufferShape; 60 | bool ownHost = false; 61 | StorageType storageType; 62 | }; 63 | } // namespace tensorrtInference 64 | #endif -------------------------------------------------------------------------------- /buffer_pool.cpp: -------------------------------------------------------------------------------- 1 | #include "buffer_pool.hpp" 2 | 3 | namespace tensorrtInference { 4 | 5 | void* BufferPool::alloc(int size, 
bool seperate) { 6 | if (!seperate) { 7 | auto iter = mFreeList.lower_bound(size); 8 | if (iter != mFreeList.end()) { 9 | auto buffer = iter->second->buffer; 10 | mFreeList.erase(iter); 11 | return buffer; 12 | } 13 | } 14 | std::shared_ptr node(new BufferNode(size)); 15 | mAllBuffer.insert(std::make_pair(node->buffer, node)); 16 | return node->buffer; 17 | } 18 | 19 | void BufferPool::recycle(void* buffer, bool release) { 20 | auto iter = mAllBuffer.find(buffer); 21 | if (iter == mAllBuffer.end()) { 22 | CHECK_ASSERT(false, "Error for recycle buffer\n"); 23 | return; 24 | } 25 | if (release) { 26 | mAllBuffer.erase(iter); 27 | return; 28 | } 29 | mFreeList.insert(std::make_pair(iter->second->size, iter->second)); 30 | } 31 | 32 | void BufferPool::clear() { 33 | mFreeList.clear(); 34 | mAllBuffer.clear(); 35 | } 36 | 37 | } // namespace tensorrtInference 38 | -------------------------------------------------------------------------------- /buffer_pool.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __BUFFER_POOL_HPP__ 2 | #define __BUFFER_POOL_HPP__ 3 | 4 | #include 5 | #include 6 | #include 7 | #include "utils.hpp" 8 | 9 | namespace tensorrtInference { 10 | 11 | class BufferPool { 12 | public: 13 | BufferPool() { 14 | } 15 | ~BufferPool() { clear(); } 16 | void* alloc(int size, bool seperate = false); 17 | void recycle(void* buffer, bool release = false); 18 | void clear(); 19 | 20 | class BufferNode { 21 | public: 22 | int size; 23 | void* buffer; 24 | BufferNode(int size) { 25 | buffer = nullptr; 26 | this->size = 0; 27 | buffer = bufferMalloc(size); 28 | this->size = size; 29 | } 30 | ~BufferNode() { 31 | if(buffer != nullptr) { 32 | bufferFree(buffer); 33 | this->size = size; 34 | } 35 | } 36 | void *bufferMalloc(size_t size_in_bytes) 37 | { 38 | void *ptr; 39 | CUDA_CHECK(cudaMalloc(&ptr, size_in_bytes)); 40 | return ptr; 41 | } 42 | 43 | void bufferFree(void *ptr) 44 | { 45 | CUDA_CHECK(cudaFree(ptr)); 46 | } 
47 | }; 48 | private: 49 | std::map> mAllBuffer; 50 | std::multimap> mFreeList; 51 | }; 52 | 53 | } // namespace tensorrtInference 54 | 55 | #endif //__BUFFER_POOL_HPP__ 56 | -------------------------------------------------------------------------------- /cub-1.8.0/.project: -------------------------------------------------------------------------------- 1 | 2 | 3 | GIT_CUB 4 | 5 | 6 | 7 | 8 | 9 | org.eclipse.cdt.managedbuilder.core.genmakebuilder 10 | clean,full,incremental, 11 | 12 | 13 | 14 | 15 | org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder 16 | full,incremental, 17 | 18 | 19 | 20 | 21 | 22 | org.eclipse.cdt.core.cnature 23 | org.eclipse.cdt.managedbuilder.core.managedBuildNature 24 | org.eclipse.cdt.managedbuilder.core.ScannerConfigNature 25 | org.eclipse.cdt.core.ccnature 26 | 27 | 28 | -------------------------------------------------------------------------------- /cub-1.8.0/.settings/.gitignore: -------------------------------------------------------------------------------- 1 | /language.settings.xml 2 | -------------------------------------------------------------------------------- /cub-1.8.0/.settings/org.eclipse.cdt.ui.prefs: -------------------------------------------------------------------------------- 1 | eclipse.preferences.version=1 2 | formatter_profile=_B40C 3 | formatter_settings_version=1 4 | -------------------------------------------------------------------------------- /cub-1.8.0/.settings/org.eclipse.core.runtime.prefs: -------------------------------------------------------------------------------- 1 | content-types/enabled=true 2 | content-types/org.eclipse.cdt.core.cxxHeader/file-extensions=cuh 3 | content-types/org.eclipse.cdt.core.cxxSource/file-extensions=cu 4 | eclipse.preferences.version=1 5 | -------------------------------------------------------------------------------- /cub-1.8.0/LICENSE.TXT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010-2011, Duane 
Merrill. All rights reserved. 2 | Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | * Redistributions of source code must retain the above copyright 7 | notice, this list of conditions and the following disclaimer. 8 | * Redistributions in binary form must reproduce the above copyright 9 | notice, this list of conditions and the following disclaimer in the 10 | documentation and/or other materials provided with the distribution. 11 | * Neither the name of the NVIDIA CORPORATION nor the 12 | names of its contributors may be used to endorse or promote products 13 | derived from this software without specific prior written permission. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY 19 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /cub-1.8.0/cub/util_namespace.cuh: -------------------------------------------------------------------------------- 1 | /****************************************************************************** 2 | * Copyright (c) 2011, Duane Merrill. All rights reserved. 
3 | * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are met: 7 | * * Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * * Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * * Neither the name of the NVIDIA CORPORATION nor the 13 | * names of its contributors may be used to endorse or promote products 14 | * derived from this software without specific prior written permission. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 17 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY 20 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 23 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 25 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26 | * 27 | ******************************************************************************/ 28 | 29 | /** 30 | * \file 31 | * Place-holder for prefixing the cub namespace 32 | */ 33 | 34 | #pragma once 35 | 36 | // For example: 37 | //#define CUB_NS_PREFIX namespace thrust{ namespace detail { 38 | //#define CUB_NS_POSTFIX } } 39 | 40 | #ifndef CUB_NS_PREFIX 41 | #define CUB_NS_PREFIX 42 | #endif 43 | 44 | #ifndef CUB_NS_POSTFIX 45 | #define CUB_NS_POSTFIX 46 | #endif 47 | -------------------------------------------------------------------------------- /cub-1.8.0/examples/block/.gitignore: -------------------------------------------------------------------------------- 1 | /bin 2 | /Debug 3 | /Release 4 | /cuda55.sdf 5 | /cuda55.suo 6 | /cuda60.sdf 7 | /cuda60.suo 8 | -------------------------------------------------------------------------------- /cub-1.8.0/examples/block/reduce_by_key.cu: -------------------------------------------------------------------------------- 1 | 2 | 3 | #include 4 | 5 | 6 | template < 7 | int BLOCK_THREADS, ///< Number of CTA threads 8 | typename KeyT, ///< Key type 9 | typename ValueT> ///< Value type 10 | __global__ void Kernel() 11 | { 12 | // Tuple type for scanning (pairs accumulated segment-value with segment-index) 13 | typedef cub::KeyValuePair OffsetValuePairT; 14 | 15 | // Reduce-value-by-segment scan operator 16 | typedef cub::ReduceBySegmentOp ReduceBySegmentOpT; 17 | 18 | // Parameterized BlockDiscontinuity type for setting head flags 19 | typedef cub::BlockDiscontinuity< 20 | KeyT, 21 | BLOCK_THREADS> 22 | BlockDiscontinuityKeysT; 23 | 24 | // Parameterized BlockScan type 25 | typedef cub::BlockScan< 26 | OffsetValuePairT, 27 | BLOCK_THREADS, 28 | cub::BLOCK_SCAN_WARP_SCANS> 29 | BlockScanT; 30 | 31 | // Shared memory 32 | __shared__ union TempStorage 33 | { 34 | typename BlockScanT::TempStorage scan; // Scan storage 35 | typename BlockDiscontinuityKeysT::TempStorage discontinuity; // Discontinuity storage 36 | } 
temp_storage; 37 | 38 | 39 | // Read data (each thread gets 3 items each, every 9 items is a segment) 40 | KeyT my_keys[3] = {threadIdx.x / 3, threadIdx.x / 3, threadIdx.x / 3}; 41 | ValueT my_values[3] = {1, 1, 1}; 42 | 43 | // Set head segment head flags 44 | int my_flags[3]; 45 | BlockDiscontinuityKeysT(temp_storage.discontinuity).FlagHeads( 46 | my_flags, 47 | my_keys, 48 | cub::Inequality()); 49 | 50 | __syncthreads(); 51 | 52 | 53 | 54 | 55 | 56 | 57 | } 58 | -------------------------------------------------------------------------------- /cub-1.8.0/examples/device/.gitignore: -------------------------------------------------------------------------------- 1 | /bin 2 | /Debug 3 | /ipch 4 | /Release 5 | /cuda55.sdf 6 | /cuda55.suo 7 | /cuda60.sdf 8 | /cuda60.suo 9 | -------------------------------------------------------------------------------- /cub-1.8.0/experimental/.gitignore: -------------------------------------------------------------------------------- 1 | /bin 2 | -------------------------------------------------------------------------------- /cub-1.8.0/experimental/spmv_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for i in 1 2 4 8 16 32 64 128 256 512 1024 2048 4096 8192 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304 8388608 16777216 4 | do 5 | echo `date`, `$1 --dense=$i $2 $3 $4 $5 $6 $7` 6 | done 7 | 8 | echo 9 | echo 10 | 11 | for i in `ls /home/dumerrill/graphs/spmv/*.mtx` 12 | do 13 | if [[ ( "`head -n 50 $i | grep complex`" = "" ) && ( "`head -n 50 $i | grep array`" = "" ) ]] 14 | then 15 | echo `date`, `$1 --mtx=$i $2 $3 $4 $5 $6 $7 2>/dev/null` 16 | fi 17 | done 18 | 19 | echo 20 | echo 21 | 22 | for i in `ls /scratch/dumerrill/graphs/mtx/*.mtx` 23 | #for i in `ls /cygdrive/w/Dev/UFget/mtx/*.mtx` 24 | do 25 | if [[ ( "`head -n 50 $i | grep complex`" = "" ) && ( "`head -n 50 $i | grep array`" = "" ) ]] 26 | then 27 | echo `date`, `$1 --mtx=$i $2 $3 $4 $5 
$6 $7 2>/dev/null` 28 | fi 29 | done 30 | 31 | -------------------------------------------------------------------------------- /cub-1.8.0/test/.gitignore: -------------------------------------------------------------------------------- 1 | /bin 2 | /link_main.obj 3 | /dummy/ 4 | -------------------------------------------------------------------------------- /cub-1.8.0/test/link_a.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void a() 4 | { 5 | printf("a() called\n"); 6 | 7 | cub::DoubleBuffer d_keys; 8 | cub::DoubleBuffer d_values; 9 | size_t temp_storage_bytes = 0; 10 | cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys, d_values, 1024); 11 | } 12 | -------------------------------------------------------------------------------- /cub-1.8.0/test/link_b.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void b() 4 | { 5 | printf("b() called\n"); 6 | 7 | cub::DoubleBuffer d_keys; 8 | cub::DoubleBuffer d_values; 9 | size_t temp_storage_bytes = 0; 10 | cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys, d_values, 1024); 11 | } 12 | -------------------------------------------------------------------------------- /cub-1.8.0/test/link_main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | extern void a(); 4 | extern void b(); 5 | 6 | int main() 7 | { 8 | printf("hello world\n"); 9 | return 0; 10 | } 11 | -------------------------------------------------------------------------------- /cub-1.8.0/tune/.gitignore: -------------------------------------------------------------------------------- 1 | /bin 2 | -------------------------------------------------------------------------------- /cuda_impl/convert_cuda_impl.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CONVERT_CUDA_IMPL_HPP__ 2 | #define __CONVERT_CUDA_IMPL_HPP__ 3 | 
4 | #include 5 | #include 6 | #include 7 | 8 | namespace CudaImpl 9 | { 10 | extern void ConvertFp16ToFp32CudaImpl(const void* input, const int inputSize, void* output, cudaStream_t stream); 11 | extern void ConvertFp32ToFp16CudaImpl(const void* input, const int inputSize, void* output, cudaStream_t stream); 12 | } 13 | 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /cuda_impl/nonzero_cuda_impl.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __NONZERO_CUDA_IMPL_HPP__ 2 | #define __NONZERO_CUDA_IMPL_HPP__ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | namespace CudaImpl 9 | { 10 | extern void NoneZeroCudaImpl(const unsigned char* inputs, const int inputSize, int* output, cudaStream_t stream); 11 | } 12 | 13 | 14 | #endif 15 | -------------------------------------------------------------------------------- /example/alexnet/generate_alexnet_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | 8 | def main(): 9 | net = torchvision.models.alexnet(pretrained=True) 10 | net = net.eval() 11 | tmp = torch.ones(1, 3, 224, 224) 12 | execute_path = os.path.dirname(os.path.realpath(__file__)) 13 | onnx_file = os.path.join(execute_path, "alexnet.onnx") 14 | torch.onnx.export(net, tmp, onnx_file, export_params=True, opset_version=11, input_names = ['input'], output_names = ['output']) 15 | out = net(tmp) 16 | print('alexnet out shape:', out.shape) 17 | print('alexnet out:', out) 18 | 19 | model = onnx.load(onnx_file) 20 | model_simp, check = simplify(model) 21 | onnx_simplify_file = os.path.join(execute_path, "alexnet_simplify.onnx") 22 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 23 | 24 | if __name__ == '__main__': 25 | main() 26 | 27 | 
-------------------------------------------------------------------------------- /example/alexnet/onnx_to_tensorrt.sh: -------------------------------------------------------------------------------- 1 | python ../../python_scripts/parse_onnx_model_new.py ./alexnet_simplify.onnx -------------------------------------------------------------------------------- /example/hfnet_example.cpp: -------------------------------------------------------------------------------- 1 | #include "json/json.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "tensorrt_engine.hpp" 8 | #include 9 | 10 | using namespace std; 11 | using namespace tensorrtInference; 12 | 13 | #define NET_NAME "./example/hfnet/" 14 | #define GRAPH_JSON_FILE(net) net "net_graph.json" 15 | #define GRAPH_WEIGHTS_FILE(net) net "net_weights.bin" 16 | #define GRAPH_ENGINE_FILE(net) net "net.engine" 17 | #define INFERENCE_JSON_FILE(net) net "/net_inference.json" 18 | 19 | #define SAVE_ENGINE 0 20 | #define FP16_FLAG false 21 | 22 | #define BACTCH_SIZE 1 23 | #define CHANNEL_SIZE 3 24 | #define HEIGHT_SIZE 720 25 | #define WIDTH_SIZE 1280 26 | 27 | int main() 28 | { 29 | std::string jsonFileName = GRAPH_JSON_FILE(NET_NAME); 30 | std::string weightsFileName = GRAPH_WEIGHTS_FILE(NET_NAME); 31 | std::string engineFileName = GRAPH_ENGINE_FILE(NET_NAME); 32 | std::string inferenceFileName = INFERENCE_JSON_FILE(NET_NAME); 33 | #if SAVE_ENGINE 34 | // save engine file 35 | tensorrtEngine engine(jsonFileName, weightsFileName, FP16_FLAG); 36 | engine.saveEnginePlanFile(engineFileName); 37 | #else 38 | //engine inference 39 | std::string jpgFile = "./example/hfnet/gray_test.bmp"; 40 | cv::Mat colorMat = cv::imread(jpgFile.c_str()); 41 | cv::Mat grayMat; 42 | cv::Mat inputMat( 721, 1281, CV_8UC1, cv::Scalar(0)); 43 | cv::Rect rect(0, 0, 1280, 720); 44 | cv::cvtColor(colorMat, grayMat, cv::COLOR_BGR2GRAY); 45 | grayMat.copyTo(inputMat(rect)); 46 | 47 | tensorrtEngine engine(inferenceFileName); 48 
| std::map inputs; 49 | inputs["gray_image"] = (void*)inputMat.data; 50 | engine.prepareData(inputs); 51 | 52 | engine.doInference(true); 53 | // for (int i = 0; i < 10; i++) { 54 | // auto start = std::chrono::system_clock::now(); 55 | // engine.doInference(true); 56 | // auto end = std::chrono::system_clock::now(); 57 | // std::cout << std::chrono::duration_cast(end - start).count() << "ms" << std::endl; 58 | // } 59 | auto result = engine.getInferenceResult(); 60 | #endif 61 | } -------------------------------------------------------------------------------- /example/lenet/__pycache__/lenet5.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zjd1988/tensorrt_wrapper_for_onnx/7547ceaa8c187beaf79627d80dba0f3c8c67b8a7/example/lenet/__pycache__/lenet5.cpython-37.pyc -------------------------------------------------------------------------------- /example/lenet/generate_lenet5_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from lenet5 import Lenet5 4 | import os 5 | import onnx 6 | import onnxmltools 7 | from onnxsim import simplify 8 | 9 | def main(): 10 | torch.manual_seed(1234) 11 | net = Lenet5() 12 | net.eval() 13 | tmp = torch.ones(1, 1, 32, 32) 14 | execute_path = os.path.dirname(os.path.realpath(__file__)) 15 | onnx_file = os.path.join(execute_path, "lenet.onnx") 16 | torch.onnx.export(net, tmp, onnx_file, export_params=True, opset_version=11, input_names = ['input'], output_names = ['output']) 17 | out = net(tmp) 18 | print('lenet out shape:', out.shape) 19 | print('lenet out:', out) 20 | 21 | model = onnx.load(onnx_file) 22 | model_simp, check = simplify(model) 23 | onnx_simplify_file = os.path.join(execute_path, "lenet_simplify.onnx") 24 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 25 | 26 | if __name__ == '__main__': 27 | main() 28 | 29 | 
-------------------------------------------------------------------------------- /example/lenet/lenet.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zjd1988/tensorrt_wrapper_for_onnx/7547ceaa8c187beaf79627d80dba0f3c8c67b8a7/example/lenet/lenet.onnx -------------------------------------------------------------------------------- /example/lenet/lenet5.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn import functional as F 4 | 5 | class Lenet5(nn.Module): 6 | """ 7 | for cifar10 dataset. 8 | """ 9 | def __init__(self): 10 | super(Lenet5, self).__init__() 11 | 12 | self.conv1 = nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0) 13 | self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) 14 | self.conv2 = nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0) 15 | self.fc1 = nn.Linear(16*5*5, 120) 16 | self.fc2 = nn.Linear(120, 84) 17 | self.fc3 = nn.Linear(84, 10) 18 | 19 | def forward(self, x): 20 | print('input: ', x.shape) 21 | x = F.relu(self.conv1(x)) 22 | print('conv1',x.shape) 23 | x = self.pool1(x) 24 | print('pool1: ', x.shape) 25 | x = F.relu(self.conv2(x)) 26 | print('conv2',x.shape) 27 | x = self.pool1(x) 28 | print('pool2',x.shape) 29 | x = x.view(x.size(0), -1) 30 | print('view: ', x.shape) 31 | x = F.relu(self.fc1(x)) 32 | print('fc1: ', x.shape) 33 | x = F.relu(self.fc2(x)) 34 | x = F.softmax(self.fc3(x), dim=1) 35 | return x 36 | -------------------------------------------------------------------------------- /example/lenet/lenet_simplify.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zjd1988/tensorrt_wrapper_for_onnx/7547ceaa8c187beaf79627d80dba0f3c8c67b8a7/example/lenet/lenet_simplify.onnx -------------------------------------------------------------------------------- /example/lenet/net.engine: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/zjd1988/tensorrt_wrapper_for_onnx/7547ceaa8c187beaf79627d80dba0f3c8c67b8a7/example/lenet/net.engine -------------------------------------------------------------------------------- /example/lenet/net_inference.json: -------------------------------------------------------------------------------- 1 | {"execution_info": {"OnnxModel_0": {"type": "OnnxModel", "attr": {"onnx_file": "./example/lenet/net.engine"}, "inputs": ["input"], "outputs": ["output"], "tensor_info": {"input": {"shape": [1, 1, 32, 32], "data_type": 1, "malloc_host": false, "malloc_type": "STATIC", "memcpy_dir": "host_to_device"}, "output": {"shape": [1, 10], "data_type": 1, "malloc_host": true, "malloc_type": "STATIC", "memcpy_dir": "device_to_host"}}}}, "topo_order": ["OnnxModel_0"], "input_tensor_names": ["input"], "output_tensor_names": ["output"]} -------------------------------------------------------------------------------- /example/lenet/net_weights.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zjd1988/tensorrt_wrapper_for_onnx/7547ceaa8c187beaf79627d80dba0f3c8c67b8a7/example/lenet/net_weights.bin -------------------------------------------------------------------------------- /example/lenet/onnx_to_tensorrt.sh: -------------------------------------------------------------------------------- 1 | python ../../python_scripts/parse_onnx_model_new.py ./lenet_simplify.onnx -------------------------------------------------------------------------------- /example/mobilenet_v2/generate_mobilenet_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | 8 | def main(): 9 | torch.manual_seed(1234) 10 | net = torchvision.models.mobilenet_v2(pretrained=True) 
11 | net = net.eval() 12 | tmp = torch.ones(1, 3, 224, 224) 13 | execute_path = os.path.dirname(os.path.realpath(__file__)) 14 | onnx_file = os.path.join(execute_path, "mobilenet_v2.onnx") 15 | torch.onnx.export(net, tmp, onnx_file, export_params=True, opset_version=11, input_names = ['input'], output_names = ['output']) 16 | out = net(tmp) 17 | print('mobilenet_v2 out shape:', out.shape) 18 | print('mobilenet_v2 out:', out) 19 | 20 | model = onnx.load(onnx_file) 21 | model_simp, check = simplify(model) 22 | onnx_simplify_file = os.path.join(execute_path, "mobilenet_v2_simplify.onnx") 23 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 24 | 25 | if __name__ == '__main__': 26 | main() 27 | 28 | -------------------------------------------------------------------------------- /example/mobilenet_v2/onnx_to_tensorrt.sh: -------------------------------------------------------------------------------- 1 | python ../../python_scripts/parse_onnx_model.py ./mobilenet_v2_simplify.onnx -------------------------------------------------------------------------------- /example/resnet/generate_resnet18_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | 8 | def main(): 9 | net = torchvision.models.resnet18(pretrained=True) 10 | net = net.eval() 11 | print(net) 12 | tmp = torch.ones(1, 3, 224, 224) 13 | execute_path = os.path.dirname(os.path.realpath(__file__)) 14 | onnx_file = os.path.join(execute_path, "resnet18.onnx") 15 | torch.onnx.export(net, tmp, onnx_file, export_params=True, opset_version=11, input_names = ['input'], output_names = ['output']) 16 | out = net(tmp) 17 | print('resnet18 out shape:', out.shape) 18 | print('resnet18 out:', out) 19 | 20 | model = onnx.load(onnx_file) 21 | model_simp, check = simplify(model, skip_fuse_bn = True) 22 | # model_simp, check = simplify(model) # get errors 
https://github.com/daquexian/onnx-simplifier/issues/53 23 | onnx_simplify_file = os.path.join(execute_path, "resnet18_simplify.onnx") 24 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 25 | 26 | 27 | if __name__ == '__main__': 28 | main() 29 | 30 | -------------------------------------------------------------------------------- /example/resnet/generate_resnet50_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | 8 | def main(): 9 | net = torchvision.models.resnet50(pretrained=True) 10 | net = net.eval() 11 | print(net) 12 | tmp = torch.ones(1, 3, 224, 224) 13 | execute_path = os.path.dirname(os.path.realpath(__file__)) 14 | onnx_file = os.path.join(execute_path, "resnet50.onnx") 15 | torch.onnx.export(net, tmp, onnx_file, export_params=True, opset_version=11, input_names = ['input'], output_names = ['output']) 16 | out = net(tmp) 17 | print('resnet50 out shape:', out.shape) 18 | print('resnet50 out:', out) 19 | 20 | model = onnx.load(onnx_file) 21 | model_simp, check = simplify(model, skip_fuse_bn = True) 22 | # model_simp, check = simplify(model) # get errors https://github.com/daquexian/onnx-simplifier/issues/53 23 | onnx_simplify_file = os.path.join(execute_path, "resnet50_simplify.onnx") 24 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 25 | 26 | 27 | if __name__ == '__main__': 28 | main() 29 | 30 | -------------------------------------------------------------------------------- /example/resnet/onnx_to_tensorrt.sh: -------------------------------------------------------------------------------- 1 | python ../../python_scripts/parse_onnx_model_new.py ./resnet18_simplify.onnx 2 | # python ../../python_scripts/parse_onnx_model_new.py ./resnet50_simplify.onnx -------------------------------------------------------------------------------- 
/example/squeezenet/generate_squeezenet_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | from torchsummary import summary 8 | 9 | def main(): 10 | torch.manual_seed(1234) 11 | net = torchvision.models.squeezenet1_1(pretrained=True) 12 | net = net.eval() 13 | print(net) 14 | tmp = torch.ones(1, 3, 227, 227) 15 | execute_path = os.path.dirname(os.path.realpath(__file__)) 16 | onnx_file = os.path.join(execute_path, "squeezenet.onnx") 17 | torch.onnx.export(net, tmp, onnx_file, export_params=True, opset_version=11, input_names = ['input'], output_names = ['output']) 18 | out = net(tmp) 19 | summary(net, (3, 227, 227)) 20 | print('squeezenet out shape:', out.shape) 21 | print('squeezenet out:', out) 22 | 23 | 24 | model = onnx.load(onnx_file) 25 | model_simp, check = simplify(model) 26 | onnx_simplify_file = os.path.join(execute_path, "squeezenet_simplify.onnx") 27 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 28 | 29 | if __name__ == '__main__': 30 | main() 31 | 32 | -------------------------------------------------------------------------------- /example/squeezenet/onnx_to_tensorrt.sh: -------------------------------------------------------------------------------- 1 | python ../../python_scripts/parse_onnx_model_new.py ./squeezenet_simplify.onnx 2 | # python ../../python_scripts/parse_onnx_model.py ./squeezenet_edit.onnx -------------------------------------------------------------------------------- /example/vgg/generate_vgg_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | 8 | def main(): 9 | torch.manual_seed(1234) 10 | net = torchvision.models.vgg11(pretrained=True) 11 | net = net.eval() 12 | print(net) 13 | tmp 
= torch.ones(1, 3, 224, 224) 14 | execute_path = os.path.dirname(os.path.realpath(__file__)) 15 | onnx_file = os.path.join(execute_path, "vgg.onnx") 16 | torch.onnx.export(net, tmp, onnx_file, export_params=True, opset_version=11, input_names = ['input'], output_names = ['output']) 17 | out = net(tmp) 18 | print('vgg out shape:', out.shape) 19 | print('vgg out:', out) 20 | 21 | 22 | model = onnx.load(onnx_file) 23 | model_simp, check = simplify(model) 24 | onnx_simplify_file = os.path.join(execute_path, "vgg_simplify.onnx") 25 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 26 | 27 | if __name__ == '__main__': 28 | main() 29 | 30 | -------------------------------------------------------------------------------- /example/vgg/onnx_to_tensorrt.sh: -------------------------------------------------------------------------------- 1 | python ../../python_scripts/parse_onnx_model.py ./vgg_simplify.onnx -------------------------------------------------------------------------------- /example/yolov3/README.md: -------------------------------------------------------------------------------- 1 | # prepare 2 | * pytorch 1.4+ 3 | 4 | * git clone https://github.com/ultralytics/yolov3.git 5 | 6 | * download yolov3.pt/yolov3-tiny.pt/yolov3-spp.pt from https://drive.google.com/drive/folders/1LezFG5g3BCW6iYaV89B2i64cqEUZD7e0 , and place to yolov3/weights dir 7 | 8 | # convert model to onnx 9 | 1. cd yolov3(https://github.com/ultralytics/yolov3.git) 10 | 11 | 2. modify ONNX_EXPORT=True int models.py 12 | 13 | 3. modify torch.onnx.export(xxxxx, opset_version=10, xxxxx) in detect.py 14 | 15 | 4. python3 detect.py --cfg cfg/yolov3.cfg --weights weights/yolov3.pt, you will get a yolov3.onnx (same as yolov3-tiny/yolov3-spp) 16 | 17 | 5. cd xxx/example/yolov3/ 18 | 19 | 6. python3 simplify_yolov3_onnx.py to get yolov3_simplify.onnx 20 | 21 | 7. and run sh onnx_to_tensorrt.sh, generate net_graph.json net_weights.bin for c++ code. 
22 | 23 | -------------------------------------------------------------------------------- /example/yolov3/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zjd1988/tensorrt_wrapper_for_onnx/7547ceaa8c187beaf79627d80dba0f3c8c67b8a7/example/yolov3/bus.jpg -------------------------------------------------------------------------------- /example/yolov3/detect_result.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zjd1988/tensorrt_wrapper_for_onnx/7547ceaa8c187beaf79627d80dba0f3c8c67b8a7/example/yolov3/detect_result.jpg -------------------------------------------------------------------------------- /example/yolov3/onnx_to_tensorrt.sh: -------------------------------------------------------------------------------- 1 | # python ../../python_scripts/parse_onnx_model_new.py ./yolov3_simplify.onnx 2 | python ../../python_scripts/parse_onnx_model_new.py ./yolov3-tiny_simplify.onnx 3 | # python ../../python_scripts/parse_onnx_model_new.py ./yolov3-spp_simplify.onnx 4 | -------------------------------------------------------------------------------- /example/yolov3/simplify_yolov3_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | from torchsummary import summary 8 | 9 | def main(): 10 | torch.manual_seed(1234) 11 | tmp = torch.ones(1, 3, 512, 384) 12 | execute_path = os.path.dirname(os.path.realpath(__file__)) 13 | onnx_file = os.path.join(execute_path, "yolov3.onnx") 14 | 15 | model = onnx.load(onnx_file) 16 | model_simp, check = simplify(model) 17 | onnx_simplify_file = os.path.join(execute_path, "yolov3_simplify.onnx") 18 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 19 | 20 | if __name__ == '__main__': 21 | main() 22 | 23 | 
-------------------------------------------------------------------------------- /example/yolov3/simplify_yolov3_spp_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | from torchsummary import summary 8 | 9 | def main(): 10 | torch.manual_seed(1234) 11 | tmp = torch.ones(1, 3, 512, 384) 12 | execute_path = os.path.dirname(os.path.realpath(__file__)) 13 | onnx_file = os.path.join(execute_path, "yolov3-spp.onnx") 14 | 15 | model = onnx.load(onnx_file) 16 | model_simp, check = simplify(model) 17 | onnx_simplify_file = os.path.join(execute_path, "yolov3-spp_simplify.onnx") 18 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 19 | 20 | if __name__ == '__main__': 21 | main() 22 | 23 | -------------------------------------------------------------------------------- /example/yolov3/simplify_yolov3_tiny_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | from torchsummary import summary 8 | 9 | def main(): 10 | torch.manual_seed(1234) 11 | tmp = torch.ones(1, 3, 512, 384) 12 | execute_path = os.path.dirname(os.path.realpath(__file__)) 13 | onnx_file = os.path.join(execute_path, "yolov3-tiny.onnx") 14 | 15 | model = onnx.load(onnx_file) 16 | model_simp, check = simplify(model) 17 | onnx_simplify_file = os.path.join(execute_path, "yolov3-tiny_simplify.onnx") 18 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 19 | 20 | if __name__ == '__main__': 21 | main() 22 | 23 | -------------------------------------------------------------------------------- /example/yolov4/README.md: -------------------------------------------------------------------------------- 1 | # prepare 2 | * pytorch 1.4+ 3 | 4 | * git clone 
https://github.com/ultralytics/yolov3.git 5 | 6 | * download yolov4.pt from https://drive.google.com/drive/folders/1LezFG5g3BCW6iYaV89B2i64cqEUZD7e0 , and place to yolov3/weights dir 7 | 8 | # convert model to onnx 9 | 1. replace xxx/example/yolov4/models.py in https://github.com/ultralytics/yolov3.git 10 | 11 | 2. python3 detect.py --cfg cfg/yolov4.cfg --weights weights/yolov4.pt, you will get a yolov4.onnx 12 | 13 | 3. python3 simplify_yolov4_onnx.py to get yolov4_simplify.onnx 14 | 15 | 4. cd xxx/example/yolov4/, and run sh onnx_to_tensorrt.sh, generate net_graph.json net_weights.bin for c++ code. 16 | 17 | -------------------------------------------------------------------------------- /example/yolov4/onnx_to_tensorrt.sh: -------------------------------------------------------------------------------- 1 | python ../../python_scripts/parse_onnx_model_new.py ./yolov4_simplify.onnx -------------------------------------------------------------------------------- /example/yolov4/simplify_yolov4_onnx.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import os 4 | import onnx 5 | import onnxmltools 6 | from onnxsim import simplify 7 | from torchsummary import summary 8 | 9 | def main(): 10 | torch.manual_seed(1234) 11 | tmp = torch.ones(1, 3, 512, 384) 12 | execute_path = os.path.dirname(os.path.realpath(__file__)) 13 | onnx_file = os.path.join(execute_path, "yolov4.onnx") 14 | 15 | model = onnx.load(onnx_file) 16 | model_simp, check = simplify(model) 17 | onnx_simplify_file = os.path.join(execute_path, "yolov4_simplify.onnx") 18 | onnxmltools.utils.save_model(model_simp, onnx_simplify_file) 19 | 20 | if __name__ == '__main__': 21 | main() 22 | 23 | -------------------------------------------------------------------------------- /example/yolov5/README.md: -------------------------------------------------------------------------------- 1 | # prepare 2 | * pytorch 1.4+ 3 | 4 | * git clone 
https://github.com/ultralytics/yolov5.git 5 | 6 | * download yolov5s.pt/yolov5m.pt/yolov5l.pt/yolov5x.pt from https://drive.google.com/drive/folders/1Drs_Aiu7xx6S-ix95f9kNsA6ueKRpN2J , and place to yolov5/weights dir 7 | 8 | # convert model to onnx 9 | 1. export PYTHONPATH="$PWD" && python models/onnx_export.py --weights ./weights/yolov5s.pt --img 640 --batch 1 10 | 11 | 2. cp yolov5s.onnx to tensorrt_wrapper_for_onnx/example/yolov5 dir 12 | 13 | 3. cd example/yolov5 ,and run python3 simplify_yolov5s_onnx.py to get yolov5s_simplify.onnx 14 | 15 | 4. run sh onnx_to_tensorrt.sh, generate net_graph.json net_weights.bin for c++ code. 16 | 17 | -------------------------------------------------------------------------------- /example/yolov5/onnx_to_tensorrt.sh: -------------------------------------------------------------------------------- 1 | python ../../python_scripts/parse_onnx_model_new.py ./yolov5s_simplify.onnx 2 | # python ../../python_scripts/parse_onnx_model_new.py ./yolov5m_simplify.onnx 3 | # python ../../python_scripts/parse_onnx_model_new.py ./yolov5l_simplify.onnx 4 | # python ../../python_scripts/parse_onnx_model_new.py ./yolov5x_simplify.onnx -------------------------------------------------------------------------------- /execution_info/dataformat_convert_execution_info.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __DATAFORMAT_CONVERT_EXECUTION_HPP__ 2 | #define __DATAFORMAT_CONVERT_EXECUTION_HPP__ 3 | #include "execution_info.hpp" 4 | 5 | namespace tensorrtInference 6 | { 7 | class DataFormatConvertExecutionInfo : public ExecutionInfo 8 | { 9 | public: 10 | DataFormatConvertExecutionInfo(CUDARuntime *runtime, 11 | std::map> &tensorsInfo, Json::Value& root); 12 | ~DataFormatConvertExecutionInfo(); 13 | bool init(Json::Value& root) override; 14 | void run() override; 15 | private: 16 | std::string convertType; 17 | int blockSize; 18 | int gridSize; 19 | int totalElementSize; 20 | Buffer* srcTensor; 21 
| Buffer* dstTensor; 22 | }; 23 | } // namespace tensorrtInference 24 | 25 | #endif -------------------------------------------------------------------------------- /execution_info/datatype_convert_execution_info.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __DATATYPE_CONVERT_EXECUTION_INFO_HPP__ 2 | #define __DATATYPE_CONVERT_EXECUTION_INFO_HPP__ 3 | #include "execution_info.hpp" 4 | 5 | namespace tensorrtInference 6 | { 7 | class DataTypeConvertExecutionInfo : public ExecutionInfo 8 | { 9 | public: 10 | DataTypeConvertExecutionInfo(CUDARuntime *runtime, 11 | std::map> &tensorsInfo, Json::Value& root); 12 | ~DataTypeConvertExecutionInfo(); 13 | bool init(Json::Value& root) override; 14 | void run() override; 15 | private: 16 | std::string convertType; 17 | int blockSize; 18 | int gridSize; 19 | int totalElementSize; 20 | Buffer* srcTensor; 21 | Buffer* dstTensor; 22 | }; 23 | } // namespace tensorrtInference 24 | 25 | #endif -------------------------------------------------------------------------------- /execution_info/normalization_execution_info.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __NORMALIZATION_EXECUTION_INFO_HPP__ 2 | #define __NORMALIZATION_EXECUTION_INFO_HPP__ 3 | #include "execution_info.hpp" 4 | 5 | namespace tensorrtInference 6 | { 7 | class NormalizationExecutionInfo : public ExecutionInfo 8 | { 9 | public: 10 | NormalizationExecutionInfo(CUDARuntime *runtime, 11 | std::map> &tensorsInfo, Json::Value& root); 12 | ~NormalizationExecutionInfo(); 13 | bool init(Json::Value& root) override; 14 | void run() override; 15 | private: 16 | float alpha; 17 | float beta; 18 | float bias; 19 | int blockSize; 20 | int gridSize; 21 | int totalElementSize; 22 | Buffer* srcTensor; 23 | Buffer* dstTensor; 24 | }; 25 | } // namespace tensorrtInference 26 | 27 | #endif -------------------------------------------------------------------------------- 
/execution_info/onnx_model_execution_info.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __ONNX_MODEL_EXECUTION_INFO_HPP__ 2 | #define __ONNX_MODEL_EXECUTION_INFO_HPP__ 3 | #include 4 | #include 5 | #include "cuda_runtime.hpp" 6 | #include "execution_info.hpp" 7 | 8 | namespace tensorrtInference 9 | { 10 | class OnnxModelExecutionInfo : public ExecutionInfo 11 | { 12 | public: 13 | OnnxModelExecutionInfo(CUDARuntime *runtime, 14 | std::map> &tensorsInfo, Json::Value& root); 15 | ~OnnxModelExecutionInfo(); 16 | bool init(Json::Value& root) override; 17 | void run() override; 18 | private: 19 | Logger mLogger; 20 | nvinfer1::IRuntime* inferRuntime; 21 | nvinfer1::ICudaEngine* cudaEngine; 22 | nvinfer1::IExecutionContext* executionContext; 23 | std::vector engineBufferArray; 24 | int batchSize = 1; 25 | }; 26 | } // namespace tensorrtInference 27 | 28 | #endif -------------------------------------------------------------------------------- /execution_info/reshape_execution_info.cu: -------------------------------------------------------------------------------- 1 | #include "reshape_execution_info.hpp" 2 | 3 | namespace tensorrtInference 4 | { 5 | 6 | ReshapeExecutionInfo::ReshapeExecutionInfo(CUDARuntime *runtime, 7 | std::map> &tensorsInfo, Json::Value& root) : ExecutionInfo(runtime, tensorsInfo, root) 8 | { 9 | newShape.clear(); 10 | totalElementSize = 0; 11 | srcTensor = nullptr; 12 | dstTensor = nullptr; 13 | } 14 | 15 | ReshapeExecutionInfo::~ReshapeExecutionInfo() 16 | { 17 | } 18 | 19 | bool ReshapeExecutionInfo::init(Json::Value& root) 20 | { 21 | int size = root["attr"]["shape"].size(); 22 | int count = 1; 23 | for(int i = 0; i < size; i++) 24 | { 25 | int dim = root["attr"]["shape"][i].asInt(); 26 | newShape.push_back(dim); 27 | count *= dim; 28 | } 29 | 30 | auto runtime = getCudaRuntime(); 31 | auto srcTensorNames = getInputTensorNames(); 32 | auto dstTensorNames = getOutputTensorNames(); 33 | 
CHECK_ASSERT(srcTensorNames.size() == dstTensorNames.size(), "input tensor size should be equal to output!\n"); 34 | CHECK_ASSERT(srcTensorNames.size() == 1, "input tensor size should be equal to 1!\n"); 35 | auto tensorsInfo = getTensorsInfo(); 36 | srcTensor = tensorsInfo[srcTensorNames[0]]; 37 | dstTensor = tensorsInfo[dstTensorNames[0]]; 38 | totalElementSize = srcTensor->getElementCount(); 39 | CHECK_ASSERT(count == totalElementSize, "src tensor elemet count should equal to dst tensor!\n"); 40 | recycleBuffers(); 41 | return true; 42 | } 43 | 44 | void ReshapeExecutionInfo::run() 45 | { 46 | auto runtime = getCudaRuntime(); 47 | auto stream = runtime->stream(); 48 | beforeRun(); 49 | runtime->copyFromDeviceToDevice(srcTensor, dstTensor); 50 | cudaError_t cudastatus = cudaGetLastError(); 51 | CHECK_ASSERT(cudastatus == cudaSuccess, "launch reshape kernel fail: %s\n", cudaGetErrorString(cudastatus)); 52 | // { 53 | // printBuffer(dstTensor, 0, 10); 54 | // } 55 | afterRun(); 56 | return; 57 | } 58 | } -------------------------------------------------------------------------------- /execution_info/reshape_execution_info.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __RESHAPE_EXECUTION_INFO_HPP__ 2 | #define __RESHAPE_EXECUTION_INFO_HPP__ 3 | #include "execution_info.hpp" 4 | 5 | namespace tensorrtInference 6 | { 7 | class ReshapeExecutionInfo : public ExecutionInfo 8 | { 9 | public: 10 | ReshapeExecutionInfo(CUDARuntime *runtime, 11 | std::map> &tensorsInfo, Json::Value& root); 12 | ~ReshapeExecutionInfo(); 13 | bool init(Json::Value& root) override; 14 | void run() override; 15 | private: 16 | std::vector newShape; 17 | int totalElementSize; 18 | Buffer* srcTensor; 19 | Buffer* dstTensor; 20 | }; 21 | } // namespace tensorrtInference 22 | 23 | #endif -------------------------------------------------------------------------------- /execution_info/transpose_execution_info.hpp: 
-------------------------------------------------------------------------------- 1 | #ifndef __TRANSSPOSE_EXECUTION_INFO_HPP__ 2 | #define __TRANSSPOSE_EXECUTION_INFO_HPP__ 3 | #include "execution_info.hpp" 4 | #define TRANSPOSE_MAX_DIMENSION 4 5 | 6 | namespace tensorrtInference 7 | { 8 | class TransposeExecutionInfo : public ExecutionInfo 9 | { 10 | public: 11 | TransposeExecutionInfo(CUDARuntime *runtime, 12 | std::map> &tensorsInfo, Json::Value& root); 13 | ~TransposeExecutionInfo(); 14 | bool init(Json::Value& root) override; 15 | void run() override; 16 | private: 17 | int blockSize; 18 | int gridSize; 19 | int totalElementSize; 20 | int shapeSize; 21 | std::shared_ptr inputShape; 22 | std::shared_ptr inputAxis; 23 | Buffer* srcTensor; 24 | Buffer* dstTensor; 25 | }; 26 | } // namespace tensorrtInference 27 | 28 | #endif -------------------------------------------------------------------------------- /execution_info/yolo_nms_execution_info.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __YOLO_NMS_EXECUTION_INFO_HPP__ 2 | #define __YOLO_NMS_EXECUTION_INFO_HPP__ 3 | #include "execution_info.hpp" 4 | 5 | 6 | namespace tensorrtInference 7 | { 8 | class YoloNMSExecutionInfo : public ExecutionInfo 9 | { 10 | public: 11 | YoloNMSExecutionInfo(CUDARuntime *runtime, 12 | std::map> &tensorsInfo, Json::Value& root); 13 | ~YoloNMSExecutionInfo(); 14 | bool init(Json::Value& root) override; 15 | void run() override; 16 | void callYoloNMSExecutionKernel(); 17 | void recycleBuffers(); 18 | private: 19 | std::shared_ptr sortIdxBuffer; 20 | std::shared_ptr sortProbBuffer; 21 | std::shared_ptr idxBuffer; 22 | std::shared_ptr probBuffer; 23 | std::shared_ptr maskBuffer; 24 | std::shared_ptr cubBuffer; 25 | std::shared_ptr maskRemoveBuffer; 26 | int boxesNum; 27 | int classesNum; 28 | int cubBufferSize; 29 | int imgHeight; 30 | int imgWidth; 31 | float confThresh; 32 | float iouThresh; 33 | }; 34 | } // namespace 
tensorrtInference 35 | 36 | #endif -------------------------------------------------------------------------------- /execution_parse.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __EXECUTION_PARSE_HPP__ 2 | #define __EXECUTION_PARSE_HPP__ 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "utils.hpp" 9 | #include "json/json.h" 10 | #include "execution_info.hpp" 11 | 12 | 13 | namespace tensorrtInference 14 | { 15 | class executionParse { 16 | public: 17 | executionParse(CUDARuntime *cudaRuntime, std::string &jsonFile); 18 | ~executionParse(); 19 | const std::vector& getTopoNodeOrder(); 20 | const std::map>& getTensorsInfo(); 21 | const std::map>& getExecutionInfoMap(); 22 | bool getInitFlag() {return initFlag;} 23 | void runInference(); 24 | std::map getInferenceResult(); 25 | private: 26 | bool extractExecutionInfo(Json::Value &root); 27 | CUDARuntime* getCudaRuntime() {return cudaRuntime;} 28 | std::vector topoExecutionInfoOrder; 29 | std::map> executionInfoMap; 30 | std::map> tensorsInfo; 31 | std::vector inputTensorNames; 32 | std::vector outputTensorNames; 33 | CUDARuntime *cudaRuntime; 34 | bool initFlag = false; 35 | }; 36 | } //tensorrtInference 37 | 38 | #endif //__EXECUTION_PARSE_HPP__ -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: LLVM 2 | DerivePointerAlignment: false 3 | PointerAlignment: Left 4 | 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.clang-tidy: -------------------------------------------------------------------------------- 1 | --- 2 | Checks: 
'google-readability-casting,modernize-deprecated-headers,modernize-loop-convert,modernize-use-auto,modernize-use-default-member-init,modernize-use-using,readability-else-after-return,readability-redundant-member-init,readability-redundant-string-cstr' 3 | WarningsAsErrors: '' 4 | HeaderFilterRegex: '' 5 | AnalyzeTemporaryDtors: false 6 | FormatStyle: none 7 | CheckOptions: 8 | - key: modernize-use-using.IgnoreMacros 9 | value: '0' 10 | ... 11 | 12 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | *.h text 3 | *.cpp text 4 | *.json text 5 | *.in text 6 | *.sh eol=lf 7 | *.bat eol=crlf 8 | *.vcproj eol=crlf 9 | *.vcxproj eol=crlf 10 | *.sln eol=crlf 11 | devtools/agent_vm* eol=crlf 12 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. 16 | 17 | **Expected behavior** 18 | A clear and concise description of what you expected to happen. 19 | 20 | **Desktop (please complete the following information):** 21 | - OS: [e.g. iOS] 22 | - Meson version 23 | - Ninja version 24 | 25 | **Additional context** 26 | Add any other context about the problem here. 
27 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.gitignore: -------------------------------------------------------------------------------- 1 | /build/ 2 | /build-*/ 3 | *.pyc 4 | *.swp 5 | *.actual 6 | *.actual-rewrite 7 | *.process-output 8 | *.rewrite 9 | /bin/ 10 | /libs/ 11 | /doc/doxyfile 12 | /dist/ 13 | 14 | # MSVC project files: 15 | *.sln 16 | *.vcxproj 17 | *.filters 18 | *.user 19 | *.sdf 20 | *.opensdf 21 | *.suo 22 | 23 | # MSVC build files: 24 | *.lib 25 | *.obj 26 | *.tlog/ 27 | *.pdb 28 | 29 | # CMake-generated files: 30 | CMakeFiles/ 31 | *.cmake 32 | /pkg-config/jsoncpp.pc 33 | jsoncpp_lib_static.dir/ 34 | 35 | # In case someone runs cmake in the root-dir: 36 | /CMakeCache.txt 37 | /Makefile 38 | /include/Makefile 39 | /src/Makefile 40 | /src/jsontestrunner/Makefile 41 | /src/jsontestrunner/jsontestrunner_exe 42 | /src/lib_json/Makefile 43 | /src/test_lib_json/Makefile 44 | /src/test_lib_json/jsoncpp_test 45 | *.a 46 | 47 | # eclipse project files 48 | .project 49 | .cproject 50 | /.settings/ 51 | 52 
| # DS_Store 53 | .DS_Store 54 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.travis_scripts/run-clang-format.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | python $DIR/run-clang-format.py -r $DIR/../src/**/ $DIR/../include/**/ -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.travis_scripts/travis.before_install.linux.sh: -------------------------------------------------------------------------------- 1 | set -vex 2 | 3 | # Preinstalled versions of python are dependent on which Ubuntu distribution 4 | # you are running. The below version needs to be updated whenever we roll 5 | # the Ubuntu version used in Travis. 6 | # https://docs.travis-ci.com/user/languages/python/ 7 | 8 | pyenv global 3.7.1 9 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.travis_scripts/travis.before_install.osx.sh: -------------------------------------------------------------------------------- 1 | # NOTHING TO DO HERE 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.travis_scripts/travis.install.linux.sh: -------------------------------------------------------------------------------- 1 | set -vex 2 | 3 | wget https://github.com/ninja-build/ninja/releases/download/v1.9.0/ninja-linux.zip 4 | unzip -q ninja-linux.zip -d build 5 | 6 | pip3 install meson 7 | echo ${PATH} 8 | ls /usr/local 9 | ls /usr/local/bin 10 | export PATH="${PWD}"/build:/usr/local/bin:/usr/bin:${PATH} 11 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/.travis_scripts/travis.install.osx.sh: -------------------------------------------------------------------------------- 1 | # NOTHING TO DO HERE 2 | 
-------------------------------------------------------------------------------- /jsoncpp-00.11.0/appveyor.yml: -------------------------------------------------------------------------------- 1 | clone_folder: c:\projects\jsoncpp 2 | 3 | environment: 4 | matrix: 5 | - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 6 | CMAKE_GENERATOR: Visual Studio 14 2015 7 | - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 8 | CMAKE_GENERATOR: Visual Studio 14 2015 Win64 9 | - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 10 | CMAKE_GENERATOR: Visual Studio 15 2017 11 | - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 12 | CMAKE_GENERATOR: Visual Studio 15 2017 Win64 13 | 14 | build_script: 15 | - cmake --version 16 | - cd c:\projects\jsoncpp 17 | - cmake -G "%CMAKE_GENERATOR%" -DCMAKE_INSTALL_PREFIX:PATH=%CD:\=/%/install -DBUILD_SHARED_LIBS:BOOL=ON . 18 | # Use ctest to make a dashboard build: 19 | # - ctest -D Experimental(Start|Update|Configure|Build|Test|Coverage|MemCheck|Submit) 20 | # NOTE: Testing on window is not yet finished: 21 | # - ctest -C Release -D ExperimentalStart -D ExperimentalConfigure -D ExperimentalBuild -D ExperimentalTest -D ExperimentalSubmit 22 | - ctest -C Release -D ExperimentalStart -D ExperimentalConfigure -D ExperimentalBuild -D ExperimentalSubmit 23 | # Final step is to verify that installation succeeds 24 | - cmake --build . --config Release --target install 25 | 26 | deploy: 27 | provider: GitHub 28 | auth_token: 29 | secure: K2Tp1q8pIZ7rs0Ot24ZMWuwr12Ev6Tc6QkhMjGQxoQG3ng1pXtgPasiJ45IDXGdg 30 | on: 31 | branch: master 32 | appveyor_repo_tag: true 33 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/dev.makefile: -------------------------------------------------------------------------------- 1 | # This is only for jsoncpp developers/contributors. 2 | # We use this to sign releases, generate documentation, etc. 
3 | VER?=$(shell cat version.txt) 4 | 5 | default: 6 | @echo "VER=${VER}" 7 | sign: jsoncpp-${VER}.tar.gz 8 | gpg --armor --detach-sign $< 9 | gpg --verify $<.asc 10 | # Then upload .asc to the release. 11 | jsoncpp-%.tar.gz: 12 | curl https://github.com/open-source-parsers/jsoncpp/archive/$*.tar.gz -o $@ 13 | dox: 14 | python doxybuild.py --doxygen=$$(which doxygen) --in doc/web_doxyfile.in 15 | rsync -va -c --delete dist/doxygen/jsoncpp-api-html-${VER}/ ../jsoncpp-docs/doxygen/ 16 | # Then 'git add -A' and 'git push' in jsoncpp-docs. 17 | build: 18 | mkdir -p build/debug 19 | cd build/debug; cmake -DCMAKE_BUILD_TYPE=debug -DBUILD_SHARED_LIBS=ON -G "Unix Makefiles" ../.. 20 | make -C build/debug 21 | 22 | # Currently, this depends on include/json/version.h generated 23 | # by cmake. 24 | test-amalgamate: 25 | python2.7 amalgamate.py 26 | python3.4 amalgamate.py 27 | cd dist; gcc -I. -c jsoncpp.cpp 28 | 29 | valgrind: 30 | valgrind --error-exitcode=42 --leak-check=full ./build/debug/src/test_lib_json/jsoncpp_test 31 | 32 | clean: 33 | \rm -rf *.gz *.asc dist/ 34 | 35 | .PHONY: build 36 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/devtools/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2010 Baptiste Lepilleur and The JsonCpp Authors 2 | # Distributed under MIT license, or public domain if desired and 3 | # recognized in your jurisdiction. 
4 | # See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE 5 | 6 | # module 7 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/devtools/agent_vmw7.json: -------------------------------------------------------------------------------- 1 | { 2 | "cmake_variants" : [ 3 | {"name": "generator", 4 | "generators": [ 5 | {"generator": [ 6 | "Visual Studio 7 .NET 2003", 7 | "Visual Studio 9 2008", 8 | "Visual Studio 9 2008 Win64", 9 | "Visual Studio 10", 10 | "Visual Studio 10 Win64", 11 | "Visual Studio 11", 12 | "Visual Studio 11 Win64" 13 | ] 14 | }, 15 | {"generator": ["MinGW Makefiles"], 16 | "env_prepend": [{"path": "c:/wut/prg/MinGW/bin"}] 17 | } 18 | ] 19 | }, 20 | {"name": "shared_dll", 21 | "variables": [ 22 | ["BUILD_SHARED_LIBS=true"], 23 | ["BUILD_SHARED_LIBS=false"] 24 | ] 25 | }, 26 | {"name": "build_type", 27 | "build_types": [ 28 | "debug", 29 | "release" 30 | ] 31 | } 32 | ] 33 | } 34 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/devtools/agent_vmxp.json: -------------------------------------------------------------------------------- 1 | { 2 | "cmake_variants" : [ 3 | {"name": "generator", 4 | "generators": [ 5 | {"generator": [ 6 | "Visual Studio 6", 7 | "Visual Studio 7", 8 | "Visual Studio 8 2005" 9 | ] 10 | } 11 | ] 12 | }, 13 | {"name": "shared_dll", 14 | "variables": [ 15 | ["BUILD_SHARED_LIBS=true"], 16 | ["BUILD_SHARED_LIBS=false"] 17 | ] 18 | }, 19 | {"name": "build_type", 20 | "build_types": [ 21 | "debug", 22 | "release" 23 | ] 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/doc/footer.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 12 | 13 | 14 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/doc/header.html: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | $treeview 13 | $search 14 | $mathjax 15 | 16 | $extrastylesheet 17 | 18 | 19 |
20 | 21 | 22 |
23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 |
$searchbox
37 |
38 | 39 | 40 | 41 | 42 | 47 | 52 | 57 | 60 | 61 |
43 | 44 | JsonCpp project page 45 | 46 | 48 | 49 | Classes 50 | 51 | 53 | 54 | Namespace 55 | 56 | 58 | JsonCpp home page 59 |
62 | 63 |
64 | 65 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/doc/readme.txt: -------------------------------------------------------------------------------- 1 | The documentation is generated using doxygen (http://www.doxygen.org). 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/doc/roadmap.dox: -------------------------------------------------------------------------------- 1 | /*! \page roadmap JsonCpp roadmap 2 | Moved to: https://github.com/open-source-parsers/jsoncpp/wiki/Roadmap 3 | */ 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/example/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | #vim: et ts =4 sts = 4 sw = 4 tw = 0 2 | set(EXAMPLES 3 | readFromString 4 | readFromStream 5 | stringWrite 6 | streamWrite 7 | ) 8 | add_definitions(-D_GLIBCXX_USE_CXX11_ABI) 9 | 10 | if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") 11 | add_compile_options(-Wall -Wextra) 12 | elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") 13 | add_definitions( 14 | -D_SCL_SECURE_NO_WARNINGS 15 | -D_CRT_SECURE_NO_WARNINGS 16 | -D_WIN32_WINNT=0x601 17 | -D_WINSOCK_DEPRECATED_NO_WARNINGS 18 | ) 19 | endif() 20 | 21 | foreach(example ${EXAMPLES}) 22 | add_executable(${example} ${example}/${example}.cpp) 23 | target_include_directories(${example} PUBLIC ${CMAKE_SOURCE_DIR}/include) 24 | target_link_libraries(${example} jsoncpp_lib) 25 | endforeach() 26 | 27 | add_custom_target(examples ALL DEPENDS ${EXAMPLES}) 28 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/example/README.md: -------------------------------------------------------------------------------- 1 | ***NOTE*** 2 | 3 | If you get linker errors about undefined references to symbols that involve types in the `std::__cxx11` namespace or the tag 4 | 
`[abi:cxx11]` then it probably indicates that you are trying to link together object files that were compiled with different 5 | values for the _GLIBCXX_USE_CXX11_ABI marco. This commonly happens when linking to a third-party library that was compiled with 6 | an older version of GCC. If the third-party library cannot be rebuilt with the new ABI, then you need to recompile your code with 7 | the old ABI,just like: 8 | **g++ stringWrite.cpp -ljsoncpp -std=c++11 -D_GLIBCXX_USE_CXX11_ABI=0 -o stringWrite** 9 | 10 | Not all of uses of the new ABI will cause changes in symbol names, for example a class with a `std::string` member variable will 11 | have the same mangled name whether compiled with the older or new ABI. In order to detect such problems, the new types and functions 12 | are annotated with the abi_tag attribute, allowing the compiler to warn about potential ABI incompatibilities in code using them. 13 | Those warnings can be enabled with the `-Wabi-tag` option. 14 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/example/readFromStream/errorFormat.json: -------------------------------------------------------------------------------- 1 | { 2 | 1: "value" 3 | } -------------------------------------------------------------------------------- /jsoncpp-00.11.0/example/readFromStream/readFromStream.cpp: -------------------------------------------------------------------------------- 1 | #include "json/json.h" 2 | #include 3 | #include 4 | #include 5 | /** \brief Parse from stream, collect comments and capture error info. 
6 | * Example Usage: 7 | * $g++ readFromStream.cpp -ljsoncpp -std=c++11 -o readFromStream 8 | * $./readFromStream 9 | * // comment head 10 | * { 11 | * // comment before 12 | * "key" : "value" 13 | * } 14 | * // comment after 15 | * // comment tail 16 | */ 17 | int main(int argc, char* argv[]) { 18 | Json::Value root; 19 | std::ifstream ifs; 20 | ifs.open(argv[1]); 21 | 22 | Json::CharReaderBuilder builder; 23 | builder["collectComments"] = true; 24 | JSONCPP_STRING errs; 25 | if (!parseFromStream(builder, ifs, &root, &errs)) { 26 | std::cout << errs << std::endl; 27 | return EXIT_FAILURE; 28 | } 29 | std::cout << root << std::endl; 30 | return EXIT_SUCCESS; 31 | } 32 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/example/readFromStream/withComment.json: -------------------------------------------------------------------------------- 1 | // comment head 2 | { 3 | // comment before 4 | "key" : "value" 5 | // comment after 6 | }// comment tail 7 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/example/readFromString/readFromString.cpp: -------------------------------------------------------------------------------- 1 | #include "json/json.h" 2 | #include 3 | #include 4 | /** 5 | * \brief Parse a raw string into Value object using the CharReaderBuilder 6 | * class, or the legacy Reader class. 
7 | * Example Usage: 8 | * $g++ readFromString.cpp -ljsoncpp -std=c++11 -o readFromString 9 | * $./readFromString 10 | * colin 11 | * 20 12 | */ 13 | int main() { 14 | const std::string rawJson = "{\"Age\": 20, \"Name\": \"colin\"}"; 15 | const int rawJsonLength = static_cast(rawJson.length()); 16 | JSONCPP_CONST bool shouldUseOldWay = false; 17 | JSONCPP_STRING err; 18 | Json::Value root; 19 | 20 | if (shouldUseOldWay) { 21 | Json::Reader reader; 22 | reader.parse(rawJson, root); 23 | } else { 24 | Json::CharReaderBuilder builder; 25 | Json::CharReader* reader(builder.newCharReader()); 26 | if (!reader->parse(rawJson.c_str(), rawJson.c_str() + rawJsonLength, &root, 27 | &err)) { 28 | std::cout << "error" << std::endl; 29 | return EXIT_FAILURE; 30 | } 31 | delete reader; 32 | } 33 | const std::string name = root["Name"].asString(); 34 | const int age = root["Age"].asInt(); 35 | 36 | std::cout << name << std::endl; 37 | std::cout << age << std::endl; 38 | return EXIT_SUCCESS; 39 | } 40 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/example/streamWrite/streamWrite.cpp: -------------------------------------------------------------------------------- 1 | #include "json/json.h" 2 | #include 3 | /** \brief Write the Value object to a stream. 
4 | * Example Usage: 5 | * $g++ streamWrite.cpp -ljsoncpp -std=c++11 -o streamWrite 6 | * $./streamWrite 7 | * { 8 | * "Age" : 20, 9 | * "Name" : "robin" 10 | * } 11 | */ 12 | int main() { 13 | Json::Value root; 14 | Json::StreamWriterBuilder builder; 15 | Json::StreamWriter* writer(builder.newStreamWriter()); 16 | 17 | root["Name"] = "robin"; 18 | root["Age"] = 20; 19 | writer->write(root, &std::cout); 20 | delete writer; 21 | return EXIT_SUCCESS; 22 | } 23 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/example/stringWrite/stringWrite.cpp: -------------------------------------------------------------------------------- 1 | #include "json/json.h" 2 | #include 3 | #include 4 | /** \brief Write a Value object to a string. 5 | * Example Usage: 6 | * $g++ stringWrite.cpp -ljsoncpp -std=c++11 -o stringWrite 7 | * $./stringWrite 8 | * { 9 | * "action" : "run", 10 | * "data" : 11 | * { 12 | * "number" : 1 13 | * } 14 | * } 15 | */ 16 | int main() { 17 | Json::Value root; 18 | Json::Value data; 19 | JSONCPP_CONST bool shouldUseOldWay = false; 20 | root["action"] = "run"; 21 | data["number"] = 1; 22 | root["data"] = data; 23 | 24 | if (shouldUseOldWay) { 25 | Json::FastWriter writer; 26 | const std::string json_file = writer.write(root); 27 | std::cout << json_file << std::endl; 28 | } else { 29 | Json::StreamWriterBuilder builder; 30 | const std::string json_file = Json::writeString(builder, root); 31 | std::cout << json_file << std::endl; 32 | } 33 | return EXIT_SUCCESS; 34 | } 35 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/include/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | file(GLOB INCLUDE_FILES "json/*.h") 2 | install(FILES 3 | ${INCLUDE_FILES} 4 | DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/json) 5 | 6 | -------------------------------------------------------------------------------- 
/jsoncpp-00.11.0/include/json/forwards.h: -------------------------------------------------------------------------------- 1 | // Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors 2 | // Distributed under MIT license, or public domain if desired and 3 | // recognized in your jurisdiction. 4 | // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE 5 | 6 | #ifndef JSON_FORWARDS_H_INCLUDED 7 | #define JSON_FORWARDS_H_INCLUDED 8 | 9 | #if !defined(JSON_IS_AMALGAMATION) 10 | #include "config.h" 11 | #endif // if !defined(JSON_IS_AMALGAMATION) 12 | 13 | namespace Json { 14 | 15 | // writer.h 16 | class StreamWriter; 17 | class StreamWriterBuilder; 18 | class Writer; 19 | class FastWriter; 20 | class StyledWriter; 21 | class StyledStreamWriter; 22 | 23 | // reader.h 24 | class Reader; 25 | class CharReader; 26 | class CharReaderBuilder; 27 | 28 | // json_features.h 29 | class Features; 30 | 31 | // value.h 32 | typedef unsigned int ArrayIndex; 33 | class StaticString; 34 | class Path; 35 | class PathArgument; 36 | class Value; 37 | class ValueIteratorBase; 38 | class ValueIterator; 39 | class ValueConstIterator; 40 | 41 | } // namespace Json 42 | 43 | #endif // JSON_FORWARDS_H_INCLUDED 44 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/include/json/json.h: -------------------------------------------------------------------------------- 1 | // Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors 2 | // Distributed under MIT license, or public domain if desired and 3 | // recognized in your jurisdiction. 
4 | // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE 5 | 6 | #ifndef JSON_JSON_H_INCLUDED 7 | #define JSON_JSON_H_INCLUDED 8 | 9 | #include "config.h" 10 | #include "json_features.h" 11 | #include "reader.h" 12 | #include "value.h" 13 | #include "writer.h" 14 | 15 | #endif // JSON_JSON_H_INCLUDED 16 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/include/json/json_features.h: -------------------------------------------------------------------------------- 1 | // Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors 2 | // Distributed under MIT license, or public domain if desired and 3 | // recognized in your jurisdiction. 4 | // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE 5 | 6 | #ifndef JSON_FEATURES_H_INCLUDED 7 | #define JSON_FEATURES_H_INCLUDED 8 | 9 | #if !defined(JSON_IS_AMALGAMATION) 10 | #include "forwards.h" 11 | #endif // if !defined(JSON_IS_AMALGAMATION) 12 | 13 | #pragma pack(push, 8) 14 | 15 | namespace Json { 16 | 17 | /** \brief Configuration passed to reader and writer. 18 | * This configuration object can be used to force the Reader or Writer 19 | * to behave in a standard conforming way. 20 | */ 21 | class JSON_API Features { 22 | public: 23 | /** \brief A configuration that allows all features and assumes all strings 24 | * are UTF-8. 25 | * - C & C++ comments are allowed 26 | * - Root object can be any JSON value 27 | * - Assumes Value strings are encoded in UTF-8 28 | */ 29 | static Features all(); 30 | 31 | /** \brief A configuration that is strictly compatible with the JSON 32 | * specification. 33 | * - Comments are forbidden. 34 | * - Root object must be either an array or an object value. 
35 | * - Assumes Value strings are encoded in UTF-8 36 | */ 37 | static Features strictMode(); 38 | 39 | /** \brief Initialize the configuration like JsonConfig::allFeatures; 40 | */ 41 | Features(); 42 | 43 | /// \c true if comments are allowed. Default: \c true. 44 | bool allowComments_; 45 | 46 | /// \c true if root must be either an array or an object value. Default: \c 47 | /// false. 48 | bool strictRoot_; 49 | 50 | /// \c true if dropped null placeholders are allowed. Default: \c false. 51 | bool allowDroppedNullPlaceholders_; 52 | 53 | /// \c true if numeric object key are allowed. Default: \c false. 54 | bool allowNumericKeys_; 55 | }; 56 | 57 | } // namespace Json 58 | 59 | #pragma pack(pop) 60 | 61 | #endif // JSON_FEATURES_H_INCLUDED 62 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/include/json/version.h: -------------------------------------------------------------------------------- 1 | #ifndef JSON_VERSION_H_INCLUDED 2 | #define JSON_VERSION_H_INCLUDED 3 | 4 | // Note: version must be updated in three places when doing a release. This 5 | // annoying process ensures that amalgamate, CMake, and meson all report the 6 | // correct version. 7 | // 1. /meson.build 8 | // 2. /include/json/version.h 9 | // 3. /CMakeLists.txt 10 | // IMPORTANT: also update the SOVERSION!! 11 | 12 | #define JSONCPP_VERSION_STRING "00.11.0" 13 | #define JSONCPP_VERSION_MAJOR 00 14 | #define JSONCPP_VERSION_MINOR 11 15 | #define JSONCPP_VERSION_PATCH 0 16 | #define JSONCPP_VERSION_QUALIFIER 17 | #define JSONCPP_VERSION_HEXA \ 18 | ((JSONCPP_VERSION_MAJOR << 24) | (JSONCPP_VERSION_MINOR << 16) | \ 19 | (JSONCPP_VERSION_PATCH << 8)) 20 | 21 | #ifdef JSONCPP_USING_SECURE_MEMORY 22 | #undef JSONCPP_USING_SECURE_MEMORY 23 | #endif 24 | #define JSONCPP_USING_SECURE_MEMORY 0 25 | // If non-zero, the library zeroes any memory that it has allocated before 26 | // it frees its memory. 
27 | 28 | #endif // JSON_VERSION_H_INCLUDED 29 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/meson_options.txt: -------------------------------------------------------------------------------- 1 | option( 2 | 'tests', 3 | type : 'boolean', 4 | value : true, 5 | description : 'Enable building tests') 6 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/pkg-config/jsoncpp.pc.in: -------------------------------------------------------------------------------- 1 | prefix=@CMAKE_INSTALL_PREFIX@ 2 | exec_prefix=@CMAKE_INSTALL_PREFIX@ 3 | libdir=${exec_prefix}/@CMAKE_INSTALL_LIBDIR@ 4 | includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ 5 | 6 | Name: jsoncpp 7 | Description: A C++ library for interacting with JSON 8 | Version: @JSONCPP_VERSION@ 9 | URL: https://github.com/open-source-parsers/jsoncpp 10 | Libs: -L${libdir} -ljsoncpp 11 | Cflags: -I${includedir} 12 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(lib_json) 2 | if(JSONCPP_WITH_TESTS) 3 | add_subdirectory(jsontestrunner) 4 | add_subdirectory(test_lib_json) 5 | endif() 6 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/src/jsontestrunner/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.12.0) 2 | # The new Python3 module is much more robust than the previous PythonInterp 3 | find_package(Python3 COMPONENTS Interpreter) 4 | # Set variables for backwards compatibility with cmake < 3.12.0 5 | set(PYTHONINTERP_FOUND ${Python3_Interpreter_FOUND}) 6 | set(PYTHON_EXECUTABLE ${Python3_EXECUTABLE}) 7 | else() 8 | set(Python_ADDITIONAL_VERSIONS 3.8) 9 | find_package(PythonInterp 3) 10 | endif() 11 | 12 | 
add_executable(jsontestrunner_exe 13 | main.cpp 14 | ) 15 | 16 | if(BUILD_SHARED_LIBS) 17 | if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.12.0) 18 | add_compile_definitions( JSON_DLL ) 19 | else() 20 | add_definitions(-DJSON_DLL) 21 | endif() 22 | endif() 23 | target_link_libraries(jsontestrunner_exe jsoncpp_lib) 24 | 25 | set_target_properties(jsontestrunner_exe PROPERTIES OUTPUT_NAME jsontestrunner_exe) 26 | 27 | if(PYTHONINTERP_FOUND) 28 | # Run end to end parser/writer tests 29 | set(TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../test) 30 | set(RUNJSONTESTS_PATH ${TEST_DIR}/runjsontests.py) 31 | 32 | # Run unit tests in post-build 33 | # (default cmake workflow hides away the test result into a file, resulting in poor dev workflow?!?) 34 | add_custom_target(jsoncpp_readerwriter_tests 35 | "${PYTHON_EXECUTABLE}" -B "${RUNJSONTESTS_PATH}" $ "${TEST_DIR}/data" 36 | DEPENDS jsontestrunner_exe jsoncpp_test 37 | ) 38 | add_custom_target(jsoncpp_check DEPENDS jsoncpp_readerwriter_tests) 39 | 40 | ## Create tests for dashboard submission, allows easy review of CI results https://my.cdash.org/index.php?project=jsoncpp 41 | add_test(NAME jsoncpp_readerwriter 42 | COMMAND "${PYTHON_EXECUTABLE}" -B "${RUNJSONTESTS_PATH}" $ "${TEST_DIR}/data" 43 | WORKING_DIRECTORY "${TEST_DIR}/data" 44 | ) 45 | add_test(NAME jsoncpp_readerwriter_json_checker 46 | COMMAND "${PYTHON_EXECUTABLE}" -B "${RUNJSONTESTS_PATH}" --with-json-checker $ "${TEST_DIR}/data" 47 | WORKING_DIRECTORY "${TEST_DIR}/data" 48 | ) 49 | endif() 50 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/src/test_lib_json/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # vim: et ts=4 sts=4 sw=4 tw=0 2 | 3 | add_executable(jsoncpp_test 4 | jsontest.cpp 5 | jsontest.h 6 | fuzz.cpp 7 | fuzz.h 8 | main.cpp 9 | ) 10 | 11 | 12 | if(BUILD_SHARED_LIBS) 13 | if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.12.0) 14 | add_compile_definitions( 
JSON_DLL ) 15 | else() 16 | add_definitions( -DJSON_DLL ) 17 | endif() 18 | endif() 19 | target_link_libraries(jsoncpp_test jsoncpp_lib) 20 | 21 | # another way to solve issue #90 22 | #set_target_properties(jsoncpp_test PROPERTIES COMPILE_FLAGS -ffloat-store) 23 | 24 | ## Create tests for dashboard submission, allows easy review of CI results https://my.cdash.org/index.php?project=jsoncpp 25 | add_test(NAME jsoncpp_test 26 | COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $ 27 | ) 28 | set_target_properties(jsoncpp_test PROPERTIES OUTPUT_NAME jsoncpp_test) 29 | 30 | # Run unit tests in post-build 31 | # (default cmake workflow hides away the test result into a file, resulting in poor dev workflow?!?) 32 | if(JSONCPP_WITH_POST_BUILD_UNITTEST) 33 | add_custom_command(TARGET jsoncpp_test 34 | POST_BUILD 35 | COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $ 36 | ) 37 | endif() 38 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/src/test_lib_json/fuzz.cpp: -------------------------------------------------------------------------------- 1 | // Copyright 2007-2019 The JsonCpp Authors 2 | // Distributed under MIT license, or public domain if desired and 3 | // recognized in your jurisdiction. 
4 | // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE 5 | 6 | #include "fuzz.h" 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | namespace Json { 14 | class Exception; 15 | } 16 | 17 | extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { 18 | Json::CharReaderBuilder builder; 19 | 20 | if (size < sizeof(uint32_t)) { 21 | return 0; 22 | } 23 | 24 | const uint32_t hash_settings = static_cast(data[0]) | 25 | (static_cast(data[1]) << 8) | 26 | (static_cast(data[2]) << 16) | 27 | (static_cast(data[3]) << 24); 28 | data += sizeof(uint32_t); 29 | size -= sizeof(uint32_t); 30 | 31 | builder.settings_["failIfExtra"] = hash_settings & (1 << 0); 32 | builder.settings_["allowComments_"] = hash_settings & (1 << 1); 33 | builder.settings_["strictRoot_"] = hash_settings & (1 << 2); 34 | builder.settings_["allowDroppedNullPlaceholders_"] = hash_settings & (1 << 3); 35 | builder.settings_["allowNumericKeys_"] = hash_settings & (1 << 4); 36 | builder.settings_["allowSingleQuotes_"] = hash_settings & (1 << 5); 37 | builder.settings_["failIfExtra_"] = hash_settings & (1 << 6); 38 | builder.settings_["rejectDupKeys_"] = hash_settings & (1 << 7); 39 | builder.settings_["allowSpecialFloats_"] = hash_settings & (1 << 8); 40 | builder.settings_["collectComments"] = hash_settings & (1 << 9); 41 | builder.settings_["allowTrailingCommas_"] = hash_settings & (1 << 10); 42 | 43 | Json::CharReader* reader(builder.newCharReader()); 44 | Json::Value root; 45 | const char* data_str = reinterpret_cast(data); 46 | try { 47 | reader->parse(data_str, data_str + size, &root, JSONCPP_NULL); 48 | } catch (Json::Exception const&) { 49 | } 50 | delete reader; 51 | // Whether it succeeded or not doesn't matter. 
52 | return 0; 53 | } 54 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/src/test_lib_json/fuzz.dict: -------------------------------------------------------------------------------- 1 | # 2 | # AFL dictionary for JSON 3 | # ----------------------- 4 | # 5 | # Just the very basics. 6 | # 7 | # Inspired by a dictionary by Jakub Wilk 8 | # 9 | # https://github.com/rc0r/afl-fuzz/blob/master/dictionaries/json.dict 10 | # 11 | 12 | "0" 13 | ",0" 14 | ":0" 15 | "0:" 16 | "-1.2e+3" 17 | 18 | "true" 19 | "false" 20 | "null" 21 | 22 | "\"\"" 23 | ",\"\"" 24 | ":\"\"" 25 | "\"\":" 26 | 27 | "{}" 28 | ",{}" 29 | ":{}" 30 | "{\"\":0}" 31 | "{{}}" 32 | 33 | "[]" 34 | ",[]" 35 | ":[]" 36 | "[0]" 37 | "[[]]" 38 | 39 | "''" 40 | "\\" 41 | "\\b" 42 | "\\f" 43 | "\\n" 44 | "\\r" 45 | "\\t" 46 | "\\u0000" 47 | "\\x00" 48 | "\\0" 49 | "\\uD800\\uDC00" 50 | "\\uDBFF\\uDFFF" 51 | 52 | "\"\":0" 53 | "//" 54 | "/**/" 55 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/src/test_lib_json/fuzz.h: -------------------------------------------------------------------------------- 1 | // Copyright 2007-2010 The JsonCpp Authors 2 | // Distributed under MIT license, or public domain if desired and 3 | // recognized in your jurisdiction. 
4 | // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE 5 | 6 | #ifndef FUZZ_H_INCLUDED 7 | #define FUZZ_H_INCLUDED 8 | 9 | #include 10 | #include 11 | 12 | extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size); 13 | 14 | #endif // ifndef FUZZ_H_INCLUDED 15 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/cleantests.py: -------------------------------------------------------------------------------- 1 | # Copyright 2007 Baptiste Lepilleur and The JsonCpp Authors 2 | # Distributed under MIT license, or public domain if desired and 3 | # recognized in your jurisdiction. 4 | # See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE 5 | 6 | """Removes all files created during testing.""" 7 | 8 | import glob 9 | import os 10 | 11 | paths = [] 12 | for pattern in [ '*.actual', '*.actual-rewrite', '*.rewrite', '*.process-output' ]: 13 | paths += glob.glob('data/' + pattern) 14 | 15 | for path in paths: 16 | os.unlink(path) 17 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/fail_test_array_01.json: -------------------------------------------------------------------------------- 1 | [ 1 2 3] 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/fail_test_array_02.json: -------------------------------------------------------------------------------- 1 | [1,,] 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/fail_test_object_01.json: -------------------------------------------------------------------------------- 1 | { "count" : 1234,, } 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_01.expected: -------------------------------------------------------------------------------- 1 | .=[] 2 | 
-------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_01.json: -------------------------------------------------------------------------------- 1 | [] 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_02.expected: -------------------------------------------------------------------------------- 1 | .=[] 2 | .[0]=1 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_02.json: -------------------------------------------------------------------------------- 1 | [1] 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_03.expected: -------------------------------------------------------------------------------- 1 | .=[] 2 | .[0]=1 3 | .[1]=2 4 | .[2]=3 5 | .[3]=4 6 | .[4]=5 7 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_03.json: -------------------------------------------------------------------------------- 1 | [ 1, 2 , 3,4,5] 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_04.expected: -------------------------------------------------------------------------------- 1 | .=[] 2 | .[0]=1 3 | .[1]="abc" 4 | .[2]=12.3 5 | .[3]=-4 6 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_04.json: -------------------------------------------------------------------------------- 1 | [1, "abc" , 12.3, -4] 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_05.expected: -------------------------------------------------------------------------------- 1 | .=[] 2 | .[0]=1 3 
| .[1]=2 4 | .[2]=3 5 | .[3]=4 6 | .[4]=5 7 | .[5]=6 8 | .[6]=7 9 | .[7]=8 10 | .[8]=9 11 | .[9]=10 12 | .[10]=11 13 | .[11]=12 14 | .[12]=13 15 | .[13]=14 16 | .[14]=15 17 | .[15]=16 18 | .[16]=17 19 | .[17]=18 20 | .[18]=19 21 | .[19]=20 22 | .[20]=21 23 | .[21]=22 24 | .[22]=23 25 | .[23]=24 26 | .[24]=25 27 | .[25]=26 28 | .[26]=27 29 | .[27]=28 30 | .[28]=29 31 | .[29]=30 32 | .[30]=31 33 | .[31]=32 34 | .[32]=33 35 | .[33]=34 36 | .[34]=35 37 | .[35]=36 38 | .[36]=37 39 | .[37]=38 40 | .[38]=39 41 | .[39]=40 42 | .[40]=41 43 | .[41]=42 44 | .[42]=43 45 | .[43]=44 46 | .[44]=45 47 | .[45]=46 48 | .[46]=47 49 | .[47]=48 50 | .[48]=49 51 | .[49]=50 52 | .[50]=51 53 | .[51]=52 54 | .[52]=53 55 | .[53]=54 56 | .[54]=55 57 | .[55]=56 58 | .[56]=57 59 | .[57]=58 60 | .[58]=59 61 | .[59]=60 62 | .[60]=61 63 | .[61]=62 64 | .[62]=63 65 | .[63]=64 66 | .[64]=65 67 | .[65]=66 68 | .[66]=67 69 | .[67]=68 70 | .[68]=69 71 | .[69]=70 72 | .[70]=71 73 | .[71]=72 74 | .[72]=73 75 | .[73]=74 76 | .[74]=75 77 | .[75]=76 78 | .[76]=77 79 | .[77]=78 80 | .[78]=79 81 | .[79]=80 82 | .[80]=81 83 | .[81]=82 84 | .[82]=83 85 | .[83]=84 86 | .[84]=85 87 | .[85]=86 88 | .[86]=87 89 | .[87]=88 90 | .[88]=89 91 | .[89]=90 92 | .[90]=91 93 | .[91]=92 94 | .[92]=93 95 | .[93]=94 96 | .[94]=95 97 | .[95]=96 98 | .[96]=97 99 | .[97]=98 100 | .[98]=99 101 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_05.json: -------------------------------------------------------------------------------- 1 | [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99] 
-------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_06.expected: -------------------------------------------------------------------------------- 1 | .=[] 2 | .[0]="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" 3 | .[1]="bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" 4 | .[2]="ccccccccccccccccccccccc" 5 | .[3]="dddddddddddddddddddddddddddddddddddddddddddddddddddd" 6 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_array_06.json: -------------------------------------------------------------------------------- 1 | [ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 2 | "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", 3 | "ccccccccccccccccccccccc", 4 | "dddddddddddddddddddddddddddddddddddddddddddddddddddd" ] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_01.expected: -------------------------------------------------------------------------------- 1 | .=123456789 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_01.json: -------------------------------------------------------------------------------- 1 | 0123456789 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_02.expected: -------------------------------------------------------------------------------- 1 | .=-123456789 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_02.json: -------------------------------------------------------------------------------- 1 | -0123456789 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_03.expected: -------------------------------------------------------------------------------- 
1 | .=1.2345678 2 | 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_03.json: -------------------------------------------------------------------------------- 1 | 1.2345678 2 | 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_04.expected: -------------------------------------------------------------------------------- 1 | .="abcdef" 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_04.json: -------------------------------------------------------------------------------- 1 | "abcdef" 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_05.expected: -------------------------------------------------------------------------------- 1 | .=null 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_05.json: -------------------------------------------------------------------------------- 1 | null 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_06.expected: -------------------------------------------------------------------------------- 1 | .=true 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_06.json: -------------------------------------------------------------------------------- 1 | true 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_07.expected: -------------------------------------------------------------------------------- 1 | .=false 2 | 3 | 
-------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_07.json: -------------------------------------------------------------------------------- 1 | false 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_08.expected: -------------------------------------------------------------------------------- 1 | // C++ style comment 2 | .=null 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_08.json: -------------------------------------------------------------------------------- 1 | // C++ style comment 2 | null 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_09.expected: -------------------------------------------------------------------------------- 1 | /* C style comment 2 | */ 3 | .=null 4 | 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_basic_09.json: -------------------------------------------------------------------------------- 1 | /* C style comment 2 | */ 3 | null 4 | 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_comment_00.expected: -------------------------------------------------------------------------------- 1 | // Comment for array 2 | .=[] 3 | // Comment within array 4 | .[0]="one-element" 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_comment_00.json: -------------------------------------------------------------------------------- 1 | // Comment for array 2 | [ 3 | // Comment within array 4 | "one-element" 5 | ] 6 | -------------------------------------------------------------------------------- 
/jsoncpp-00.11.0/test/data/legacy_test_comment_01.expected: -------------------------------------------------------------------------------- 1 | .={} 2 | // Comment for array 3 | .test=[] 4 | // Comment within array 5 | .test[0]={} 6 | .test[0].a="aaa" 7 | .test[1]={} 8 | .test[1].b="bbb" 9 | .test[2]={} 10 | .test[2].c="ccc" 11 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_comment_01.json: -------------------------------------------------------------------------------- 1 | { 2 | "test": 3 | // Comment for array 4 | [ 5 | // Comment within array 6 | { "a" : "aaa" }, // Comment for a 7 | { "b" : "bbb" }, // Comment for b 8 | { "c" : "ccc" } // Comment for c 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_comment_02.expected: -------------------------------------------------------------------------------- 1 | .={} 2 | /* C-style comment 3 | 4 | C-style-2 comment */ 5 | .c-test={} 6 | .c-test.a=1 7 | /* Internal comment c-style */ 8 | .c-test.b=2 9 | // C++-style comment 10 | .cpp-test={} 11 | // Multiline comment cpp-style 12 | // Second line 13 | .cpp-test.c=3 14 | // Comment before double 15 | .cpp-test.d=4.1 16 | // Comment before string 17 | .cpp-test.e="e-string" 18 | // Comment before true 19 | .cpp-test.f=true 20 | // Comment before false 21 | .cpp-test.g=false 22 | // Comment before null 23 | .cpp-test.h=null 24 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_comment_02.json: -------------------------------------------------------------------------------- 1 | { 2 | /* C-style comment 3 | 4 | C-style-2 comment */ 5 | "c-test" : { 6 | "a" : 1, 7 | /* Internal comment c-style */ 8 | "b" : 2 9 | }, 10 | // C++-style comment 11 | "cpp-test" : { 12 | // Multiline comment cpp-style 13 | // Second line 14 | "c" : 3, 15 | // Comment 
before double 16 | "d" : 4.1, 17 | // Comment before string 18 | "e" : "e-string", 19 | // Comment before true 20 | "f" : true, 21 | // Comment before false 22 | "g" : false, 23 | // Comment before null 24 | "h" : null 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_complex_01.expected: -------------------------------------------------------------------------------- 1 | .={} 2 | .attribute=[] 3 | .attribute[0]="random" 4 | .attribute[1]="short" 5 | .attribute[2]="bold" 6 | .attribute[3]=12 7 | .attribute[4]={} 8 | .attribute[4].height=7 9 | .attribute[4].width=64 10 | .count=1234 11 | .name={} 12 | .name.aka="T.E.S.T." 13 | .name.id=123987 14 | .test={} 15 | .test.1={} 16 | .test.1.2={} 17 | .test.1.2.3={} 18 | .test.1.2.3.coord=[] 19 | .test.1.2.3.coord[0]=1 20 | .test.1.2.3.coord[1]=2 21 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_complex_01.json: -------------------------------------------------------------------------------- 1 | { 2 | "count" : 1234, 3 | "name" : { "aka" : "T.E.S.T.", "id" : 123987 }, 4 | "attribute" : [ 5 | "random", 6 | "short", 7 | "bold", 8 | 12, 9 | { "height" : 7, "width" : 64 } 10 | ], 11 | "test": { "1" : 12 | { "2" : 13 | { "3" : { "coord" : [ 1,2] } 14 | } 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_01.expected: -------------------------------------------------------------------------------- 1 | // Max signed integer 2 | .=2147483647 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_01.json: -------------------------------------------------------------------------------- 1 | // Max signed integer 2 | 2147483647 3 | 
-------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_02.expected: -------------------------------------------------------------------------------- 1 | // Min signed integer 2 | .=-2147483648 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_02.json: -------------------------------------------------------------------------------- 1 | // Min signed integer 2 | -2147483648 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_03.expected: -------------------------------------------------------------------------------- 1 | // Max unsigned integer 2 | .=4294967295 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_03.json: -------------------------------------------------------------------------------- 1 | // Max unsigned integer 2 | 4294967295 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_04.expected: -------------------------------------------------------------------------------- 1 | // Min unsigned integer 2 | .=0 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_04.json: -------------------------------------------------------------------------------- 1 | // Min unsigned integer 2 | 0 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_05.expected: -------------------------------------------------------------------------------- 1 | .=1 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_05.json: 
-------------------------------------------------------------------------------- 1 | 1 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_06_64bits.expected: -------------------------------------------------------------------------------- 1 | .=9223372036854775808 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_06_64bits.json: -------------------------------------------------------------------------------- 1 | 9223372036854775808 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_07_64bits.expected: -------------------------------------------------------------------------------- 1 | .=-9223372036854775808 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_07_64bits.json: -------------------------------------------------------------------------------- 1 | -9223372036854775808 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_08_64bits.expected: -------------------------------------------------------------------------------- 1 | .=18446744073709551615 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_integer_08_64bits.json: -------------------------------------------------------------------------------- 1 | 18446744073709551615 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_object_01.expected: -------------------------------------------------------------------------------- 1 | .={} 2 | -------------------------------------------------------------------------------- 
/jsoncpp-00.11.0/test/data/legacy_test_object_01.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_object_02.expected: -------------------------------------------------------------------------------- 1 | .={} 2 | .count=1234 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_object_02.json: -------------------------------------------------------------------------------- 1 | { "count" : 1234 } 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_object_03.expected: -------------------------------------------------------------------------------- 1 | .={} 2 | .attribute="random" 3 | .count=1234 4 | .name="test" 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_object_03.json: -------------------------------------------------------------------------------- 1 | { 2 | "count" : 1234, 3 | "name" : "test", 4 | "attribute" : "random" 5 | } 6 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_object_04.expected: -------------------------------------------------------------------------------- 1 | .={} 2 | .=1234 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_object_04.json: -------------------------------------------------------------------------------- 1 | { 2 | "" : 1234 3 | } 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_preserve_comment_01.expected: -------------------------------------------------------------------------------- 1 | /* A comment 2 | at the beginning of 
the file. 3 | */ 4 | .={} 5 | .first=1 6 | /* Comment before 'second' 7 | */ 8 | .second=2 9 | /* A comment at 10 | the end of the file. 11 | */ 12 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_preserve_comment_01.json: -------------------------------------------------------------------------------- 1 | /* A comment 2 | at the beginning of the file. 3 | */ 4 | { 5 | "first" : 1, // comment after 'first' on the same line 6 | 7 | /* Comment before 'second' 8 | */ 9 | "second" : 2 10 | } 11 | 12 | /* A comment at 13 | the end of the file. 14 | */ 15 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_01.expected: -------------------------------------------------------------------------------- 1 | // 2^33 => out of integer range, switch to double 2 | .=8589934592 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_01.json: -------------------------------------------------------------------------------- 1 | // 2^33 => out of integer range, switch to double 2 | 8589934592 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_02.expected: -------------------------------------------------------------------------------- 1 | // -2^32 => out of signed integer range, switch to double 2 | .=-4294967295 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_02.json: -------------------------------------------------------------------------------- 1 | // -2^32 => out of signed integer range, switch to double 2 | -4294967295 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_03.expected: 
-------------------------------------------------------------------------------- 1 | // -2^32 => out of signed integer range, switch to double 2 | .=-4294967295 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_03.json: -------------------------------------------------------------------------------- 1 | // -2^32 => out of signed integer range, switch to double 2 | -4294967295 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_04.expected: -------------------------------------------------------------------------------- 1 | // 1.2345678 2 | .=1.2345678 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_04.json: -------------------------------------------------------------------------------- 1 | // 1.2345678 2 | 12345678e-7 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_05.expected: -------------------------------------------------------------------------------- 1 | // 1234567.8 2 | .=1234567.8 3 | 4 | 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_05.json: -------------------------------------------------------------------------------- 1 | // 1234567.8 2 | 0.12345678e7 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_06.expected: -------------------------------------------------------------------------------- 1 | // -1.2345678 2 | .=-1.2345678 3 | 4 | 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_06.json: -------------------------------------------------------------------------------- 1 | // 
-1.2345678 2 | -12345678e-7 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_07.expected: -------------------------------------------------------------------------------- 1 | // -1234567.8 2 | .=-1234567.8 3 | 4 | 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_07.json: -------------------------------------------------------------------------------- 1 | // -1234567.8 2 | -0.12345678e7 3 | 4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_08.expected: -------------------------------------------------------------------------------- 1 | // Out of 32-bit integer range, switch to double in 32-bit mode. Length the 2 | // same as UINT_MAX in base 10 and digit less than UINT_MAX's last digit in 3 | // order to catch a bug in the parsing code. 4 | .=4300000001 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_08.json: -------------------------------------------------------------------------------- 1 | // Out of 32-bit integer range, switch to double in 32-bit mode. Length the 2 | // same as UINT_MAX in base 10 and digit less than UINT_MAX's last digit in 3 | // order to catch a bug in the parsing code. 4 | 4300000001 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_09.expected: -------------------------------------------------------------------------------- 1 | // Out of 64-bit integer range, switch to double in all modes. Length the same 2 | // as ULONG_MAX in base 10 and digit less than ULONG_MAX's last digit in order 3 | // to catch a bug in the parsing code. 
4 | .=1.9e+19 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_09.json: -------------------------------------------------------------------------------- 1 | // Out of 64-bit integer range, switch to double in all modes. Length the same 2 | // as ULONG_MAX in base 10 and digit less than ULONG_MAX's last digit in order 3 | // to catch a bug in the parsing code. 4 | 19000000000000000001 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_10.expected: -------------------------------------------------------------------------------- 1 | // Out of 32-bit signed integer range, switch to double in all modes. Length 2 | // the same as INT_MIN in base 10 and digit less than INT_MIN's last digit in 3 | // order to catch a bug in the parsing code. 4 | .=-2200000001 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_10.json: -------------------------------------------------------------------------------- 1 | // Out of 32-bit signed integer range, switch to double in all modes. Length 2 | // the same as INT_MIN in base 10 and digit less than INT_MIN's last digit in 3 | // order to catch a bug in the parsing code. 4 | -2200000001 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_11.expected: -------------------------------------------------------------------------------- 1 | // Out of 64-bit signed integer range, switch to double in all modes. Length 2 | // the same as LONG_MIN in base 10 and digit less than LONG_MIN's last digit in 3 | // order to catch a bug in the parsing code. 
4 | .=-9.3e+18 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_11.json: -------------------------------------------------------------------------------- 1 | // Out of 64-bit signed integer range, switch to double in all modes. Length 2 | // the same as LONG_MIN in base 10 and digit less than LONG_MIN's last digit in 3 | // order to catch a bug in the parsing code. 4 | -9300000000000000001 5 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_12.expected: -------------------------------------------------------------------------------- 1 | // 2^64 -> switch to double. 2 | .=1.844674407370955e+19 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_real_12.json: -------------------------------------------------------------------------------- 1 | // 2^64 -> switch to double. 
2 | 18446744073709551616 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_01.expected: -------------------------------------------------------------------------------- 1 | .="!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_01.json: -------------------------------------------------------------------------------- 1 | "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~" 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_02.expected: -------------------------------------------------------------------------------- 1 | .="!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]
^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_02.json: -------------------------------------------------------------------------------- 1 | 
"!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~" 2 | 
-------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_03.expected: -------------------------------------------------------------------------------- 1 | .="http://jsoncpp.sourceforge.net/" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_03.json: -------------------------------------------------------------------------------- 1 | "http:\/\/jsoncpp.sourceforge.net\/" 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_04.expected: -------------------------------------------------------------------------------- 1 | .=""abc\def"" 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_04.json: -------------------------------------------------------------------------------- 1 | "\"abc\\def\"" 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_05.expected: -------------------------------------------------------------------------------- 1 | .="\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\" 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_05.json: -------------------------------------------------------------------------------- 1 | "\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\" 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_01.expected: -------------------------------------------------------------------------------- 1 | .="a" 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_01.json: 
-------------------------------------------------------------------------------- 1 | "\u0061" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_02.expected: -------------------------------------------------------------------------------- 1 | .="¢" 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_02.json: -------------------------------------------------------------------------------- 1 | "\u00A2" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_03.expected: -------------------------------------------------------------------------------- 1 | .="€" 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_03.json: -------------------------------------------------------------------------------- 1 | "\u20AC" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_04.expected: -------------------------------------------------------------------------------- 1 | .="𝄞" 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_04.json: -------------------------------------------------------------------------------- 1 | "\uD834\uDD1E" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_05.expected: -------------------------------------------------------------------------------- 1 | .="Zażółć gęślą jaźń" 2 | 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/legacy_test_string_unicode_05.json: 
-------------------------------------------------------------------------------- 1 | "Zażółć gęślą jaźń" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/test_array_08.expected: -------------------------------------------------------------------------------- 1 | .=[] 2 | .[0]=1 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/test_array_08.json: -------------------------------------------------------------------------------- 1 | [1,] 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/test_object_05.expected: -------------------------------------------------------------------------------- 1 | .={} 2 | .count=1234 3 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/data/test_object_05.json: -------------------------------------------------------------------------------- 1 | { "count" : 1234, } 2 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/generate_expected.py: -------------------------------------------------------------------------------- 1 | # Copyright 2007 Baptiste Lepilleur and The JsonCpp Authors 2 | # Distributed under MIT license, or public domain if desired and 3 | # recognized in your jurisdiction. 
4 | # See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE 5 | 6 | from __future__ import print_function 7 | import glob 8 | import os.path 9 | for path in glob.glob('*.json'): 10 | text = file(path,'rt').read() 11 | target = os.path.splitext(path)[0] + '.expected' 12 | if os.path.exists(target): 13 | print('skipping:', target) 14 | else: 15 | print('creating:', target) 16 | file(target,'wt').write(text) 17 | 18 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail1.json: -------------------------------------------------------------------------------- 1 | "A JSON payload should be an object or array, not a string." -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail10.json: -------------------------------------------------------------------------------- 1 | {"Extra value after close": true} "misplaced quoted value" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail11.json: -------------------------------------------------------------------------------- 1 | {"Illegal expression": 1 + 2} -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail12.json: -------------------------------------------------------------------------------- 1 | {"Illegal invocation": alert()} -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail13.json: -------------------------------------------------------------------------------- 1 | {"Numbers cannot have leading zeroes": 013} -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail14.json: -------------------------------------------------------------------------------- 1 | {"Numbers cannot be hex": 0x14} 
-------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail15.json: -------------------------------------------------------------------------------- 1 | ["Illegal backslash escape: \x15"] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail16.json: -------------------------------------------------------------------------------- 1 | [\naked] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail17.json: -------------------------------------------------------------------------------- 1 | ["Illegal backslash escape: \017"] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail18.json: -------------------------------------------------------------------------------- 1 | [[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail19.json: -------------------------------------------------------------------------------- 1 | {"Missing colon" null} -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail2.json: -------------------------------------------------------------------------------- 1 | ["Unclosed array" -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail20.json: -------------------------------------------------------------------------------- 1 | {"Double colon":: null} -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail21.json: -------------------------------------------------------------------------------- 1 | {"Comma instead of colon", null} 
-------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail22.json: -------------------------------------------------------------------------------- 1 | ["Colon instead of comma": false] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail23.json: -------------------------------------------------------------------------------- 1 | ["Bad value", truth] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail24.json: -------------------------------------------------------------------------------- 1 | ['single quote'] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail25.json: -------------------------------------------------------------------------------- 1 | [" tab character in string "] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail26.json: -------------------------------------------------------------------------------- 1 | ["tab\ character\ in\ string\ "] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail27.json: -------------------------------------------------------------------------------- 1 | ["line 2 | break"] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail28.json: -------------------------------------------------------------------------------- 1 | ["line\ 2 | break"] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail29.json: -------------------------------------------------------------------------------- 1 | [0e] -------------------------------------------------------------------------------- 
/jsoncpp-00.11.0/test/jsonchecker/fail3.json: -------------------------------------------------------------------------------- 1 | {unquoted_key: "keys must be quoted"} -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail30.json: -------------------------------------------------------------------------------- 1 | [0e+] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail31.json: -------------------------------------------------------------------------------- 1 | [0e+-1] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail32.json: -------------------------------------------------------------------------------- 1 | {"Comma instead if closing brace": true, -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail33.json: -------------------------------------------------------------------------------- 1 | ["mismatch"} -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail4.json: -------------------------------------------------------------------------------- 1 | ["extra comma",] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail5.json: -------------------------------------------------------------------------------- 1 | ["double extra comma",,] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail6.json: -------------------------------------------------------------------------------- 1 | [ , "<-- missing value"] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail7.json: -------------------------------------------------------------------------------- 
1 | ["Comma after the close"], -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail8.json: -------------------------------------------------------------------------------- 1 | ["Extra close"]] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/fail9.json: -------------------------------------------------------------------------------- 1 | {"Extra comma": true,} -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/pass1.json: -------------------------------------------------------------------------------- 1 | [ 2 | "JSON Test Pattern pass1", 3 | {"object with 1 member":["array with 1 element"]}, 4 | {}, 5 | [], 6 | -42, 7 | true, 8 | false, 9 | null, 10 | { 11 | "integer": 1234567890, 12 | "real": -9876.543210, 13 | "e": 0.123456789e-12, 14 | "E": 1.234567890E+34, 15 | "": 23456789012E66, 16 | "zero": 0, 17 | "one": 1, 18 | "space": " ", 19 | "quote": "\"", 20 | "backslash": "\\", 21 | "controls": "\b\f\n\r\t", 22 | "slash": "/ & \/", 23 | "alpha": "abcdefghijklmnopqrstuvwyz", 24 | "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", 25 | "digit": "0123456789", 26 | "0123456789": "digit", 27 | "special": "`1~!@#$%^&*()_+-={':[,]}|;.?", 28 | "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", 29 | "true": true, 30 | "false": false, 31 | "null": null, 32 | "array":[ ], 33 | "object":{ }, 34 | "address": "50 St. James Street", 35 | "url": "http://www.JSON.org/", 36 | "comment": "// /* */": " ", 38 | " s p a c e d " :[1,2 , 3 39 | 40 | , 41 | 42 | 4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7], 43 | "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", 44 | "quotes": "" \u0022 %22 0x22 034 "", 45 | "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" 
46 | : "A key can be any string" 47 | }, 48 | 0.5 ,98.6 49 | , 50 | 99.44 51 | , 52 | 53 | 1066, 54 | 1e1, 55 | 0.1e1, 56 | 1e-1, 57 | 1e00,2e+00,2e-00 58 | ,"rosebud"] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/pass2.json: -------------------------------------------------------------------------------- 1 | [[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/pass3.json: -------------------------------------------------------------------------------- 1 | { 2 | "JSON Test Pattern pass3": { 3 | "The outermost value": "must be an object or array.", 4 | "In this test": "It is an object." 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/test/jsonchecker/readme.txt: -------------------------------------------------------------------------------- 1 | Test suite from http://json.org/JSON_checker/. 2 | 3 | If the JSON_checker is working correctly, it must accept all of the pass*.json files and reject all of the fail*.json files. 
4 | -------------------------------------------------------------------------------- /jsoncpp-00.11.0/version.in: -------------------------------------------------------------------------------- 1 | @JSONCPP_VERSION@ 2 | -------------------------------------------------------------------------------- /node_create/common.cpp: -------------------------------------------------------------------------------- 1 | #include "common.hpp" 2 | 3 | namespace tensorrtInference 4 | { 5 | bool broadcastTensor(nvinfer1::INetworkDefinition* network, nvinfer1::ITensor*& t, const int nbDims) 6 | { 7 | const nvinfer1::Dims inputDims = t->getDimensions(); 8 | const int nbInputDims = inputDims.nbDims; 9 | if (nbInputDims < nbDims) 10 | { 11 | auto shape = dimsToVector(inputDims); 12 | std::vector newShape(nbDims); 13 | int prefix = nbDims - nbInputDims; 14 | for(int i = 0; i < nbDims; i++) 15 | { 16 | if(i < prefix) 17 | newShape[i] = 1; 18 | else 19 | newShape[i] = shape[i - prefix]; 20 | } 21 | auto newDims = vectorToDims(newShape); 22 | nvinfer1::IShuffleLayer* reshape = network->addShuffle(*t); 23 | reshape->setReshapeDimensions(newDims); 24 | t = reshape->getOutput(0); 25 | } 26 | return true; 27 | } 28 | bool broadcastTensors(nvinfer1::INetworkDefinition* network, nvinfer1::ITensor*& tensor1, nvinfer1::ITensor*& tensor2) 29 | { 30 | const int t1Dims = tensor1->getDimensions().nbDims; 31 | const int t2Dims = tensor2->getDimensions().nbDims; 32 | 33 | if (t1Dims == t2Dims) 34 | { 35 | return true; 36 | } 37 | 38 | if (t1Dims > t2Dims) 39 | { 40 | return broadcastTensor(network, tensor2, t1Dims); 41 | } 42 | return broadcastTensor(network, tensor1, t2Dims); 43 | } 44 | } -------------------------------------------------------------------------------- /node_create/common.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __COMMON_HPP__ 2 | #define __COMMON_HPP__ 3 | #include "NvInfer.h" 4 | #include "cuda_runtime_api.h" 5 | #include 
"node_info.hpp" 6 | #include "weights_graph_parse.hpp" 7 | #include "utils.hpp" 8 | #include 9 | #include 10 | #include 11 | using namespace std; 12 | 13 | 14 | namespace tensorrtInference 15 | { 16 | extern bool broadcastTensors(nvinfer1::INetworkDefinition* network, nvinfer1::ITensor*& tensor1, nvinfer1::ITensor*& tensor2); 17 | } 18 | 19 | 20 | #endif //__COMMON_HPP__ -------------------------------------------------------------------------------- /node_create/create_activation_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_ACTIVATION_NODE_HPP__ 2 | #define __CREATE_ACTIVATION_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createActivationNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_batchnormalization_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_BATCHNORMALIZATION_NODE_HPP__ 2 | #define __CREATE_BATCHNORMALIZATION_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createBatchNormalizationNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } //tensorrtInference 10 | 11 | #endif //__CREATE_BATCHNORMALIZATION_NODE_HPP__ -------------------------------------------------------------------------------- /node_create/create_concatenation_node.cpp: -------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "cuda_runtime_api.h" 3 | #include "weights_graph_parse.hpp" 4 | #include "create_node.hpp" 5 | #include "create_concatenation_node.hpp" 6 | #include "concatenation_node_info.hpp" 7 | 8 | namespace 
tensorrtInference 9 | { 10 | nvinfer1::ILayer* createConcatenationNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 11 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo) 12 | { 13 | auto concatenationNodeInfo = (ConcatenationNodeInfo*)nodeConfInfo; 14 | auto inputs = concatenationNodeInfo->getInputs(); 15 | auto axis = concatenationNodeInfo->getAxis(); 16 | std::vector inputTensors; 17 | for(int i = 0; i < inputs.size(); i++) { 18 | nvinfer1::ITensor* inputTensor = (tensors.count(inputs[i]) != 0) ? tensors[inputs[i]] : nullptr; 19 | CHECK_ASSERT(inputTensor != nullptr, "get concatenation input %d tensor fail, topo order error\n", i); 20 | inputTensors.push_back(inputTensor); 21 | } 22 | auto dims = inputTensors[0]->getDimensions(); 23 | if(axis < 0) 24 | { 25 | axis = dims.nbDims + axis; 26 | CHECK_ASSERT(axis >= 0, "axis value wrong: %d\n", axis); 27 | } 28 | nvinfer1::IConcatenationLayer* concat = network->addConcatenation(inputTensors.data(), inputTensors.size()); 29 | CHECK_ASSERT(concat, "create concatenation node fail\n"); 30 | concat->setAxis(axis); 31 | return concat; 32 | } 33 | } //tensorrtInference -------------------------------------------------------------------------------- /node_create/create_concatenation_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_CONCATENATION_NODE_HPP__ 2 | #define __CREATE_CONCATENATION_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createConcatenationNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } //tensorrtInference 10 | 11 | #endif //__CREATE_CONCATENATION_NODE_HPP__ -------------------------------------------------------------------------------- /node_create/create_conv2d_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef 
__CREATE_CONV2D_NODE_HPP__ 2 | #define __CREATE_CONV2D_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createConv2dNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_elementwise_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_ELEMENTWISE_NODE_HPP__ 2 | #define __CREATE_ELEMENTWISE_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createElementWiseNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_gather_node.cpp: -------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "cuda_runtime_api.h" 3 | #include "weights_graph_parse.hpp" 4 | #include "create_node.hpp" 5 | #include "create_gather_node.hpp" 6 | #include "gather_node_info.hpp" 7 | 8 | namespace tensorrtInference 9 | { 10 | nvinfer1::ILayer* createGatherNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 11 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo) 12 | { 13 | auto gatherNodeInfo = (GatherNodeInfo*)nodeConfInfo; 14 | auto inputs = gatherNodeInfo->getInputs(); 15 | int axis = gatherNodeInfo->getAxis(); 16 | nvinfer1::ITensor* data = (tensors.count(inputs[0]) != 0) ? tensors[inputs[0]] : nullptr; 17 | nvinfer1::ITensor* indices = (tensors.count(inputs[1]) != 0) ? 
tensors[inputs[1]] : nullptr; 18 | nvinfer1::Dims dims = data->getDimensions(); 19 | CHECK_ASSERT(data != nullptr && indices != nullptr, "get gather input tensor topo order error\n"); 20 | if(axis < 0) 21 | { 22 | axis = dims.nbDims + axis; 23 | CHECK_ASSERT(axis >= 0, "axis value wrong\n"); 24 | } 25 | nvinfer1::IGatherLayer* gather = network->addGather(*data, *indices, axis); 26 | CHECK_ASSERT(gather, "create gather node fail\n"); 27 | return gather; 28 | } 29 | } -------------------------------------------------------------------------------- /node_create/create_gather_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_GATHER_NODE_HPP__ 2 | #define __CREATE_GATHER_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createGatherNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif //__CREATE_GATHER_NODE_HPP__ -------------------------------------------------------------------------------- /node_create/create_gemm_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_GEMM_NODE_HPP__ 2 | #define __CREATE_GEMM_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createGemmNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } //tensorrtInference 10 | 11 | #endif //__CREATE_GEMM_NODE_HPP__ -------------------------------------------------------------------------------- /node_create/create_identity_node.cpp: -------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "cuda_runtime_api.h" 3 | #include "weights_graph_parse.hpp" 4 | #include "create_node.hpp" 5 | #include "identity_node_info.hpp" 6 | #include "create_identity_node.hpp" 7 
| 8 | namespace tensorrtInference 9 | { 10 | nvinfer1::ILayer* createIdentityNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 11 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo) 12 | { 13 | IdentityNodeInfo *nodeConfigInfo = (IdentityNodeInfo *)nodeConfInfo; 14 | auto inputs = nodeConfigInfo->getInputs(); 15 | nvinfer1::ITensor* inputTensor = tensors[inputs[0]]; 16 | nvinfer1::IIdentityLayer* identity = network->addIdentity(*inputTensor); 17 | CHECK_ASSERT(identity, "create identity node fail\n"); 18 | int type = getTensorrtDataType(tensorrtInference::OnnxDataType(nodeConfigInfo->getDataType())); 19 | CHECK_ASSERT(type != -1, "only support float/half!\n"); 20 | identity->setOutputType(0, nvinfer1::DataType(type)); 21 | return identity; 22 | } 23 | } -------------------------------------------------------------------------------- /node_create/create_identity_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_IDENTITY_NODE_HPP__ 2 | #define __CREATE_IDENTITY_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createIdentityNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_NODE_HPP__ 2 | #define __CREATE_NODE_HPP__ 3 | #include "NvInfer.h" 4 | #include "cuda_runtime_api.h" 5 | #include "node_info.hpp" 6 | #include "weights_graph_parse.hpp" 7 | #include "utils.hpp" 8 | #include "common.hpp" 9 | #include 10 | #include 11 | #include 12 | using namespace std; 13 | 14 | namespace tensorrtInference 15 | { 16 | extern nvinfer1::ILayer* createNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 17 | 
tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 18 | } 19 | 20 | #endif -------------------------------------------------------------------------------- /node_create/create_nonzero_node.cpp: -------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "NvInferRuntimeCommon.h" 3 | #include "cuda_runtime_api.h" 4 | #include "weights_graph_parse.hpp" 5 | #include "create_node.hpp" 6 | #include "create_nonzero_node.hpp" 7 | 8 | namespace tensorrtInference 9 | { 10 | nvinfer1::ILayer* createNonZeroNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 11 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo) 12 | { 13 | auto inputs = nodeConfInfo->getInputs(); 14 | nvinfer1::ITensor* inputTensor = tensors[inputs[0]]; 15 | auto creator = getPluginRegistry()->getPluginCreator("NonZero_TRT", "1"); 16 | auto pfc = creator->getFieldNames(); 17 | nvinfer1::IPluginV2 *pluginObj = creator->createPlugin("nonzero_plugin", pfc); 18 | auto nonzero = network->addPluginV2(&inputTensor, 1, *pluginObj); 19 | CHECK_ASSERT(nonzero, "create nonzero node fail\n"); 20 | return nonzero; 21 | } 22 | } -------------------------------------------------------------------------------- /node_create/create_nonzero_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_NONZERO_NODE_HPP__ 2 | #define __CREATE_NONZERO_NODE_HPP__ 3 | 4 | //plugin currently not support DataType::kBOOL 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createNonZeroNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_padding_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef 
__CREATE_PADDING_NODE_HPP__ 2 | #define __CREATE_PADDING_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createPaddingNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_pooling_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_POOLING_NODE_HPP__ 2 | #define __CREATE_POOLING_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createPoolingNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_reduce_node.cpp: -------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "cuda_runtime_api.h" 3 | #include "weights_graph_parse.hpp" 4 | #include "create_reduce_node.hpp" 5 | #include "reduce_node_info.hpp" 6 | namespace tensorrtInference 7 | { 8 | nvinfer1::ILayer* createReduceNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 9 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo) 10 | { 11 | auto reduceNodeInfo = (ReduceNodeInfo*)nodeConfInfo; 12 | auto subType = reduceNodeInfo->getSubNodeType(); 13 | nvinfer1::ReduceOperation operation; 14 | nvinfer1::IReduceLayer* reduce = nullptr; 15 | //ReduceSum 16 | if(subType.compare("ReduceSum") == 0) { 17 | operation = nvinfer1::ReduceOperation::kSUM; 18 | } 19 | else if(subType.compare("GlobalAveragePool") == 0) { 20 | operation = nvinfer1::ReduceOperation::kAVG; 21 | } 22 | else { 23 | LOG("Current not support unary operation(%s) \n", subType); 24 | return nullptr; 25 | } 26 | auto inputs = 
reduceNodeInfo->getInputs(); 27 | nvinfer1::ITensor* inputTensors = tensors[inputs[0]]; 28 | auto axesNodeConfig = reduceNodeInfo->getAxes(); 29 | unsigned int axes = 0; 30 | for(int i = 0; i < axesNodeConfig.size(); i++) 31 | { 32 | axes |= (1 << axesNodeConfig[i]); 33 | } 34 | bool keepdims = reduceNodeInfo->getKeepdims(); 35 | if(subType.compare("GlobalAveragePool") == 0){ 36 | keepdims = true; 37 | nvinfer1::Dims dims = inputTensors->getDimensions(); 38 | // Generate a bitmask of all 1s except the last 2 bits (N and C axes) 39 | axes = ((1 << dims.nbDims) - 1) & ~0b11; 40 | reduce = network->addReduce(*inputTensors, operation, axes, keepdims); 41 | } 42 | else 43 | reduce = network->addReduce(*inputTensors, operation, axes, keepdims); 44 | CHECK_ASSERT(reduce, "create reduce node fail\n"); 45 | return reduce; 46 | } 47 | } -------------------------------------------------------------------------------- /node_create/create_reduce_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_REDUCE_NODE_HPP__ 2 | #define __CREATE_REDUCE_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createReduceNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_resize_node.cpp: -------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "cuda_runtime_api.h" 3 | #include "weights_graph_parse.hpp" 4 | #include "create_node.hpp" 5 | #include "create_resize_node.hpp" 6 | #include "resize_node_info.hpp" 7 | 8 | namespace tensorrtInference 9 | { 10 | nvinfer1::ILayer* createResizeNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 11 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo) 12 | { 13 | auto 
resizeNodeInfo = (ResizeNodeInfo*)nodeConfInfo; 14 | auto inputs = resizeNodeInfo->getInputs(); 15 | std::string mode = resizeNodeInfo->getMode(); 16 | nvinfer1::ResizeMode resizeMode; 17 | if(mode.compare("nearest") == 0) 18 | resizeMode = nvinfer1::ResizeMode::kNEAREST; 19 | else if(mode.compare("linear") == 0) 20 | resizeMode = nvinfer1::ResizeMode::kLINEAR; 21 | else 22 | CHECK_ASSERT(0, "current only support nearest/linear resize mode\n"); 23 | nvinfer1::ITensor* inputTensor = tensors[inputs[0]]; 24 | nvinfer1::IResizeLayer* resize = network->addResize(*inputTensor); 25 | CHECK_ASSERT(resize, "create resize node fail\n"); 26 | 27 | auto scaleWeights = nodeWeightsInfo[inputs[1]]; 28 | auto scales = parseFloatArrayValue(scaleWeights.dataType, scaleWeights.data, scaleWeights.byteCount, scaleWeights.shape); 29 | resize->setScales(scales.data(), scales.size()); 30 | resize->setResizeMode(resizeMode); 31 | return resize; 32 | } 33 | } -------------------------------------------------------------------------------- /node_create/create_resize_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_RESIZE_NODE_HPP__ 2 | #define __CREATE_RESIZE_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createResizeNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_shape_node.cpp: -------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "cuda_runtime_api.h" 3 | #include "weights_graph_parse.hpp" 4 | #include "create_node.hpp" 5 | #include "create_shape_node.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | nvinfer1::ILayer* createShapeNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 10 | 
tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo) 11 | { 12 | auto inputs = nodeConfInfo->getInputs(); 13 | nvinfer1::ITensor* inputTensor = nullptr; 14 | inputTensor = (tensors.count(inputs[0]) != 0) ? tensors[inputs[0]] : nullptr; 15 | CHECK_ASSERT(inputTensor != nullptr, "topo order error\n"); 16 | nvinfer1::IShapeLayer* shape = network->addShape(*inputTensor); 17 | CHECK_ASSERT(shape, "create shape node fail\n"); 18 | return shape; 19 | } 20 | } -------------------------------------------------------------------------------- /node_create/create_shape_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_SHAPE_NODE_HPP__ 2 | #define __CREATE_SHAPE_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createShapeNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_shuffle_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_SHUFFLE_NODE_HPP__ 2 | #define __CREATE_SHUFFLE_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createShuffleNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_slice_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_SLICE_NODE_HPP__ 2 | #define __CREATE_SLICE_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createSliceNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, 
std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_softmax_node.cpp: -------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "cuda_runtime_api.h" 3 | #include "weights_graph_parse.hpp" 4 | #include "create_node.hpp" 5 | #include "create_softmax_node.hpp" 6 | #include "softmax_node_info.hpp" 7 | 8 | namespace tensorrtInference 9 | { 10 | nvinfer1::ILayer* createSoftmaxNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 11 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo) 12 | { 13 | auto softmaxNodeInfo = (SoftmaxNodeInfo*)nodeConfInfo; 14 | auto inputs = softmaxNodeInfo->getInputs(); 15 | int axes = softmaxNodeInfo->getAxis(); 16 | // CHECK_ASSERT(axes >= 0, "axes only support positive\n"); 17 | nvinfer1::ITensor* inputTensor = tensors[inputs[0]]; 18 | nvinfer1::Dims dims = inputTensor->getDimensions(); 19 | nvinfer1::ISoftMaxLayer* softmax = network->addSoftMax(*inputTensor); 20 | CHECK_ASSERT(softmax, "create softmax node fail\n"); 21 | if(axes < 0) 22 | { 23 | axes = dims.nbDims + axes; 24 | CHECK_ASSERT(axes >= 0, "axes value wrong\n"); 25 | } 26 | softmax->setAxes(1 << axes); 27 | return softmax; 28 | } 29 | } -------------------------------------------------------------------------------- /node_create/create_softmax_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_SOFTMAX_NODE_HPP__ 2 | #define __CREATE_SOFTMAX_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createSoftmaxNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- /node_create/create_unary_node.cpp: 
-------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "cuda_runtime_api.h" 3 | #include "weights_graph_parse.hpp" 4 | #include "create_node.hpp" 5 | #include "create_unary_node.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | nvinfer1::ILayer* createUnaryNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 10 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo) 11 | { 12 | auto subType = nodeConfInfo->getSubNodeType(); 13 | nvinfer1::UnaryOperation operation; 14 | //Sqrt Reciprocal Abs 15 | if(subType.compare("Sqrt") == 0) { 16 | operation = nvinfer1::UnaryOperation::kSQRT; 17 | } 18 | else if(subType.compare("Reciprocal") == 0) { 19 | operation = nvinfer1::UnaryOperation::kRECIP; 20 | } 21 | else if(subType.compare("Abs") == 0) { 22 | operation = nvinfer1::UnaryOperation::kABS; 23 | } 24 | else if(subType.compare("Exp") == 0) { 25 | operation = nvinfer1::UnaryOperation::kEXP; 26 | } 27 | else { 28 | LOG("Current not support unary operation(%s) \n", subType); 29 | return nullptr; 30 | } 31 | auto inputs = nodeConfInfo->getInputs(); 32 | nvinfer1::ITensor* inputTensors = tensors[inputs[0]]; 33 | nvinfer1::IUnaryLayer* unary = network->addUnary(*inputTensors, operation); 34 | CHECK_ASSERT(unary, "create unary node fail\n"); 35 | return unary; 36 | } 37 | } -------------------------------------------------------------------------------- /node_create/create_unary_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_UNARY_NODE_HPP__ 2 | #define __CREATE_UNARY_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createUnaryNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } 10 | 11 | #endif -------------------------------------------------------------------------------- 
/node_create/create_unsqueeze_node.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __CREATE_UNSQUEEZE_NODE_HPP__ 2 | #define __CREATE_UNSQUEEZE_NODE_HPP__ 3 | 4 | 5 | namespace tensorrtInference 6 | { 7 | extern nvinfer1::ILayer* createUnsqueezeNode(nvinfer1::INetworkDefinition* network, std::map& tensors, 8 | tensorrtInference::nodeInfo* nodeConfInfo, std::map& nodeWeightsInfo); 9 | } //tensorrtInference 10 | 11 | #endif //__CREATE_UNSQUEEZE_NODE_HPP__ -------------------------------------------------------------------------------- /node_info/activation_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "activation_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Activation Node 7 | ActivationNodeInfo::ActivationNodeInfo() 8 | { 9 | setNodeType("Activation"); 10 | setSubNodeType(""); 11 | } 12 | ActivationNodeInfo::~ActivationNodeInfo() 13 | { 14 | 15 | } 16 | bool ActivationNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 17 | { 18 | setSubNodeType(type); 19 | auto inputSize = root["inputs"].size(); 20 | CHECK_ASSERT(inputSize <= 3, "Activation node must less than 3 inputs\n"); 21 | for(int i = 0; i < inputSize; i++) 22 | { 23 | addInput(root["inputs"][i].asString()); 24 | } 25 | auto outputSize = root["outputs"].size(); 26 | CHECK_ASSERT(outputSize == 1, "Activation node must have 1 output\n"); 27 | auto nodeOutputs = getOutputs(); 28 | for(int i = 0; i < outputSize; i++) 29 | { 30 | addOutput(root["outputs"][i].asString()); 31 | } 32 | auto attr = root["attributes"]; 33 | for (auto elem : attr.getMemberNames()) 34 | { 35 | if(elem.compare("alpha") == 0) 36 | { 37 | auto size = attr[elem].size(); 38 | CHECK_ASSERT(size == 1, "Activation node's alpha must have 1 element\n"); 39 | alpha = attr[elem][0].asFloat(); 40 | } 41 | else if(elem.compare("beta") == 0) 42 | { 43 | auto size = 
attr[elem].size(); 44 | CHECK_ASSERT(size == 1, "Activation node's beta must have 1 element\n"); 45 | beta = attr[elem][0].asFloat(); 46 | } 47 | else 48 | { 49 | LOG("currnet Activation node not support %s \n", elem.c_str()); 50 | } 51 | } 52 | return true; 53 | } 54 | } -------------------------------------------------------------------------------- /node_info/activation_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __CLIP_NODE_INFO_HPP__ 3 | #define __CLIP_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class ActivationNodeInfo : public nodeInfo 10 | { 11 | public: 12 | ActivationNodeInfo(); 13 | ~ActivationNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | float getAlpha() { return alpha; } 16 | float getBeta() { return alpha; } 17 | private: 18 | float alpha; 19 | float beta; 20 | }; 21 | } // tensorrtInference 22 | #endif // __CLIP_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/batchnormalization_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __BATCHNORMALIZATION_NODE_INFO_HPP__ 3 | #define __BATCHNORMALIZATION_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class BatchNormalizationNodeInfo : public nodeInfo 10 | { 11 | public: 12 | BatchNormalizationNodeInfo(); 13 | ~BatchNormalizationNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | float getEpsilon() {return epsilon;} 17 | float getMomentum() {return momentum;} 18 | private: 19 | float epsilon; 20 | float momentum; 21 | }; 22 | } // tensorrtInference 23 | #endif //__BATCHNORMALIZATION_NODE_INFO_HPP__ -------------------------------------------------------------------------------- 
/node_info/concatenation_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "concatenation_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Concatenation Node 7 | ConcatenationNodeInfo::ConcatenationNodeInfo() 8 | { 9 | axis = 0; 10 | setNodeType("Concatenation"); 11 | setSubNodeType(""); 12 | } 13 | ConcatenationNodeInfo::~ConcatenationNodeInfo() 14 | { 15 | axis = 0; 16 | } 17 | bool ConcatenationNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 18 | { 19 | setSubNodeType(type); 20 | auto inputSize = root["inputs"].size(); 21 | CHECK_ASSERT(inputSize >= 1, "Concatenation node must have larger than 1 inputs\n"); 22 | for(int i = 0; i < inputSize; i++) 23 | { 24 | addInput(root["inputs"][i].asString()); 25 | } 26 | auto outputSize = root["outputs"].size(); 27 | CHECK_ASSERT(outputSize == 1, "Concatenation node must have 1 output\n"); 28 | auto nodeOutputs = getOutputs(); 29 | for(int i = 0; i < outputSize; i++) 30 | { 31 | addOutput(root["outputs"][i].asString()); 32 | } 33 | auto attr = root["attributes"]; 34 | for (auto elem : attr.getMemberNames()) 35 | { 36 | if(elem.compare("axis") == 0) 37 | { 38 | auto size = attr[elem].size(); 39 | CHECK_ASSERT(size == 1, "Concatenation node's axis must have 1 element\n"); 40 | axis = attr[elem][0].asInt(); 41 | } 42 | else 43 | { 44 | LOG("currnet Concatenation node not support %s \n", elem.c_str()); 45 | } 46 | } 47 | return true; 48 | } 49 | void ConcatenationNodeInfo::printNodeInfo() 50 | { 51 | nodeInfo::printNodeInfo(); 52 | LOG("node attribute is as follows:\n"); 53 | LOG("----axes is : %d\n", axis); 54 | } 55 | } //tensorrtInference -------------------------------------------------------------------------------- /node_info/concatenation_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __CONCATENATION_NODE_INFO_HPP__ 3 | #define 
__CONCATENATION_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class ConcatenationNodeInfo : public nodeInfo 10 | { 11 | public: 12 | ConcatenationNodeInfo(); 13 | ~ConcatenationNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | int getAxis(){return axis;} 17 | private: 18 | int axis; 19 | }; 20 | } // tensorrtInference 21 | #endif //__CONCATENATION_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/conv2d_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __CONV2D_NODE_INFO_HPP__ 3 | #define __CONV2D_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class Conv2dNodeInfo : public nodeInfo 10 | { 11 | public: 12 | Conv2dNodeInfo(); 13 | ~Conv2dNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | int getGroup() { return group; } 17 | std::vector getKernelShape() { return kernel_shape; } 18 | std::vector getPads() { return pads; } 19 | std::vector getStrides() { return strides; } 20 | std::vector getDilation() { return dilation; } 21 | private: 22 | int group; 23 | std::vector kernel_shape; 24 | std::vector pads; 25 | std::vector strides; 26 | std::vector dilation; 27 | }; 28 | } // tensorrtInference 29 | #endif //__CONV2D_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/elementwise_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "elementwise_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // ElementWise Node 7 | ElementWiseNodeInfo::ElementWiseNodeInfo() 8 | { 9 | setNodeType("ElementWise"); 10 | setSubNodeType(""); 11 | } 12 | 
ElementWiseNodeInfo::~ElementWiseNodeInfo() 13 | { 14 | 15 | } 16 | bool ElementWiseNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 17 | { 18 | setSubNodeType(type); 19 | auto inputSize = root["inputs"].size(); 20 | CHECK_ASSERT(inputSize == 2, "ElementWise node must have 2 inputs\n"); 21 | for(int i = 0; i < inputSize; i++) 22 | { 23 | addInput(root["inputs"][i].asString()); 24 | } 25 | auto outputSize = root["outputs"].size(); 26 | CHECK_ASSERT(outputSize == 1, "ElementWise node must have 1 output\n"); 27 | auto nodeOutputs = getOutputs(); 28 | for(int i = 0; i < outputSize; i++) 29 | { 30 | addOutput(root["outputs"][i].asString()); 31 | } 32 | return true; 33 | } 34 | } -------------------------------------------------------------------------------- /node_info/elementwise_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __ELEMENTWISE_NODE_INFO_HPP__ 3 | #define __ELEMENTWISE_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class ElementWiseNodeInfo : public nodeInfo 10 | { 11 | public: 12 | ElementWiseNodeInfo(); 13 | ~ElementWiseNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | private: 16 | 17 | }; 18 | } // tensorrtInference 19 | #endif //__ELEMENTWISE_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/gather_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "gather_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Gather Node 7 | GatherNodeInfo::GatherNodeInfo() 8 | { 9 | setNodeType("Gather"); 10 | setSubNodeType(""); 11 | } 12 | GatherNodeInfo::~GatherNodeInfo() 13 | { 14 | 15 | } 16 | bool GatherNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 17 | { 18 | setSubNodeType(type); 19 | auto inputSize = 
root["inputs"].size(); 20 | CHECK_ASSERT(inputSize == 2, "Gather node must have 2 inputs\n"); 21 | for(int i = 0; i < inputSize; i++) 22 | { 23 | addInput(root["inputs"][i].asString()); 24 | } 25 | auto outputSize = root["outputs"].size(); 26 | CHECK_ASSERT(outputSize == 1, "Gather node must have 1 output\n"); 27 | auto nodeOutputs = getOutputs(); 28 | for(int i = 0; i < outputSize; i++) 29 | { 30 | addOutput(root["outputs"][i].asString()); 31 | } 32 | auto attr = root["attributes"]; 33 | for (auto elem : attr.getMemberNames()) 34 | { 35 | if(elem.compare("axis") == 0) 36 | { 37 | auto size = attr[elem].size(); 38 | CHECK_ASSERT(size == 1, "Gather node's axis must have 1 element\n"); 39 | axis = attr[elem][0].asInt(); 40 | } 41 | else 42 | { 43 | LOG("current Gather node not support %s \n", elem.c_str()); 44 | } 45 | } 46 | return true; 47 | } 48 | } //tensorrtInference -------------------------------------------------------------------------------- /node_info/gather_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __GATHER_NODE_INFO_HPP__ 3 | #define __GATHER_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class GatherNodeInfo : public nodeInfo 10 | { 11 | public: 12 | GatherNodeInfo(); 13 | ~GatherNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | int getAxis() { return axis;} 16 | private: 17 | int axis; 18 | }; 19 | } // tensorrtInference 20 | #endif //__GATHER_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/gemm_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __GEMM_NODE_INFO_HPP__ 3 | #define __GEMM_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class GemmNodeInfo : public nodeInfo 10 | { 11 | public: 12 | GemmNodeInfo(); 13 | 
~GemmNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | float getAlpha(){return alpha;} 17 | float getBeta(){return beta;} 18 | int getTransA(){return transA;} 19 | int getTransB(){return transB;} 20 | private: 21 | float alpha; 22 | float beta; 23 | int transA; 24 | int transB; 25 | }; 26 | } // tensorrtInference 27 | #endif //__GEMM_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/identity_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "identity_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Identity Node 7 | IdentityNodeInfo::IdentityNodeInfo() 8 | { 9 | setNodeType("Identity"); 10 | setSubNodeType(""); 11 | dataType = 0; 12 | } 13 | IdentityNodeInfo::~IdentityNodeInfo() 14 | { 15 | dataType = 0; 16 | } 17 | bool IdentityNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 18 | { 19 | setSubNodeType(type); 20 | auto inputSize = root["inputs"].size(); 21 | CHECK_ASSERT(inputSize == 1, "Identity node must have 1 inputs\n"); 22 | for(int i = 0; i < inputSize; i++) 23 | { 24 | addInput(root["inputs"][i].asString()); 25 | } 26 | auto outputSize = root["outputs"].size(); 27 | CHECK_ASSERT(outputSize == 1, "Identity node must have 1 output\n"); 28 | auto nodeOutputs = getOutputs(); 29 | for(int i = 0; i < outputSize; i++) 30 | { 31 | addOutput(root["outputs"][i].asString()); 32 | } 33 | auto attr = root["attributes"]; 34 | for (auto elem : attr.getMemberNames()) 35 | { 36 | if(elem.compare("to") == 0) 37 | { 38 | auto size = attr[elem].size(); 39 | CHECK_ASSERT(size == 1, "Identity node's to must have 1 element\n"); 40 | dataType = attr[elem][0].asInt(); 41 | } 42 | else 43 | { 44 | LOG("currnet Identity node not support %s \n", elem.c_str()); 45 | } 46 | } 47 | return true; 48 | } 49 | void 
IdentityNodeInfo::printNodeInfo() 50 | { 51 | nodeInfo::printNodeInfo(); 52 | LOG("node attribute is as follows:\n"); 53 | LOG("----dataType is : %d \n", dataType); 54 | } 55 | } -------------------------------------------------------------------------------- /node_info/identity_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __IDENTITY_NODE_INFO_HPP__ 3 | #define __IDENTITY_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class IdentityNodeInfo : public nodeInfo 10 | { 11 | public: 12 | IdentityNodeInfo(); 13 | ~IdentityNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | int getDataType() {return dataType;} 17 | private: 18 | int dataType; 19 | }; 20 | } // tensorrtInference 21 | #endif //__IDENTITY_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/node_info.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __NODE_INFO_HPP__ 2 | #define __NODE_INFO_HPP__ 3 | 4 | #include 5 | #include 6 | #include 7 | #include "json/json.h" 8 | #include "utils.hpp" 9 | using namespace std; 10 | 11 | namespace tensorrtInference 12 | { 13 | class nodeInfo 14 | { 15 | public: 16 | nodeInfo(); 17 | ~nodeInfo(); 18 | void setNodeType(std::string type); 19 | std::string getNodeType(); 20 | void setSubNodeType(std::string type); 21 | std::string getSubNodeType(); 22 | std::vector getInputs(); 23 | std::vector getOutputs(); 24 | void addInput(std::string input); 25 | void addOutput(std::string output); 26 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) = 0; 27 | void printNodeInfo(); 28 | private: 29 | std::string nodeType; 30 | std::string subNodeType; 31 | std::vector inputs; 32 | std::vector outputs; 33 | }; 34 | 35 | typedef nodeInfo* (*nodeParseFunc)(std::string, 
Json::Value&); 36 | 37 | class NodeParse 38 | { 39 | private: 40 | static NodeParse* instance; 41 | void registerNodeParseFunc(); 42 | std::map nodeParseFuncMap; 43 | std::map onnxNodeTypeToTensorrtNodeTypeMap; 44 | NodeParse() 45 | { 46 | } 47 | public: 48 | nodeParseFunc getNodeParseFunc(std::string nodeType); 49 | static NodeParse* getInstance() { 50 | return instance; 51 | } 52 | }; 53 | 54 | extern nodeParseFunc getNodeParseFuncMap(std::string onnxNodeType); 55 | } 56 | 57 | #endif -------------------------------------------------------------------------------- /node_info/nonzero_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "nonzero_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // NonZero Node 7 | NonZeroNodeInfo::NonZeroNodeInfo() 8 | { 9 | setNodeType("NonZero"); 10 | setSubNodeType(""); 11 | } 12 | NonZeroNodeInfo::~NonZeroNodeInfo() 13 | { 14 | 15 | } 16 | bool NonZeroNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 17 | { 18 | setSubNodeType(type); 19 | auto inputSize = root["inputs"].size(); 20 | CHECK_ASSERT(inputSize == 1, "NonZero node must have 1 inputs\n"); 21 | for(int i = 0; i < inputSize; i++) 22 | { 23 | addInput(root["inputs"][i].asString()); 24 | } 25 | auto outputSize = root["outputs"].size(); 26 | CHECK_ASSERT(outputSize == 1, "NonZero node must have 1 output\n"); 27 | auto nodeOutputs = getOutputs(); 28 | for(int i = 0; i < outputSize; i++) 29 | { 30 | addOutput(root["outputs"][i].asString()); 31 | } 32 | return true; 33 | } 34 | } -------------------------------------------------------------------------------- /node_info/nonzero_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __NONZERO_NODE_INFO_HPP__ 3 | #define __NONZERO_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class NonZeroNodeInfo : public nodeInfo 
10 | { 11 | public: 12 | NonZeroNodeInfo(); 13 | ~NonZeroNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | private: 16 | }; 17 | } // tensorrtInference 18 | #endif //__NONZERO_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/padding_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __PADDING_NODE_INFO_HPP__ 3 | #define __PADDING_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class PaddingNodeInfo : public nodeInfo 10 | { 11 | public: 12 | PaddingNodeInfo(); 13 | ~PaddingNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | std::string getMode() {return mode;} 16 | std::vector getPads() {return pads;} 17 | float getFloatValue() {return floatValue;} 18 | int getIntValue() {return intValue;} 19 | private: 20 | std::string mode; 21 | std::vector pads; 22 | float floatValue; 23 | int intValue; 24 | }; 25 | } //tensorrtInference 26 | #endif // __PADDING_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/pooling_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __POOLING_NODE_INFO_HPP__ 3 | #define __POOLING_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class PoolingNodeInfo : public nodeInfo 10 | { 11 | public: 12 | PoolingNodeInfo(); 13 | ~PoolingNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | std::vector getKernelShape() { return kernelShape; } 17 | std::vector getPads() { return pads; } 18 | std::vector getStrides() { return strides; } 19 | // std::vector getDilations() { return dilations; } 20 | std::string getAutoPad() { return auto_pad; } 21 | bool 
getCeilMode() { return (1 == ceil_mode); } 22 | int getCountIncludePad() {return count_include_pad;} 23 | 24 | private: 25 | int ceil_mode; 26 | int count_include_pad; 27 | std::string auto_pad; 28 | std::vector kernelShape; 29 | std::vector pads; 30 | std::vector strides; 31 | // std::vector dilations; 32 | }; 33 | } // tensorrtInference 34 | #endif //__POOLING_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/reduce_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "reduce_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Reduce Node 7 | ReduceNodeInfo::ReduceNodeInfo() 8 | { 9 | axes.clear(); 10 | keepdims = 0; 11 | setNodeType("Reduce"); 12 | setSubNodeType(""); 13 | } 14 | ReduceNodeInfo::~ReduceNodeInfo() 15 | { 16 | axes.clear(); 17 | keepdims = 0; 18 | } 19 | bool ReduceNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 20 | { 21 | setSubNodeType(type); 22 | auto inputSize = root["inputs"].size(); 23 | CHECK_ASSERT(inputSize == 1, "Reduce node must have 1 inputs\n"); 24 | for(int i = 0; i < inputSize; i++) 25 | { 26 | addInput(root["inputs"][i].asString()); 27 | } 28 | auto outputSize = root["outputs"].size(); 29 | CHECK_ASSERT(outputSize == 1, "Reduce node must have 1 output\n"); 30 | auto nodeOutputs = getOutputs(); 31 | for(int i = 0; i < outputSize; i++) 32 | { 33 | addOutput(root["outputs"][i].asString()); 34 | } 35 | auto attr = root["attributes"]; 36 | for (auto elem : attr.getMemberNames()) 37 | { 38 | if(elem.compare("axes") == 0) 39 | { 40 | auto size = attr[elem].size(); 41 | // CHECK_ASSERT(size == 1, "Reduce node's axes must have 1 element\n"); 42 | for(int i = 0; i < size; i++) 43 | axes.push_back(attr[elem][0].asInt()); 44 | } 45 | else if(elem.compare("keepdims") == 0) 46 | { 47 | auto size = attr[elem].size(); 48 | CHECK_ASSERT(size == 1, "Reduce node's keepdims must 
have 1 element\n"); 49 | keepdims = attr[elem][0].asInt(); 50 | } 51 | else 52 | { 53 | LOG("currnet Reduce node not support %s \n", elem.c_str()); 54 | } 55 | } 56 | return true; 57 | } 58 | void ReduceNodeInfo::printNodeInfo() 59 | { 60 | nodeInfo::printNodeInfo(); 61 | LOG("node attribute is as follows:\n"); 62 | LOG("----axes is : %d \n", axes); 63 | LOG("----keepdims is : %d \n", keepdims); 64 | } 65 | } -------------------------------------------------------------------------------- /node_info/reduce_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __REDUCE_NODE_INFO_HPP__ 3 | #define __REDUCE_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class ReduceNodeInfo : public nodeInfo 10 | { 11 | public: 12 | ReduceNodeInfo(); 13 | ~ReduceNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | std::vector getAxes() {return axes;} 17 | bool getKeepdims() {return keepdims == 1;} 18 | private: 19 | std::vector axes; 20 | int keepdims; 21 | }; 22 | } // tensorrtInference 23 | #endif //__REDUCE_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/resize_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "resize_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Resize Node 7 | ResizeNodeInfo::ResizeNodeInfo() 8 | { 9 | mode = "nearest"; 10 | setNodeType("Resize"); 11 | setSubNodeType(""); 12 | } 13 | ResizeNodeInfo::~ResizeNodeInfo() 14 | { 15 | mode = ""; 16 | } 17 | bool ResizeNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 18 | { 19 | setSubNodeType(type); 20 | auto inputSize = root["inputs"].size(); 21 | CHECK_ASSERT(inputSize > 1, "Resize node must larger than 1 inputs\n"); 22 | for(int i = 0; i < inputSize; i++) 
23 | { 24 | addInput(root["inputs"][i].asString()); 25 | } 26 | auto outputSize = root["outputs"].size(); 27 | CHECK_ASSERT(outputSize == 1, "Resize node must have 1 output\n"); 28 | auto nodeOutputs = getOutputs(); 29 | for(int i = 0; i < outputSize; i++) 30 | { 31 | addOutput(root["outputs"][i].asString()); 32 | } 33 | auto attr = root["attributes"]; 34 | for (auto elem : attr.getMemberNames()) 35 | { 36 | if(elem.compare("mode") == 0) 37 | { 38 | auto size = attr[elem].size(); 39 | CHECK_ASSERT(size == 1, "Resize node's mode must have 1 element\n"); 40 | mode = attr[elem][0].asString(); 41 | } 42 | else 43 | { 44 | LOG("currnet Resize node not support %s \n", elem.c_str()); 45 | } 46 | } 47 | return true; 48 | } 49 | void ResizeNodeInfo::printNodeInfo() 50 | { 51 | nodeInfo::printNodeInfo(); 52 | LOG("node attribute is as follows:\n"); 53 | LOG("----mode is : %s \n", mode.c_str()); 54 | } 55 | } -------------------------------------------------------------------------------- /node_info/resize_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __RESIZE_NODE_INFO_HPP__ 3 | #define __RESIZE_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class ResizeNodeInfo : public nodeInfo 10 | { 11 | public: 12 | ResizeNodeInfo(); 13 | ~ResizeNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | std::string getMode(){return mode;} 17 | private: 18 | std::string mode; 19 | }; 20 | } // tensorrtInference 21 | #endif //__RESIZE_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/shape_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "shape_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Shape Node 7 | ShapeNodeInfo::ShapeNodeInfo() 8 | { 9 | 
setNodeType("Shape"); 10 | setSubNodeType(""); 11 | } 12 | ShapeNodeInfo::~ShapeNodeInfo() 13 | { 14 | 15 | } 16 | bool ShapeNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 17 | { 18 | setSubNodeType(type); 19 | auto inputSize = root["inputs"].size(); 20 | CHECK_ASSERT(inputSize == 1, "Shape node must have 1 inputs\n"); 21 | for(int i = 0; i < inputSize; i++) 22 | { 23 | addInput(root["inputs"][i].asString()); 24 | } 25 | auto outputSize = root["outputs"].size(); 26 | CHECK_ASSERT(outputSize == 1, "Shape node must have 1 output\n"); 27 | auto nodeOutputs = getOutputs(); 28 | for(int i = 0; i < outputSize; i++) 29 | { 30 | addOutput(root["outputs"][i].asString()); 31 | } 32 | return true; 33 | } 34 | } -------------------------------------------------------------------------------- /node_info/shape_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __SHAPE_NODE_INFO_HPP__ 3 | #define __SHAPE_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class ShapeNodeInfo : public nodeInfo 10 | { 11 | public: 12 | ShapeNodeInfo(); 13 | ~ShapeNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | private: 16 | 17 | }; 18 | } // tensorrtInference 19 | #endif //__SHAPE_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/shuffle_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __SHUFFLE_NODE_INFO_HPP__ 3 | #define __SHUFFLE_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class ShuffleNodeInfo : public nodeInfo 10 | { 11 | public: 12 | ShuffleNodeInfo(); 13 | ~ShuffleNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | std::vector getPerm() { return perm; } 17 | int 
getAxis() { return axis; } 18 | private: 19 | std::vector perm; 20 | int axis; //Flatten 21 | }; 22 | } // tensorrtInference 23 | #endif // __SHUFFLE_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/slice_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "slice_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Slice Node 7 | SliceNodeInfo::SliceNodeInfo() 8 | { 9 | setNodeType("Slice"); 10 | setSubNodeType(""); 11 | } 12 | SliceNodeInfo::~SliceNodeInfo() 13 | { 14 | 15 | } 16 | bool SliceNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 17 | { 18 | setSubNodeType(type); 19 | auto inputSize = root["inputs"].size(); 20 | CHECK_ASSERT(inputSize >= 3, "slice node must greate equal than 3 inputs\n"); 21 | for(int i = 0; i < inputSize; i++) 22 | { 23 | addInput(root["inputs"][i].asString()); 24 | } 25 | auto outputSize = root["outputs"].size(); 26 | CHECK_ASSERT(outputSize == 1, "slice node must have 1 output\n"); 27 | auto nodeOutputs = getOutputs(); 28 | for(int i = 0; i < outputSize; i++) 29 | { 30 | addOutput(root["outputs"][i].asString()); 31 | } 32 | return true; 33 | } 34 | } -------------------------------------------------------------------------------- /node_info/slice_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __SLICE_NODE_INFO_HPP__ 3 | #define __SLICE_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class SliceNodeInfo : public nodeInfo 10 | { 11 | public: 12 | SliceNodeInfo(); 13 | ~SliceNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | private: 16 | 17 | }; 18 | } // tensorrtInference 19 | #endif //__SLICE_NODE_INFO_HPP__ -------------------------------------------------------------------------------- 
/node_info/softmax_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "softmax_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Softmax Node 7 | SoftmaxNodeInfo::SoftmaxNodeInfo() 8 | { 9 | axis = 0; 10 | setNodeType("Softmax"); 11 | setSubNodeType(""); 12 | } 13 | SoftmaxNodeInfo::~SoftmaxNodeInfo() 14 | { 15 | axis = 0; 16 | } 17 | bool SoftmaxNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 18 | { 19 | setSubNodeType(type); 20 | auto inputSize = root["inputs"].size(); 21 | CHECK_ASSERT(inputSize == 1, "Softmax node must have 1 inputs\n"); 22 | for(int i = 0; i < inputSize; i++) 23 | { 24 | addInput(root["inputs"][i].asString()); 25 | } 26 | auto outputSize = root["outputs"].size(); 27 | CHECK_ASSERT(outputSize == 1, "Softmax node must have 1 output\n"); 28 | auto nodeOutputs = getOutputs(); 29 | for(int i = 0; i < outputSize; i++) 30 | { 31 | addOutput(root["outputs"][i].asString()); 32 | } 33 | auto attr = root["attributes"]; 34 | for (auto elem : attr.getMemberNames()) 35 | { 36 | if(elem.compare("axis") == 0) 37 | { 38 | auto size = attr[elem].size(); 39 | CHECK_ASSERT(size == 1, "Softmax node's axis must have 1 element\n"); 40 | axis = attr[elem][0].asInt(); 41 | } 42 | else 43 | { 44 | LOG("currnet Softmax node not support %s \n", elem.c_str()); 45 | } 46 | } 47 | return true; 48 | } 49 | void SoftmaxNodeInfo::printNodeInfo() 50 | { 51 | nodeInfo::printNodeInfo(); 52 | LOG("node attribute is as follows:\n"); 53 | LOG("----axis is : %d \n", axis); 54 | } 55 | } -------------------------------------------------------------------------------- /node_info/softmax_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __SOFTMAX_NODE_INFO_HPP__ 3 | #define __SOFTMAX_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class SoftmaxNodeInfo : public 
nodeInfo 10 | { 11 | public: 12 | SoftmaxNodeInfo(); 13 | ~SoftmaxNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | int getAxis(){return axis;} 17 | private: 18 | int axis; 19 | }; 20 | } // tensorrtInference 21 | #endif //__SOFTMAX_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/unary_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "unary_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Unary Node 7 | UnaryNodeInfo::UnaryNodeInfo() 8 | { 9 | setNodeType("Unary"); 10 | setSubNodeType(""); 11 | } 12 | UnaryNodeInfo::~UnaryNodeInfo() 13 | { 14 | 15 | } 16 | bool UnaryNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 17 | { 18 | setSubNodeType(type); 19 | auto inputSize = root["inputs"].size(); 20 | CHECK_ASSERT(inputSize == 1, "Unary node must have 1 inputs\n"); 21 | for(int i = 0; i < inputSize; i++) 22 | { 23 | addInput(root["inputs"][i].asString()); 24 | } 25 | auto outputSize = root["outputs"].size(); 26 | CHECK_ASSERT(outputSize == 1, "Unary node must have 1 output\n"); 27 | auto nodeOutputs = getOutputs(); 28 | for(int i = 0; i < outputSize; i++) 29 | { 30 | addOutput(root["outputs"][i].asString()); 31 | } 32 | return true; 33 | } 34 | } -------------------------------------------------------------------------------- /node_info/unary_node_info.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef __UNARY_NODE_INFO_HPP__ 3 | #define __UNARY_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class UnaryNodeInfo : public nodeInfo 10 | { 11 | public: 12 | UnaryNodeInfo(); 13 | ~UnaryNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | private: 16 | 17 | }; 18 | } // 
tensorrtInference 19 | #endif //__UNARY_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /node_info/unsqueeze_node_info.cpp: -------------------------------------------------------------------------------- 1 | #include "unsqueeze_node_info.hpp" 2 | #include "utils.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | // Unsqueeze Node 7 | UnsqueezeNodeInfo::UnsqueezeNodeInfo() 8 | { 9 | axes.clear(); 10 | setNodeType("Unsqueeze"); 11 | setSubNodeType(""); 12 | } 13 | UnsqueezeNodeInfo::~UnsqueezeNodeInfo() 14 | { 15 | axes.clear(); 16 | } 17 | bool UnsqueezeNodeInfo::parseNodeInfoFromJson(std::string type, Json::Value &root) 18 | { 19 | setSubNodeType(type); 20 | auto inputSize = root["inputs"].size(); 21 | CHECK_ASSERT(inputSize == 1, "Unsqueeze node must have 1 inputs\n"); 22 | for(int i = 0; i < inputSize; i++) 23 | { 24 | addInput(root["inputs"][i].asString()); 25 | } 26 | auto outputSize = root["outputs"].size(); 27 | CHECK_ASSERT(outputSize == 1, "Unsqueeze node must have 1 output\n"); 28 | auto nodeOutputs = getOutputs(); 29 | for(int i = 0; i < outputSize; i++) 30 | { 31 | addOutput(root["outputs"][i].asString()); 32 | } 33 | auto attr = root["attributes"]; 34 | for (auto elem : attr.getMemberNames()) 35 | { 36 | if(elem.compare("axes") == 0) 37 | { 38 | auto size = attr[elem].size(); 39 | for(int i = 0; i < size; i++) 40 | { 41 | axes.push_back(attr[elem][i].asInt()); 42 | } 43 | } 44 | else 45 | { 46 | LOG("currnet Unsqueeze node not support %s \n", elem.c_str()); 47 | } 48 | } 49 | return true; 50 | } 51 | void UnsqueezeNodeInfo::printNodeInfo() 52 | { 53 | nodeInfo::printNodeInfo(); 54 | LOG("node attribute is as follows:\n"); 55 | LOG("----axes is : "); 56 | for(int i = 0; i < axes.size(); i++) { 57 | LOG("%d ", axes[i]); 58 | } 59 | LOG("\n"); 60 | } 61 | } -------------------------------------------------------------------------------- /node_info/unsqueeze_node_info.hpp: 
-------------------------------------------------------------------------------- 1 | 2 | #ifndef __UNSQUEEZE_NODE_INFO_HPP__ 3 | #define __UNSQUEEZE_NODE_INFO_HPP__ 4 | 5 | #include "node_info.hpp" 6 | 7 | namespace tensorrtInference 8 | { 9 | class UnsqueezeNodeInfo : public nodeInfo 10 | { 11 | public: 12 | UnsqueezeNodeInfo(); 13 | ~UnsqueezeNodeInfo(); 14 | virtual bool parseNodeInfoFromJson(std::string type, Json::Value &root) override; 15 | void printNodeInfo(); 16 | std::vector getAxes(){return axes;} 17 | private: 18 | std::vector axes; 19 | }; 20 | } // tensorrtInference 21 | #endif //__UNSQUEEZE_NODE_INFO_HPP__ -------------------------------------------------------------------------------- /onnx_tensorrt_wrapper.cpp: -------------------------------------------------------------------------------- 1 | #include "onnx_tensorrt_wrapper.hpp" 2 | #include "tensorrt_engine.hpp" 3 | 4 | namespace tensorrtInference 5 | { 6 | onnxTensorrtWrapper::onnxTensorrtWrapper(std::string engineFile, int deviceID) 7 | { 8 | auto ptr = new tensorrtEngine(engineFile, deviceID); 9 | inferenceEngine = ptr; 10 | } 11 | onnxTensorrtWrapper::~onnxTensorrtWrapper() 12 | { 13 | if(inferenceEngine != nullptr) 14 | { 15 | auto ptr = (tensorrtEngine*)inferenceEngine; 16 | delete ptr; 17 | inferenceEngine = nullptr; 18 | } 19 | } 20 | void onnxTensorrtWrapper::prepareData(std::map dataMap) 21 | { 22 | auto ptr = (tensorrtEngine*)inferenceEngine; 23 | ptr->prepareData(dataMap); 24 | } 25 | 26 | void onnxTensorrtWrapper::doInference(bool syncFlag) 27 | { 28 | auto ptr = (tensorrtEngine*)inferenceEngine; 29 | ptr->doInference(syncFlag); 30 | } 31 | std::map onnxTensorrtWrapper::getInferenceResult() 32 | { 33 | auto ptr = (tensorrtEngine*)inferenceEngine; 34 | return ptr->getInferenceResult(); 35 | } 36 | 37 | } -------------------------------------------------------------------------------- /onnx_tensorrt_wrapper.hpp: 
-------------------------------------------------------------------------------- 1 | #ifndef __ONNX_TENSORRT_WRAPPER_HPP__ 2 | #define __ONNX_TENSORRT_WRAPPER_HPP__ 3 | 4 | #include 5 | #include 6 | #include 7 | using namespace std; 8 | 9 | namespace tensorrtInference 10 | { 11 | class onnxTensorrtWrapper 12 | { 13 | public: 14 | onnxTensorrtWrapper(std::string engineFile, int deviceID = 0); 15 | ~onnxTensorrtWrapper(); 16 | void prepareData(std::map dataMap); 17 | std::map getInferenceResult(); 18 | void doInference(bool syncFlag); 19 | private: 20 | void *inferenceEngine = nullptr; 21 | }; 22 | } 23 | 24 | #endif -------------------------------------------------------------------------------- /python_scripts/.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Python: 当前文件", 9 | "type": "python", 10 | "request": "launch", 11 | "program": "${file}", 12 | "console": "integratedTerminal", 13 | "args" : ["../example/squeezenet/squeezenet_simplify.onnx", "../example/squeezenet/squeezenet_edit.onnx", "--inputs", "input[1,3,227,227]", "--outputs", "58[1,64,56,56],60[1,64,56,56]"] 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /python_scripts/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.pythonPath": "/home/xj-zjd/anaconda3/envs/tensorrt_wrapper/bin/python" 3 | } -------------------------------------------------------------------------------- /python_scripts/Readme: -------------------------------------------------------------------------------- 1 | convert_fp32_to_fp16.py convert fp32 onnx model to fp16 model 2 | 
onnx_edit.py refer to https://github.com/saurabh-shandilya/onnx-utils.git 3 | parse_onnx_model.py parse onnx(fp32 or fp16) model and generate **.json and ***.bin files for c++ code 4 | test_onnx_model.py test onnx model with onnxruntime, comparing with c++ results 5 | simplfy_and_infer_shape.py simplify onnx model and infer op's shape -------------------------------------------------------------------------------- /python_scripts/convert_fp32_to_fp16.py: -------------------------------------------------------------------------------- 1 | import onnxmltools 2 | from onnxmltools.utils.float16_converter import convert_float_to_float16 3 | 4 | input_onnx_model = './hfnet_github.onnx' # fp32 onnx model 5 | output_onnx_model = './hfnet_github_fp16.onnx' # fp16 onnx model 6 | 7 | onnx_model = onnxmltools.utils.load_model(input_onnx_model) 8 | 9 | onnx_model = convert_float_to_float16(onnx_model) 10 | # onnx_model.graph.output.pop() 11 | # onnx_model.graph.output.pop() 12 | # onnx_model.graph.output.pop() 13 | onnxmltools.utils.save_model(onnx_model, output_onnx_model) 14 | -------------------------------------------------------------------------------- /python_scripts/shufflenetv2_simplify.py: -------------------------------------------------------------------------------- 1 | import onnx 2 | import onnxmltools 3 | from onnxsim import simplify 4 | 5 | # load your predefined ONNX model 6 | model = onnx.load('../example/shufflenet/shufflenet.onnx') 7 | 8 | 9 | optimizers_list = ['eliminate_deadend', 'eliminate_identity', 'eliminate_nop_dropout', 10 | 'eliminate_nop_monotone_argmax', 'eliminate_nop_pad', 11 | 'extract_constant_to_initializer', 'eliminate_unused_initializer', 12 | 'eliminate_nop_transpose', 'fuse_add_bias_into_conv', 13 | 'fuse_consecutive_log_softmax', 14 | 'fuse_consecutive_reduce_unsqueeze', 'fuse_consecutive_squeezes', 15 | 'fuse_consecutive_transposes', 'fuse_matmul_add_bias_into_gemm', 16 | 'fuse_pad_into_conv', 'fuse_transpose_into_gemm'] 17 | 18 | 
optimizers_list.append('fuse_bn_into_conv') 19 | 20 | check_result = onnx.checker.check_model(model) 21 | for i in range(len(model.graph.node)): 22 | print(model.graph.node[i].input) 23 | # convert model 24 | model_simp = onnx.optimizer.optimize(model, optimizers_list) 25 | 26 | onnxmltools.utils.save_model(model_simp, '../example/shufflenet/shufflenet_simplify.onnx') 27 | -------------------------------------------------------------------------------- /python_scripts/simplfy_and_infer_shape.py: -------------------------------------------------------------------------------- 1 | import onnx 2 | import onnxmltools 3 | from onnxsim import simplify 4 | 5 | # load your predefined ONNX model 6 | model = onnx.load('../example/lenet/lenet.onnx') 7 | 8 | # convert model 9 | model_with_shape = onnx.shape_inference.infer_shapes(model) 10 | onnxmltools.utils.save_model(model_with_shape, '../example/lenet/lenet_infer_shape.onnx') 11 | 12 | model_simp, check = simplify(model) 13 | onnxmltools.utils.save_model(model_simp, '../example/lenet/lenet_simplify.onnx') 14 | # assert check, "Simplified ONNX model could not be validated" -------------------------------------------------------------------------------- /python_scripts/test_hfnet_onnx_model_with_execution.py: -------------------------------------------------------------------------------- 1 | import time 2 | import execution 3 | import numpy as np 4 | import cv2 5 | 6 | 7 | if __name__ == "__main__": 8 | test_img_file = "../example/hfnet/gray_test.bmp" 9 | color_img = cv2.imread(test_img_file) 10 | gray_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY).reshape((1, 720,1280,1)) 11 | test_input = np.zeros((1,721,1281,1)).astype(np.uint8) 12 | test_input[:,0:720,0:1280,:] = gray_img 13 | 14 | # construct network 15 | network = execution.Network() 16 | # 1 uint8 to float32 17 | uint8ToFloat32_node = execution.DataTypeConvertExecution(["gray_image",], ["prefix/image:0",]) 18 | attr = {} 19 | attr["convert_type"] = 
"ConvertUint8ToFloat32" 20 | uint8ToFloat32_node.init_attr(attr) 21 | network.insert_node(uint8ToFloat32_node) 22 | 23 | # 2 onnx model 24 | inputs = uint8ToFloat32_node.get_outputs() 25 | onnx_file = "../example/hfnet/hfnet_edit.onnx" 26 | onnx_node = execution.OnnxModelExecution(onnx_file, inputs, ["prefix/pred/global_head/l2_normalize:0", 27 | "prefix/pred/local_head/descriptor/Mul_1:0", "prefix/pred/Reshape:0", "prefix/pred/keypoint_extraction/Greater_new:0"]) 28 | network.insert_node(onnx_node) 29 | 30 | # 3 resample 31 | inputs = onnx_node.get_outputs() 32 | resample_node = execution.HFnetResampleExecution(inputs, ["global_desc", "local_desc",]) 33 | network.insert_node(resample_node) 34 | 35 | # 4 inference network 36 | outputs = resample_node.get_outputs() 37 | network.generate_topo_order(["gray_image",]) 38 | result = network.inference({"gray_image": test_input}, outputs) 39 | 40 | # 5 save network info to json file 41 | network.export_json() 42 | 43 | -------------------------------------------------------------------------------- /python_scripts/test_lenet_onnx_model_with_execution.py: -------------------------------------------------------------------------------- 1 | import time 2 | import execution 3 | import cv2 4 | import numpy as np 5 | 6 | if __name__ == "__main__": 7 | # construct network 8 | test_input = np.ones((1, 1, 32, 32)).astype(np.float32) 9 | network = execution.Network() 10 | 11 | # 1 onnx model 12 | onnx_file = "../example/lenet/lenet.onnx" 13 | onnx_node = execution.OnnxModelExecution(onnx_file, ["input"], ["output",]) 14 | network.insert_node(onnx_node) 15 | 16 | # 7 inference network 17 | network.generate_topo_order(["input",]) 18 | result = network.inference({"input": test_input}, ["output",]) 19 | 20 | # 8 save network info to json file 21 | network.export_json() 22 | 23 | -------------------------------------------------------------------------------- /python_scripts/test_onnx_model.py: 
-------------------------------------------------------------------------------- 1 | import onnxruntime 2 | import numpy as np 3 | import time 4 | devices = onnxruntime.get_device() 5 | session = onnxruntime.InferenceSession("../example/lenet/lenet.onnx") 6 | session.get_modelmeta() 7 | first_input_name = session.get_inputs()[0].name 8 | 9 | indata1 = np.ones((1,1,32,32)).astype(np.float32) 10 | results = session.run([], {first_input_name : indata1}) 11 | 12 | starttime = time.time() 13 | for i in range(1): 14 | results = session.run([], {first_input_name : indata1}) 15 | 16 | endtime = time.time() 17 | print((endtime - starttime)) 18 | print(results[0]) 19 | 20 | -------------------------------------------------------------------------------- /python_scripts/test_yolov3_onnx_model_with_execution.py: -------------------------------------------------------------------------------- 1 | import time 2 | import execution 3 | import cv2 4 | 5 | 6 | if __name__ == "__main__": 7 | test_img_file = "../example/yolov3/bus.jpg" 8 | test_input = cv2.imread(test_img_file) 9 | # construct network 10 | network = execution.Network() 11 | # 1 brg to rgb 12 | bgr2rgb_node = execution.DataFormatConvertExecution(["bgr_image",], ["rgb_image",]) 13 | attr = {} 14 | attr["convert_type"] = "BGR2RGB" 15 | bgr2rgb_node.init_attr(attr) 16 | network.insert_node(bgr2rgb_node) 17 | 18 | # 2 reshape rgb to 1 * h * w * c 19 | inputs = bgr2rgb_node.get_outputs() 20 | reshape_node = execution.ReshapeExecution(inputs, ["reshape_image",]) 21 | attr = {} 22 | shape = [1, ] 23 | img_shape = test_input.shape 24 | shape.extend(list(img_shape)) 25 | attr["shape"] = shape 26 | reshape_node.init_attr(attr) 27 | network.insert_node(reshape_node) 28 | 29 | # 3 transpose nhwc to nchw 30 | inputs = reshape_node.get_outputs() 31 | transpose_node = execution.TransposeExecution(inputs, ["transpose_img",]) 32 | attr = {} 33 | attr["perm"] = [0, 3, 1, 2] 34 | transpose_node.init_attr(attr) 35 | 
network.insert_node(transpose_node) 36 | 37 | # 4 normalization 38 | inputs = transpose_node.get_outputs() 39 | normalization_node = execution.NormalizationExecution(inputs, ["images",]) 40 | attr = {} 41 | attr["alpha"] = 0.0 42 | attr["beta"] = 255.0 43 | attr["bias"] = 0.0 44 | normalization_node.init_attr(attr) 45 | network.insert_node(normalization_node) 46 | 47 | # 5 onnx model 48 | inputs = normalization_node.get_outputs() 49 | onnx_file = "../example/yolov3/yolov3-tiny.onnx" 50 | onnx_node = execution.OnnxModelExecution(onnx_file, inputs, ["classes", "boxes"]) 51 | network.insert_node(onnx_node) 52 | 53 | # 6 yolo nms 54 | inputs = onnx_node.get_outputs() 55 | nms_node = execution.YoloNMSExecution(inputs, ["nms_number", "nms_boxes", "nms_classes"]) 56 | network.insert_node(nms_node) 57 | 58 | # 7 inference network 59 | outputs = nms_node.get_outputs() 60 | network.generate_topo_order(["bgr_image",]) 61 | result = network.inference({"bgr_image": test_input}, outputs) 62 | 63 | # 8 save network info to json file 64 | network.export_json() 65 | 66 | -------------------------------------------------------------------------------- /tensorrt_engine.cpp: -------------------------------------------------------------------------------- 1 | #include "tensorrt_engine.hpp" 2 | #include "create_node.hpp" 3 | #include 4 | #include 5 | 6 | using namespace std; 7 | 8 | namespace tensorrtInference 9 | { 10 | tensorrtEngine::tensorrtEngine(std::string jsonFile, std::string weightsFile, bool fp16Flag) 11 | { 12 | weightsAndGraph.reset(new weightsAndGraphParse(jsonFile, weightsFile, fp16Flag)); 13 | CHECK_ASSERT((weightsAndGraph.get()->getInitFlag() != false), "init jsonFile and weightsFile fail!!\n"); 14 | } 15 | 16 | tensorrtEngine::tensorrtEngine(std::string engineFile, int gpuId) 17 | { 18 | cudaRuntime.reset(new CUDARuntime(gpuId)); 19 | executionInfo.reset(new executionParse(cudaRuntime.get(), engineFile)); 20 | CHECK_ASSERT((executionInfo.get()->getInitFlag() != false), 
"init engineFile fail!!\n"); 21 | } 22 | 23 | tensorrtEngine::~tensorrtEngine() 24 | { 25 | } 26 | 27 | bool tensorrtEngine::saveEnginePlanFile(std::string saveFile) 28 | { 29 | return weightsAndGraph->saveEnginePlanFile(saveFile); 30 | } 31 | 32 | void tensorrtEngine::prepareData(std::map dataMap) 33 | { 34 | auto allTensors = executionInfo->getTensorsInfo(); 35 | for(auto inputData : dataMap) 36 | { 37 | if(allTensors.count(inputData.first) != 0) 38 | { 39 | allTensors[inputData.first]->setHost(dataMap[inputData.first]); 40 | } 41 | } 42 | } 43 | 44 | void tensorrtEngine::doInference(bool syncFlag) 45 | { 46 | cudaRuntime->activate(); 47 | executionInfo->runInference(); 48 | if(syncFlag) 49 | cudaRuntime->onWaitFinish(); 50 | } 51 | std::map tensorrtEngine::getInferenceResult() 52 | { 53 | return executionInfo->getInferenceResult(); 54 | } 55 | 56 | } -------------------------------------------------------------------------------- /tensorrt_engine.hpp: -------------------------------------------------------------------------------- 1 | #ifndef __TENSORRT_ENGINE_HPP__ 2 | #define __TENSORRT_ENGINE_HPP__ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "utils.hpp" 9 | #include "weights_graph_parse.hpp" 10 | #include "execution_parse.hpp" 11 | #include "cuda_runtime.hpp" 12 | 13 | using namespace nvinfer1; 14 | using namespace std; 15 | 16 | namespace tensorrtInference 17 | { 18 | class tensorrtEngine 19 | { 20 | public: 21 | tensorrtEngine(std::string jsonFile, std::string weightsFile, bool fp16Flag = false); 22 | tensorrtEngine(std::string engineFile, int gpuId = 0); 23 | ~tensorrtEngine(); 24 | bool saveEnginePlanFile(std::string saveFile); 25 | // void createEngine(unsigned int maxBatchSize, bool fp16Flag); 26 | void prepareData(std::map dataMap); 27 | void doInference(bool syncFlag); 28 | std::map getInferenceResult(); 29 | 30 | private: 31 | std::shared_ptr weightsAndGraph; 32 | std::shared_ptr executionInfo; 33 | //gpu runtime 34 | 
std::shared_ptr cudaRuntime; 35 | }; 36 | } 37 | 38 | #endif --------------------------------------------------------------------------------