├── LICENSE ├── README.md ├── helmet_tensorrt ├── 1.jpg ├── CMakeLists.txt ├── build │ ├── CMakeCache.txt │ ├── CMakeFiles │ │ ├── 3.14.4 │ │ │ ├── CMakeCCompiler.cmake │ │ │ ├── CMakeCXXCompiler.cmake │ │ │ ├── CMakeDetermineCompilerABI_C.bin │ │ │ ├── CMakeDetermineCompilerABI_CXX.bin │ │ │ ├── CMakeSystem.cmake │ │ │ ├── CompilerIdC │ │ │ │ ├── CMakeCCompilerId.c │ │ │ │ └── a.out │ │ │ └── CompilerIdCXX │ │ │ │ ├── CMakeCXXCompilerId.cpp │ │ │ │ └── a.out │ │ ├── CMakeDirectoryInformation.cmake │ │ ├── CMakeOutput.log │ │ ├── CMakeRuleHashes.txt │ │ ├── Makefile.cmake │ │ ├── Makefile2 │ │ ├── TargetDirectories.txt │ │ ├── cmake.check_cache │ │ ├── feature_tests.bin │ │ ├── feature_tests.c │ │ ├── feature_tests.cxx │ │ ├── myplugins.dir │ │ │ ├── DependInfo.cmake │ │ │ ├── build.make │ │ │ ├── cmake_clean.cmake │ │ │ ├── depend.internal │ │ │ ├── depend.make │ │ │ ├── flags.make │ │ │ ├── link.txt │ │ │ ├── myplugins_generated_yololayer.cu.o │ │ │ ├── myplugins_generated_yololayer.cu.o.Debug.cmake │ │ │ ├── myplugins_generated_yololayer.cu.o.cmake.pre-gen │ │ │ ├── myplugins_generated_yololayer.cu.o.depend │ │ │ └── progress.make │ │ ├── progress.marks │ │ └── yolov5.dir │ │ │ ├── CXX.includecache │ │ │ ├── DependInfo.cmake │ │ │ ├── build.make │ │ │ ├── calibrator.cpp.o │ │ │ ├── cmake_clean.cmake │ │ │ ├── depend.internal │ │ │ ├── depend.make │ │ │ ├── flags.make │ │ │ ├── link.txt │ │ │ ├── progress.make │ │ │ └── yolov5.cpp.o │ ├── Makefile │ ├── cmake_install.cmake │ ├── libmyplugins.so │ └── yolov5 ├── calibrator.cpp ├── calibrator.h ├── common.hpp ├── cuda_utils.h ├── gen_wts.py ├── logging.h ├── macros.h ├── output │ └── 1.jpg ├── samples │ └── 1.jpg ├── utils.h ├── yololayer.cu ├── yololayer.h ├── yolov5.cpp └── yolov5_trt.py ├── helmet_yolov5 ├── .dockerignore ├── .gitattributes ├── .github │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE │ │ ├── bug-report.md │ │ ├── feature-request.md │ │ └── question.md │ ├── dependabot.yml │ └── workflows │ │ ├── ci-testing.yml │ │ ├── codeql-analysis.yml │ │ ├── greetings.yml │ │ ├── rebase.yml │ │ └── stale.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── data │ ├── Argoverse.yaml │ ├── GlobalWheat2020.yaml │ ├── Objects365.yaml │ ├── README.md │ ├── SKU-110K.yaml │ ├── VOC.yaml │ ├── VisDrone.yaml │ ├── coco.yaml │ ├── coco128.yaml │ ├── custom_data.yaml │ ├── hyps │ │ ├── hyp.finetune.yaml │ │ ├── hyp.finetune_objects365.yaml │ │ ├── hyp.scratch-p6.yaml │ │ └── hyp.scratch.yaml │ ├── images │ │ ├── bus.jpg │ │ └── zidane.jpg │ ├── scripts │ │ ├── download_weights.sh │ │ ├── get_coco.sh │ │ └── get_coco128.sh │ └── xView.yaml ├── detect.py ├── export.py ├── hubconf.py ├── models │ ├── __init__.py │ ├── common.py │ ├── experimental.py │ ├── hub │ │ ├── anchors.yaml │ │ ├── yolov3-spp.yaml │ │ ├── yolov3-tiny.yaml │ │ ├── yolov3.yaml │ │ ├── yolov5-bifpn.yaml │ │ ├── yolov5-fpn.yaml │ │ ├── yolov5-p2.yaml │ │ ├── yolov5-p6.yaml │ │ ├── yolov5-p7.yaml │ │ ├── yolov5-panet.yaml │ │ ├── yolov5l6.yaml │ │ ├── yolov5m6.yaml │ │ ├── yolov5s-transformer.yaml │ │ ├── yolov5s6.yaml │ │ └── yolov5x6.yaml │ ├── yolo.py │ ├── yolov5l.yaml │ ├── yolov5m.yaml │ ├── yolov5s.yaml │ └── yolov5x.yaml ├── requirements.txt ├── train.py ├── tutorial.ipynb ├── utils │ ├── __init__.py │ ├── activations.py │ ├── augmentations.py │ ├── autoanchor.py │ ├── aws │ │ ├── __init__.py │ │ ├── mime.sh │ │ ├── resume.py │ │ └── userdata.sh │ ├── callbacks.py │ ├── datasets.py │ ├── downloads.py │ ├── flask_rest_api │ │ ├── README.md │ │ ├── example_request.py │ │ └── restapi.py 
│ ├── general.py │ ├── google_app_engine │ │ ├── Dockerfile │ │ ├── additional_requirements.txt │ │ └── app.yaml │ ├── loggers │ │ └── __init__.py │ ├── loss.py │ ├── metrics.py │ ├── plots.py │ └── torch_utils.py └── val.py ├── result_img ├── results.jpg ├── results.png └── val_batch2_pred.jpg └── utils ├── crawl_webImg.py ├── gen_yolo_format.py ├── generate_txt.py ├── remove_empty.py └── rename.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Yuri 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Helmet-Detection-YoloV5 2 | This project implements safety-helmet detection for construction sites based on YoloV5s v5.0. The primary deployment target is the Jetson Nano, although the code also runs on Windows/Linux, and TensorRT acceleration is included so the model can process data faster and closer to real time on edge hardware. To **emphasize** once more: the network used here is **YoloV5s**, the smallest model of the family, and the code version is **V5.0** (the various YoloV5 releases are not fully compatible with one another). 3 | 4 | ![Demo 1](https://z3.ax1x.com/2021/08/17/f4v1Z8.jpg) 5 | # Requirement 6 | ``` 7 | Python>=3.6.0 8 | Pillow 9 | torch>=1.7.0 10 | torchvision>=0.8.0 11 | numpy>=1.18.5 12 | matplotlib>=3.2.2 13 | opencv-python>=4.1.2 14 | PyYAML>=5.3.1 15 | scipy>=1.4.1 16 | tqdm>=4.41.0 17 | ``` 18 | 19 | # Quick start 20 | - Step 1: set up the environment and clone the project 21 | ``` 22 | $ git clone https://github.com/FanDady/Helmet-Detection-YoloV5.git 23 | $ cd Helmet-Detection-YoloV5 24 | ``` 25 | - Step 2: download the helmet detection model file and the helmet TensorRT engine file, then put them in the corresponding locations 26 | ``` 27 | $ Helmet model file, Baidu Netdisk link: https://pan.baidu.com/s/1n7pq4HoUCDcClURTPt9D5w 28 | $ Helmet model file extraction code: k24b 29 | $ Helmet TensorRT file, Baidu Netdisk link: https://pan.baidu.com/s/1olMmUR6T5tQN4aBJeINK9Q 30 | $ Helmet TensorRT file extraction code: ywfk 31 | $ If the links expire, contact me by email or leave a message under Issues 32 | ``` 33 | - Step 3: YoloV5s inference without TensorRT acceleration 34 | ``` 35 | $ cd helmet_yolov5 36 | $ python detect.py --source 0 # webcam 37 | file.jpg # image 38 | file.mp4 # video 39 | path/ # directory 40 | path/*.jpg # glob 41 | rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa # rtsp stream 42 | http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8 # http stream 43 | 44 | $ Example: python detect.py --source test.jpg --weights helmet.pt 45 | ``` 46 | 47 | - Step 4 (optional): YoloV5 inference with TensorRT acceleration 48 | ``` 49 | $ cd helmet_tensorrt 50 | $ python3 yolov5_trt.py --source img 51 | video 52 | csiCam 53 | usbCam 54 | $ Example: python3 yolov5_trt.py --source img --img_dir test.jpg --engine_dir helmet.engine 55 | python3 yolov5_trt.py --source img --img_dir test.jpg --engine_dir helmet.engine 56 | python3 yolov5_trt.py --source usbCam --engine_dir helmet.engine (uses camera /dev/video0 by default) 57 | python3 yolov5_trt.py --source csiCam --engine_dir helmet.engine (set the CSI camera parameters to match your own hardware) 58 | ``` 59 | 60 | # Helmet Dataset 61 | > Note: the two dataset versions below contain exactly the same images; only the directory layout and annotation file format differ, and the two formats can be converted into each other 62 | - Helmet dataset in VOC format 63 | ``` 64 | # Baidu Netdisk link: https://pan.baidu.com/s/1dE23iElE3iGVdsPfQYm3jg 65 | # Extraction code: ir9x 66 | ``` 67 | - Helmet dataset in Yolo format 68 | ``` 69 | # Baidu Netdisk link: https://pan.baidu.com/s/1CceCFIYzpBjjPcCe4_dr7g 70 | # Extraction code: gyre 71 | ``` 72 | 73 | # How to train 74 | - Prepare the helmet dataset in yolo format (shared above) and the official YoloV5s weight file 75 | ``` 76 | # YoloV5s weights, Baidu Netdisk link: https://pan.baidu.com/s/1PPEDV2UZsPLpugEAEW2wGg 77 | # Extraction code: 6pfy 78 | ``` 79 | - (Optional) Build your own dataset: collect and name the images, annotate them with a tool such as LabelBox, CVAT, or 精灵标注助手 (Colabeler) to generate the XML files, and arrange the files following the VOC layout below; the txt files under Main can be generated with ```../utils/generate_txt.py``` 80 | ``` 81 | ---| 82 | |---Annotations----.xml 83 | | 84 | |---JPEGImages-----.jpg 85 | | 86 | |---Main-----------| 87 | |--train.txt 88 | |--val.txt 89 | |--trainval.txt 90 | |--test.txt 91 | ``` 92 | - (Optional) Convert the VOC-format dataset into yolo format with ```../utils/gen_yolo_format.py```, which produces the layout below 93 | ``` 94 | ---| 95 | |---images--| 96 | | |--test 97 | | |--train 98 | | |--val 99 | | 100 | |---labels--| 101 | |--train 102 | |--val 103 | |--test 104 | ``` 105 | - Clone the official YoloV5 code 106 | ``` 107 | $ git clone https://github.com/ultralytics/yolov5.git 108 | ``` 109 | - Prepare the environment 110 | ``` 111 | $ cd yolov5 112 | $ pip install -r requirements.txt 113 | ``` 114 | - Create the configuration by editing ```data/custom_data.yaml``` 115 | ``` 116 | # dataset paths 117 | train: data/Safety_Helmet_Train_dataset/score/images/train 118 | val: data/Safety_Helmet_Train_dataset/score/images/val 119 | 120 | # number of classes 121 | nc: 2 122 | 123 | # class names 124 | names: ['person', 'hat'] 125 | ``` 126 | - In the models folder choose the model to train (YoloV5s here), adjust its configuration, and save it as ```yolov5s.yaml``` 127 | ``` 128 | # only the number of classes needs to be changed here 129 | nc: 2 # number of classes 130 | ``` 131 | - Run the training script 132 | ``` 133 | $ python train.py --epochs 200 --data custom_data.yaml --cfg yolov5s.yaml --weights yolov5s.pt --device 0 134 | ``` 135 | - A runs folder is created during training; best.pt inside its weights directory is the trained model 136 | - To accelerate the trained model with TensorRT, see the official tutorial [YoloV5_tensorrt](https://github.com/wang-xinyu/tensorrtx/tree/master/yolov5); a sketch of the typical conversion workflow is shown below
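Below is a minimal sketch of that conversion workflow, assuming a YoloV5s model with the two classes configured above. The file names (`best.pt`, `best.wts`, `helmet.engine`) are placeholders, and the `./yolov5 -s` / `-d` arguments follow the wang-xinyu/tensorrtx yolov5 (v5.0) tool that `helmet_tensorrt` is derived from, so check `yolov5.cpp` and the tensorrtx documentation if your version expects different flags.

```
# gen_wts.py imports utils.torch_utils, so run it from inside the yolov5 training repo (e.g. helmet_yolov5)
$ cp ../helmet_tensorrt/gen_wts.py .
$ python gen_wts.py best.pt            # writes best.wts next to best.pt
# build the TensorRT demo; CLASS_NUM in yololayer.h is already set to 2 (person/hat)
$ cd ../helmet_tensorrt && mkdir -p build && cd build
$ cmake .. && make
# serialize the engine ('s' selects the yolov5s variant), then test it on the bundled samples folder
$ sudo ./yolov5 -s best.wts helmet.engine s
$ sudo ./yolov5 -d helmet.engine ../samples
```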
137 | 138 | # Results 139 | - The detection classes are person and hat, meaning not wearing a helmet and wearing a helmet respectively 140 | ![Demo 2](https://z3.ax1x.com/2021/08/17/f4vYGj.jpg) 141 | - The figure below shows the training metric curves 142 | ![Demo 3](https://z3.ax1x.com/2021/08/17/f4vcW9.png) 143 | 144 | # Reference 145 | - [YoloV5](https://github.com/ultralytics/yolov5) 146 | - [YoloV5-tensorrt](https://github.com/wang-xinyu/tensorrtx/tree/master/yolov5) 147 | 148 | -------------------------------------------------------------------------------- /helmet_tensorrt/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/1.jpg -------------------------------------------------------------------------------- /helmet_tensorrt/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.6) 2 | 3 | project(yolov5) 4 | 5 | add_definitions(-std=c++11) 6 | add_definitions(-DAPI_EXPORTS) 7 | option(CUDA_USE_STATIC_CUDA_RUNTIME OFF) 8 | set(CMAKE_CXX_STANDARD 11) 9 | set(CMAKE_BUILD_TYPE Debug) 10 | 11 | find_package(CUDA REQUIRED) 12 | 13 | if(WIN32) 14 | enable_language(CUDA) 15 | endif(WIN32) 16 | 17 | 
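# Note: the CUDA/TensorRT include and link paths further below assume an x86_64 installation;
# on a Jetson Nano (aarch64 with JetPack) the TensorRT headers and libraries normally live under
# /usr/include/aarch64-linux-gnu and /usr/lib/aarch64-linux-gnu, so adjust the tensorrt lines when building there.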
include_directories(${PROJECT_SOURCE_DIR}/include) 18 | # include and link dirs of cuda and tensorrt, you need adapt them if yours are different 19 | # cuda 20 | include_directories(/usr/local/cuda/include) 21 | link_directories(/usr/local/cuda/lib64) 22 | # tensorrt 23 | include_directories(/usr/include/x86_64-linux-gnu/) 24 | link_directories(/usr/lib/x86_64-linux-gnu/) 25 | 26 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Ofast -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED") 27 | 28 | cuda_add_library(myplugins SHARED ${PROJECT_SOURCE_DIR}/yololayer.cu) 29 | target_link_libraries(myplugins nvinfer cudart) 30 | 31 | find_package(OpenCV) 32 | include_directories(${OpenCV_INCLUDE_DIRS}) 33 | 34 | add_executable(yolov5 ${PROJECT_SOURCE_DIR}/calibrator.cpp ${PROJECT_SOURCE_DIR}/yolov5.cpp) 35 | target_link_libraries(yolov5 nvinfer) 36 | target_link_libraries(yolov5 cudart) 37 | target_link_libraries(yolov5 myplugins) 38 | target_link_libraries(yolov5 ${OpenCV_LIBS}) 39 | 40 | if(UNIX) 41 | add_definitions(-O2 -pthread) 42 | endif(UNIX) 43 | 44 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/3.14.4/CMakeCCompiler.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_C_COMPILER "/usr/bin/cc") 2 | set(CMAKE_C_COMPILER_ARG1 "") 3 | set(CMAKE_C_COMPILER_ID "GNU") 4 | set(CMAKE_C_COMPILER_VERSION "7.5.0") 5 | set(CMAKE_C_COMPILER_VERSION_INTERNAL "") 6 | set(CMAKE_C_COMPILER_WRAPPER "") 7 | set(CMAKE_C_STANDARD_COMPUTED_DEFAULT "11") 8 | set(CMAKE_C_COMPILE_FEATURES "c_std_90;c_function_prototypes;c_std_99;c_restrict;c_variadic_macros;c_std_11;c_static_assert") 9 | set(CMAKE_C90_COMPILE_FEATURES "c_std_90;c_function_prototypes") 10 | set(CMAKE_C99_COMPILE_FEATURES "c_std_99;c_restrict;c_variadic_macros") 11 | set(CMAKE_C11_COMPILE_FEATURES "c_std_11;c_static_assert") 12 | 13 | set(CMAKE_C_PLATFORM_ID "Linux") 14 | set(CMAKE_C_SIMULATE_ID "") 15 | set(CMAKE_C_SIMULATE_VERSION "") 16 | 17 | 18 | 19 | set(CMAKE_AR "/usr/bin/ar") 20 | set(CMAKE_C_COMPILER_AR "/usr/bin/gcc-ar-7") 21 | set(CMAKE_RANLIB "/usr/bin/ranlib") 22 | set(CMAKE_C_COMPILER_RANLIB "/usr/bin/gcc-ranlib-7") 23 | set(CMAKE_LINKER "/usr/bin/ld") 24 | set(CMAKE_MT "") 25 | set(CMAKE_COMPILER_IS_GNUCC 1) 26 | set(CMAKE_C_COMPILER_LOADED 1) 27 | set(CMAKE_C_COMPILER_WORKS TRUE) 28 | set(CMAKE_C_ABI_COMPILED TRUE) 29 | set(CMAKE_COMPILER_IS_MINGW ) 30 | set(CMAKE_COMPILER_IS_CYGWIN ) 31 | if(CMAKE_COMPILER_IS_CYGWIN) 32 | set(CYGWIN 1) 33 | set(UNIX 1) 34 | endif() 35 | 36 | set(CMAKE_C_COMPILER_ENV_VAR "CC") 37 | 38 | if(CMAKE_COMPILER_IS_MINGW) 39 | set(MINGW 1) 40 | endif() 41 | set(CMAKE_C_COMPILER_ID_RUN 1) 42 | set(CMAKE_C_SOURCE_FILE_EXTENSIONS c;m) 43 | set(CMAKE_C_IGNORE_EXTENSIONS h;H;o;O;obj;OBJ;def;DEF;rc;RC) 44 | set(CMAKE_C_LINKER_PREFERENCE 10) 45 | 46 | # Save compiler ABI information. 
47 | set(CMAKE_C_SIZEOF_DATA_PTR "8") 48 | set(CMAKE_C_COMPILER_ABI "ELF") 49 | set(CMAKE_C_LIBRARY_ARCHITECTURE "aarch64-linux-gnu") 50 | 51 | if(CMAKE_C_SIZEOF_DATA_PTR) 52 | set(CMAKE_SIZEOF_VOID_P "${CMAKE_C_SIZEOF_DATA_PTR}") 53 | endif() 54 | 55 | if(CMAKE_C_COMPILER_ABI) 56 | set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_C_COMPILER_ABI}") 57 | endif() 58 | 59 | if(CMAKE_C_LIBRARY_ARCHITECTURE) 60 | set(CMAKE_LIBRARY_ARCHITECTURE "aarch64-linux-gnu") 61 | endif() 62 | 63 | set(CMAKE_C_CL_SHOWINCLUDES_PREFIX "") 64 | if(CMAKE_C_CL_SHOWINCLUDES_PREFIX) 65 | set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_C_CL_SHOWINCLUDES_PREFIX}") 66 | endif() 67 | 68 | 69 | 70 | 71 | 72 | set(CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES "/usr/lib/gcc/aarch64-linux-gnu/7/include;/usr/local/include;/usr/lib/gcc/aarch64-linux-gnu/7/include-fixed;/usr/include/aarch64-linux-gnu;/usr/include") 73 | set(CMAKE_C_IMPLICIT_LINK_LIBRARIES "gcc;gcc_s;c;gcc;gcc_s") 74 | set(CMAKE_C_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/aarch64-linux-gnu/7;/usr/lib/aarch64-linux-gnu;/usr/lib;/lib/aarch64-linux-gnu;/lib") 75 | set(CMAKE_C_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "") 76 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/3.14.4/CMakeCXXCompiler.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_CXX_COMPILER "/usr/bin/c++") 2 | set(CMAKE_CXX_COMPILER_ARG1 "") 3 | set(CMAKE_CXX_COMPILER_ID "GNU") 4 | set(CMAKE_CXX_COMPILER_VERSION "7.5.0") 5 | set(CMAKE_CXX_COMPILER_VERSION_INTERNAL "") 6 | set(CMAKE_CXX_COMPILER_WRAPPER "") 7 | set(CMAKE_CXX_STANDARD_COMPUTED_DEFAULT "14") 8 | set(CMAKE_CXX_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters;cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates;cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates;cxx_std_17") 9 | set(CMAKE_CXX98_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters") 10 | set(CMAKE_CXX11_COMPILE_FEATURES 
"cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates") 11 | set(CMAKE_CXX14_COMPILE_FEATURES "cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates") 12 | set(CMAKE_CXX17_COMPILE_FEATURES "cxx_std_17") 13 | set(CMAKE_CXX20_COMPILE_FEATURES "") 14 | 15 | set(CMAKE_CXX_PLATFORM_ID "Linux") 16 | set(CMAKE_CXX_SIMULATE_ID "") 17 | set(CMAKE_CXX_SIMULATE_VERSION "") 18 | 19 | 20 | 21 | set(CMAKE_AR "/usr/bin/ar") 22 | set(CMAKE_CXX_COMPILER_AR "/usr/bin/gcc-ar-7") 23 | set(CMAKE_RANLIB "/usr/bin/ranlib") 24 | set(CMAKE_CXX_COMPILER_RANLIB "/usr/bin/gcc-ranlib-7") 25 | set(CMAKE_LINKER "/usr/bin/ld") 26 | set(CMAKE_MT "") 27 | set(CMAKE_COMPILER_IS_GNUCXX 1) 28 | set(CMAKE_CXX_COMPILER_LOADED 1) 29 | set(CMAKE_CXX_COMPILER_WORKS TRUE) 30 | set(CMAKE_CXX_ABI_COMPILED TRUE) 31 | set(CMAKE_COMPILER_IS_MINGW ) 32 | set(CMAKE_COMPILER_IS_CYGWIN ) 33 | if(CMAKE_COMPILER_IS_CYGWIN) 34 | set(CYGWIN 1) 35 | set(UNIX 1) 36 | endif() 37 | 38 | set(CMAKE_CXX_COMPILER_ENV_VAR "CXX") 39 | 40 | if(CMAKE_COMPILER_IS_MINGW) 41 | set(MINGW 1) 42 | endif() 43 | set(CMAKE_CXX_COMPILER_ID_RUN 1) 44 | set(CMAKE_CXX_IGNORE_EXTENSIONS inl;h;hpp;HPP;H;o;O;obj;OBJ;def;DEF;rc;RC) 45 | set(CMAKE_CXX_SOURCE_FILE_EXTENSIONS C;M;c++;cc;cpp;cxx;mm;CPP) 46 | set(CMAKE_CXX_LINKER_PREFERENCE 30) 47 | set(CMAKE_CXX_LINKER_PREFERENCE_PROPAGATES 1) 48 | 49 | # Save compiler ABI information. 
50 | set(CMAKE_CXX_SIZEOF_DATA_PTR "8") 51 | set(CMAKE_CXX_COMPILER_ABI "ELF") 52 | set(CMAKE_CXX_LIBRARY_ARCHITECTURE "aarch64-linux-gnu") 53 | 54 | if(CMAKE_CXX_SIZEOF_DATA_PTR) 55 | set(CMAKE_SIZEOF_VOID_P "${CMAKE_CXX_SIZEOF_DATA_PTR}") 56 | endif() 57 | 58 | if(CMAKE_CXX_COMPILER_ABI) 59 | set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_CXX_COMPILER_ABI}") 60 | endif() 61 | 62 | if(CMAKE_CXX_LIBRARY_ARCHITECTURE) 63 | set(CMAKE_LIBRARY_ARCHITECTURE "aarch64-linux-gnu") 64 | endif() 65 | 66 | set(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX "") 67 | if(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX) 68 | set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_CXX_CL_SHOWINCLUDES_PREFIX}") 69 | endif() 70 | 71 | 72 | 73 | 74 | 75 | set(CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES "/usr/include/c++/7;/usr/include/aarch64-linux-gnu/c++/7;/usr/include/c++/7/backward;/usr/lib/gcc/aarch64-linux-gnu/7/include;/usr/local/include;/usr/lib/gcc/aarch64-linux-gnu/7/include-fixed;/usr/include/aarch64-linux-gnu;/usr/include") 76 | set(CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "stdc++;m;gcc_s;gcc;c;gcc_s;gcc") 77 | set(CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/aarch64-linux-gnu/7;/usr/lib/aarch64-linux-gnu;/usr/lib;/lib/aarch64-linux-gnu;/lib") 78 | set(CMAKE_CXX_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "") 79 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/3.14.4/CMakeDetermineCompilerABI_C.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/CMakeFiles/3.14.4/CMakeDetermineCompilerABI_C.bin -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/3.14.4/CMakeDetermineCompilerABI_CXX.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/CMakeFiles/3.14.4/CMakeDetermineCompilerABI_CXX.bin -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/3.14.4/CMakeSystem.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_HOST_SYSTEM "Linux-4.9.201") 2 | set(CMAKE_HOST_SYSTEM_NAME "Linux") 3 | set(CMAKE_HOST_SYSTEM_VERSION "4.9.201") 4 | set(CMAKE_HOST_SYSTEM_PROCESSOR "aarch64") 5 | 6 | 7 | 8 | set(CMAKE_SYSTEM "Linux-4.9.201") 9 | set(CMAKE_SYSTEM_NAME "Linux") 10 | set(CMAKE_SYSTEM_VERSION "4.9.201") 11 | set(CMAKE_SYSTEM_PROCESSOR "aarch64") 12 | 13 | set(CMAKE_CROSSCOMPILING "FALSE") 14 | 15 | set(CMAKE_SYSTEM_LOADED 1) 16 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/3.14.4/CompilerIdC/a.out: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/CMakeFiles/3.14.4/CompilerIdC/a.out -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/3.14.4/CompilerIdCXX/a.out: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/CMakeFiles/3.14.4/CompilerIdCXX/a.out 
-------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/CMakeDirectoryInformation.cmake: -------------------------------------------------------------------------------- 1 | # CMAKE generated file: DO NOT EDIT! 2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.14 3 | 4 | # Relative path conversion top directories. 5 | set(CMAKE_RELATIVE_PATH_TOP_SOURCE "/home/nvidia/tensorrtx/yolov5") 6 | set(CMAKE_RELATIVE_PATH_TOP_BINARY "/home/nvidia/tensorrtx/yolov5/build") 7 | 8 | # Force unix paths in dependencies. 9 | set(CMAKE_FORCE_UNIX_PATHS 1) 10 | 11 | 12 | # The C and CXX include file regular expressions for this directory. 13 | set(CMAKE_C_INCLUDE_REGEX_SCAN "^.*$") 14 | set(CMAKE_C_INCLUDE_REGEX_COMPLAIN "^$") 15 | set(CMAKE_CXX_INCLUDE_REGEX_SCAN ${CMAKE_C_INCLUDE_REGEX_SCAN}) 16 | set(CMAKE_CXX_INCLUDE_REGEX_COMPLAIN ${CMAKE_C_INCLUDE_REGEX_COMPLAIN}) 17 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/CMakeRuleHashes.txt: -------------------------------------------------------------------------------- 1 | # Hashes of file build rules. 2 | a0ea58fb197dcfc53a0adc7a5e1bf5ae CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o 3 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/Makefile2: -------------------------------------------------------------------------------- 1 | # CMAKE generated file: DO NOT EDIT! 2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.14 3 | 4 | # Default target executed when no arguments are given to make. 5 | default_target: all 6 | 7 | .PHONY : default_target 8 | 9 | # The main recursive all target 10 | all: 11 | 12 | .PHONY : all 13 | 14 | # The main recursive preinstall target 15 | preinstall: 16 | 17 | .PHONY : preinstall 18 | 19 | # The main recursive clean target 20 | clean: 21 | 22 | .PHONY : clean 23 | 24 | #============================================================================= 25 | # Special targets provided by cmake. 26 | 27 | # Disable implicit rules so canonical targets will work. 28 | .SUFFIXES: 29 | 30 | 31 | # Remove some rules from gmake that .SUFFIXES does not remove. 32 | SUFFIXES = 33 | 34 | .SUFFIXES: .hpux_make_needs_suffix_list 35 | 36 | 37 | # Suppress display of executed commands. 38 | $(VERBOSE).SILENT: 39 | 40 | 41 | # A target that is always out of date. 42 | cmake_force: 43 | 44 | .PHONY : cmake_force 45 | 46 | #============================================================================= 47 | # Set environment variables for the build. 48 | 49 | # The shell in which to execute make rules. 50 | SHELL = /bin/sh 51 | 52 | # The CMake executable. 53 | CMAKE_COMMAND = /usr/bin/cmake 54 | 55 | # The command to remove a file. 56 | RM = /usr/bin/cmake -E remove -f 57 | 58 | # Escaping for special characters. 59 | EQUALS = = 60 | 61 | # The top-level source directory on which CMake was run. 62 | CMAKE_SOURCE_DIR = /home/nvidia/tensorrtx/yolov5 63 | 64 | # The top-level build directory on which CMake was run. 65 | CMAKE_BINARY_DIR = /home/nvidia/tensorrtx/yolov5/build 66 | 67 | #============================================================================= 68 | # Target rules for target CMakeFiles/myplugins.dir 69 | 70 | # All Build rule for target. 
71 | CMakeFiles/myplugins.dir/all: 72 | $(MAKE) -f CMakeFiles/myplugins.dir/build.make CMakeFiles/myplugins.dir/depend 73 | $(MAKE) -f CMakeFiles/myplugins.dir/build.make CMakeFiles/myplugins.dir/build 74 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --progress-dir=/home/nvidia/tensorrtx/yolov5/build/CMakeFiles --progress-num=1,2 "Built target myplugins" 75 | .PHONY : CMakeFiles/myplugins.dir/all 76 | 77 | # Include target in all. 78 | all: CMakeFiles/myplugins.dir/all 79 | 80 | .PHONY : all 81 | 82 | # Build rule for subdir invocation for target. 83 | CMakeFiles/myplugins.dir/rule: cmake_check_build_system 84 | $(CMAKE_COMMAND) -E cmake_progress_start /home/nvidia/tensorrtx/yolov5/build/CMakeFiles 2 85 | $(MAKE) -f CMakeFiles/Makefile2 CMakeFiles/myplugins.dir/all 86 | $(CMAKE_COMMAND) -E cmake_progress_start /home/nvidia/tensorrtx/yolov5/build/CMakeFiles 0 87 | .PHONY : CMakeFiles/myplugins.dir/rule 88 | 89 | # Convenience name for target. 90 | myplugins: CMakeFiles/myplugins.dir/rule 91 | 92 | .PHONY : myplugins 93 | 94 | # clean rule for target. 95 | CMakeFiles/myplugins.dir/clean: 96 | $(MAKE) -f CMakeFiles/myplugins.dir/build.make CMakeFiles/myplugins.dir/clean 97 | .PHONY : CMakeFiles/myplugins.dir/clean 98 | 99 | # clean rule for target. 100 | clean: CMakeFiles/myplugins.dir/clean 101 | 102 | .PHONY : clean 103 | 104 | #============================================================================= 105 | # Target rules for target CMakeFiles/yolov5.dir 106 | 107 | # All Build rule for target. 108 | CMakeFiles/yolov5.dir/all: CMakeFiles/myplugins.dir/all 109 | $(MAKE) -f CMakeFiles/yolov5.dir/build.make CMakeFiles/yolov5.dir/depend 110 | $(MAKE) -f CMakeFiles/yolov5.dir/build.make CMakeFiles/yolov5.dir/build 111 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --progress-dir=/home/nvidia/tensorrtx/yolov5/build/CMakeFiles --progress-num=3,4,5 "Built target yolov5" 112 | .PHONY : CMakeFiles/yolov5.dir/all 113 | 114 | # Include target in all. 115 | all: CMakeFiles/yolov5.dir/all 116 | 117 | .PHONY : all 118 | 119 | # Build rule for subdir invocation for target. 120 | CMakeFiles/yolov5.dir/rule: cmake_check_build_system 121 | $(CMAKE_COMMAND) -E cmake_progress_start /home/nvidia/tensorrtx/yolov5/build/CMakeFiles 5 122 | $(MAKE) -f CMakeFiles/Makefile2 CMakeFiles/yolov5.dir/all 123 | $(CMAKE_COMMAND) -E cmake_progress_start /home/nvidia/tensorrtx/yolov5/build/CMakeFiles 0 124 | .PHONY : CMakeFiles/yolov5.dir/rule 125 | 126 | # Convenience name for target. 127 | yolov5: CMakeFiles/yolov5.dir/rule 128 | 129 | .PHONY : yolov5 130 | 131 | # clean rule for target. 132 | CMakeFiles/yolov5.dir/clean: 133 | $(MAKE) -f CMakeFiles/yolov5.dir/build.make CMakeFiles/yolov5.dir/clean 134 | .PHONY : CMakeFiles/yolov5.dir/clean 135 | 136 | # clean rule for target. 137 | clean: CMakeFiles/yolov5.dir/clean 138 | 139 | .PHONY : clean 140 | 141 | #============================================================================= 142 | # Special targets to cleanup operation of make. 143 | 144 | # Special rule to run CMake to check the build system integrity. 145 | # No rule that depends on this can have commands that come from listfiles 146 | # because they might be regenerated. 
147 | cmake_check_build_system: 148 | $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0 149 | .PHONY : cmake_check_build_system 150 | 151 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/TargetDirectories.txt: -------------------------------------------------------------------------------- 1 | /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/rebuild_cache.dir 2 | /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/edit_cache.dir 3 | /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir 4 | /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/yolov5.dir 5 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/cmake.check_cache: -------------------------------------------------------------------------------- 1 | # This file is generated by cmake for dependency checking of the CMakeCache.txt file 2 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/feature_tests.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/CMakeFiles/feature_tests.bin -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/feature_tests.c: -------------------------------------------------------------------------------- 1 | 2 | const char features[] = {"\n" 3 | "C_FEATURE:" 4 | #if (__GNUC__ * 100 + __GNUC_MINOR__) >= 304 5 | "1" 6 | #else 7 | "0" 8 | #endif 9 | "c_function_prototypes\n" 10 | "C_FEATURE:" 11 | #if (__GNUC__ * 100 + __GNUC_MINOR__) >= 304 && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L 12 | "1" 13 | #else 14 | "0" 15 | #endif 16 | "c_restrict\n" 17 | "C_FEATURE:" 18 | #if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201000L 19 | "1" 20 | #else 21 | "0" 22 | #endif 23 | "c_static_assert\n" 24 | "C_FEATURE:" 25 | #if (__GNUC__ * 100 + __GNUC_MINOR__) >= 304 && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L 26 | "1" 27 | #else 28 | "0" 29 | #endif 30 | "c_variadic_macros\n" 31 | 32 | }; 33 | 34 | int main(int argc, char** argv) { (void)argv; return features[argc]; } 35 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/myplugins.dir/DependInfo.cmake: -------------------------------------------------------------------------------- 1 | # The set of languages for which implicit dependencies are needed: 2 | set(CMAKE_DEPENDS_LANGUAGES 3 | ) 4 | # The set of files for implicit dependencies of each language: 5 | 6 | # Targets to which this target links. 7 | set(CMAKE_TARGET_LINKED_INFO_FILES 8 | ) 9 | 10 | # Fortran module output directory. 11 | set(CMAKE_Fortran_TARGET_MODULE_DIR "") 12 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/myplugins.dir/build.make: -------------------------------------------------------------------------------- 1 | # CMAKE generated file: DO NOT EDIT! 2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.14 3 | 4 | # Delete rule output on recipe failure. 5 | .DELETE_ON_ERROR: 6 | 7 | 8 | #============================================================================= 9 | # Special targets provided by cmake. 
10 | 11 | # Disable implicit rules so canonical targets will work. 12 | .SUFFIXES: 13 | 14 | 15 | # Remove some rules from gmake that .SUFFIXES does not remove. 16 | SUFFIXES = 17 | 18 | .SUFFIXES: .hpux_make_needs_suffix_list 19 | 20 | 21 | # Suppress display of executed commands. 22 | $(VERBOSE).SILENT: 23 | 24 | 25 | # A target that is always out of date. 26 | cmake_force: 27 | 28 | .PHONY : cmake_force 29 | 30 | #============================================================================= 31 | # Set environment variables for the build. 32 | 33 | # The shell in which to execute make rules. 34 | SHELL = /bin/sh 35 | 36 | # The CMake executable. 37 | CMAKE_COMMAND = /usr/bin/cmake 38 | 39 | # The command to remove a file. 40 | RM = /usr/bin/cmake -E remove -f 41 | 42 | # Escaping for special characters. 43 | EQUALS = = 44 | 45 | # The top-level source directory on which CMake was run. 46 | CMAKE_SOURCE_DIR = /home/nvidia/tensorrtx/yolov5 47 | 48 | # The top-level build directory on which CMake was run. 49 | CMAKE_BINARY_DIR = /home/nvidia/tensorrtx/yolov5/build 50 | 51 | # Include any dependencies generated for this target. 52 | include CMakeFiles/myplugins.dir/depend.make 53 | 54 | # Include the progress variables for this target. 55 | include CMakeFiles/myplugins.dir/progress.make 56 | 57 | # Include the compile flags for this target's objects. 58 | include CMakeFiles/myplugins.dir/flags.make 59 | 60 | CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o: CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o.depend 61 | CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o: CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o.Debug.cmake 62 | CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o: ../yololayer.cu 63 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --blue --bold --progress-dir=/home/nvidia/tensorrtx/yolov5/build/CMakeFiles --progress-num=$(CMAKE_PROGRESS_1) "Building NVCC (Device) object CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o" 64 | cd /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir && /usr/bin/cmake -E make_directory /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir//. 
65 | cd /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir && /usr/bin/cmake -D verbose:BOOL=$(VERBOSE) -D build_configuration:STRING=Debug -D generated_file:STRING=/home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir//./myplugins_generated_yololayer.cu.o -D generated_cubin_file:STRING=/home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir//./myplugins_generated_yololayer.cu.o.cubin.txt -P /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir//myplugins_generated_yololayer.cu.o.Debug.cmake 66 | 67 | # Object files for target myplugins 68 | myplugins_OBJECTS = 69 | 70 | # External object files for target myplugins 71 | myplugins_EXTERNAL_OBJECTS = \ 72 | "/home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o" 73 | 74 | libmyplugins.so: CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o 75 | libmyplugins.so: CMakeFiles/myplugins.dir/build.make 76 | libmyplugins.so: /usr/local/cuda/lib64/libcudart.so 77 | libmyplugins.so: CMakeFiles/myplugins.dir/link.txt 78 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --bold --progress-dir=/home/nvidia/tensorrtx/yolov5/build/CMakeFiles --progress-num=$(CMAKE_PROGRESS_2) "Linking CXX shared library libmyplugins.so" 79 | $(CMAKE_COMMAND) -E cmake_link_script CMakeFiles/myplugins.dir/link.txt --verbose=$(VERBOSE) 80 | 81 | # Rule to build all files generated by this target. 82 | CMakeFiles/myplugins.dir/build: libmyplugins.so 83 | 84 | .PHONY : CMakeFiles/myplugins.dir/build 85 | 86 | CMakeFiles/myplugins.dir/clean: 87 | $(CMAKE_COMMAND) -P CMakeFiles/myplugins.dir/cmake_clean.cmake 88 | .PHONY : CMakeFiles/myplugins.dir/clean 89 | 90 | CMakeFiles/myplugins.dir/depend: CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o 91 | cd /home/nvidia/tensorrtx/yolov5/build && $(CMAKE_COMMAND) -E cmake_depends "Unix Makefiles" /home/nvidia/tensorrtx/yolov5 /home/nvidia/tensorrtx/yolov5 /home/nvidia/tensorrtx/yolov5/build /home/nvidia/tensorrtx/yolov5/build /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir/DependInfo.cmake --color=$(COLOR) 92 | .PHONY : CMakeFiles/myplugins.dir/depend 93 | 94 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/myplugins.dir/cmake_clean.cmake: -------------------------------------------------------------------------------- 1 | file(REMOVE_RECURSE 2 | "CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o" 3 | "libmyplugins.pdb" 4 | "libmyplugins.so" 5 | ) 6 | 7 | # Per-language clean rules from dependency scanning. 8 | foreach(lang ) 9 | include(CMakeFiles/myplugins.dir/cmake_clean_${lang}.cmake OPTIONAL) 10 | endforeach() 11 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/myplugins.dir/depend.internal: -------------------------------------------------------------------------------- 1 | # CMAKE generated file: DO NOT EDIT! 2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.14 3 | 4 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/myplugins.dir/depend.make: -------------------------------------------------------------------------------- 1 | # CMAKE generated file: DO NOT EDIT! 
2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.14 3 | 4 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/myplugins.dir/flags.make: -------------------------------------------------------------------------------- 1 | # CMAKE generated file: DO NOT EDIT! 2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.14 3 | 4 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/myplugins.dir/link.txt: -------------------------------------------------------------------------------- 1 | /usr/bin/c++ -fPIC -std=c++11 -Wall -Ofast -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED -g -shared -Wl,-soname,libmyplugins.so -o libmyplugins.so CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o -L/usr/local/cuda/lib64 -L/usr/lib/x86_64-linux-gnu -Wl,-rpath,/usr/local/cuda/lib64:/usr/lib/x86_64-linux-gnu /usr/local/cuda/lib64/libcudart.so -lnvinfer -lcudart 2 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/CMakeFiles/myplugins.dir/myplugins_generated_yololayer.cu.o -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/myplugins.dir/progress.make: -------------------------------------------------------------------------------- 1 | CMAKE_PROGRESS_1 = 1 2 | CMAKE_PROGRESS_2 = 2 3 | 4 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/progress.marks: -------------------------------------------------------------------------------- 1 | 5 2 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/yolov5.dir/DependInfo.cmake: -------------------------------------------------------------------------------- 1 | # The set of languages for which implicit dependencies are needed: 2 | set(CMAKE_DEPENDS_LANGUAGES 3 | "CXX" 4 | ) 5 | # The set of files for implicit dependencies of each language: 6 | set(CMAKE_DEPENDS_CHECK_CXX 7 | "/home/nvidia/tensorrtx/yolov5/calibrator.cpp" "/home/nvidia/tensorrtx/yolov5/build/CMakeFiles/yolov5.dir/calibrator.cpp.o" 8 | "/home/nvidia/tensorrtx/yolov5/yolov5.cpp" "/home/nvidia/tensorrtx/yolov5/build/CMakeFiles/yolov5.dir/yolov5.cpp.o" 9 | ) 10 | set(CMAKE_CXX_COMPILER_ID "GNU") 11 | 12 | # Preprocessor definitions for this target. 13 | set(CMAKE_TARGET_DEFINITIONS_CXX 14 | "API_EXPORTS" 15 | ) 16 | 17 | # The include file search paths: 18 | set(CMAKE_CXX_TARGET_INCLUDE_PATH 19 | "../include" 20 | "/usr/local/cuda/include" 21 | "/usr/include/x86_64-linux-gnu" 22 | "/usr/include/opencv4" 23 | ) 24 | 25 | # Targets to which this target links. 26 | set(CMAKE_TARGET_LINKED_INFO_FILES 27 | "/home/nvidia/tensorrtx/yolov5/build/CMakeFiles/myplugins.dir/DependInfo.cmake" 28 | ) 29 | 30 | # Fortran module output directory. 31 | set(CMAKE_Fortran_TARGET_MODULE_DIR "") 32 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/yolov5.dir/build.make: -------------------------------------------------------------------------------- 1 | # CMAKE generated file: DO NOT EDIT! 
2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.14 3 | 4 | # Delete rule output on recipe failure. 5 | .DELETE_ON_ERROR: 6 | 7 | 8 | #============================================================================= 9 | # Special targets provided by cmake. 10 | 11 | # Disable implicit rules so canonical targets will work. 12 | .SUFFIXES: 13 | 14 | 15 | # Remove some rules from gmake that .SUFFIXES does not remove. 16 | SUFFIXES = 17 | 18 | .SUFFIXES: .hpux_make_needs_suffix_list 19 | 20 | 21 | # Suppress display of executed commands. 22 | $(VERBOSE).SILENT: 23 | 24 | 25 | # A target that is always out of date. 26 | cmake_force: 27 | 28 | .PHONY : cmake_force 29 | 30 | #============================================================================= 31 | # Set environment variables for the build. 32 | 33 | # The shell in which to execute make rules. 34 | SHELL = /bin/sh 35 | 36 | # The CMake executable. 37 | CMAKE_COMMAND = /usr/bin/cmake 38 | 39 | # The command to remove a file. 40 | RM = /usr/bin/cmake -E remove -f 41 | 42 | # Escaping for special characters. 43 | EQUALS = = 44 | 45 | # The top-level source directory on which CMake was run. 46 | CMAKE_SOURCE_DIR = /home/nvidia/tensorrtx/yolov5 47 | 48 | # The top-level build directory on which CMake was run. 49 | CMAKE_BINARY_DIR = /home/nvidia/tensorrtx/yolov5/build 50 | 51 | # Include any dependencies generated for this target. 52 | include CMakeFiles/yolov5.dir/depend.make 53 | 54 | # Include the progress variables for this target. 55 | include CMakeFiles/yolov5.dir/progress.make 56 | 57 | # Include the compile flags for this target's objects. 58 | include CMakeFiles/yolov5.dir/flags.make 59 | 60 | CMakeFiles/yolov5.dir/calibrator.cpp.o: CMakeFiles/yolov5.dir/flags.make 61 | CMakeFiles/yolov5.dir/calibrator.cpp.o: ../calibrator.cpp 62 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/nvidia/tensorrtx/yolov5/build/CMakeFiles --progress-num=$(CMAKE_PROGRESS_1) "Building CXX object CMakeFiles/yolov5.dir/calibrator.cpp.o" 63 | /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/yolov5.dir/calibrator.cpp.o -c /home/nvidia/tensorrtx/yolov5/calibrator.cpp 64 | 65 | CMakeFiles/yolov5.dir/calibrator.cpp.i: cmake_force 66 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/yolov5.dir/calibrator.cpp.i" 67 | /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/nvidia/tensorrtx/yolov5/calibrator.cpp > CMakeFiles/yolov5.dir/calibrator.cpp.i 68 | 69 | CMakeFiles/yolov5.dir/calibrator.cpp.s: cmake_force 70 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/yolov5.dir/calibrator.cpp.s" 71 | /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/nvidia/tensorrtx/yolov5/calibrator.cpp -o CMakeFiles/yolov5.dir/calibrator.cpp.s 72 | 73 | CMakeFiles/yolov5.dir/yolov5.cpp.o: CMakeFiles/yolov5.dir/flags.make 74 | CMakeFiles/yolov5.dir/yolov5.cpp.o: ../yolov5.cpp 75 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/nvidia/tensorrtx/yolov5/build/CMakeFiles --progress-num=$(CMAKE_PROGRESS_2) "Building CXX object CMakeFiles/yolov5.dir/yolov5.cpp.o" 76 | /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/yolov5.dir/yolov5.cpp.o -c /home/nvidia/tensorrtx/yolov5/yolov5.cpp 77 | 78 | CMakeFiles/yolov5.dir/yolov5.cpp.i: cmake_force 79 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing 
CXX source to CMakeFiles/yolov5.dir/yolov5.cpp.i" 80 | /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/nvidia/tensorrtx/yolov5/yolov5.cpp > CMakeFiles/yolov5.dir/yolov5.cpp.i 81 | 82 | CMakeFiles/yolov5.dir/yolov5.cpp.s: cmake_force 83 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/yolov5.dir/yolov5.cpp.s" 84 | /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/nvidia/tensorrtx/yolov5/yolov5.cpp -o CMakeFiles/yolov5.dir/yolov5.cpp.s 85 | 86 | # Object files for target yolov5 87 | yolov5_OBJECTS = \ 88 | "CMakeFiles/yolov5.dir/calibrator.cpp.o" \ 89 | "CMakeFiles/yolov5.dir/yolov5.cpp.o" 90 | 91 | # External object files for target yolov5 92 | yolov5_EXTERNAL_OBJECTS = 93 | 94 | yolov5: CMakeFiles/yolov5.dir/calibrator.cpp.o 95 | yolov5: CMakeFiles/yolov5.dir/yolov5.cpp.o 96 | yolov5: CMakeFiles/yolov5.dir/build.make 97 | yolov5: libmyplugins.so 98 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_dnn.so.4.1.1 99 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_gapi.so.4.1.1 100 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_highgui.so.4.1.1 101 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_ml.so.4.1.1 102 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_objdetect.so.4.1.1 103 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_photo.so.4.1.1 104 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_stitching.so.4.1.1 105 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_video.so.4.1.1 106 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_videoio.so.4.1.1 107 | yolov5: /usr/local/cuda/lib64/libcudart.so 108 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_imgcodecs.so.4.1.1 109 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_calib3d.so.4.1.1 110 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_features2d.so.4.1.1 111 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_flann.so.4.1.1 112 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_imgproc.so.4.1.1 113 | yolov5: /usr/lib/aarch64-linux-gnu/libopencv_core.so.4.1.1 114 | yolov5: CMakeFiles/yolov5.dir/link.txt 115 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --bold --progress-dir=/home/nvidia/tensorrtx/yolov5/build/CMakeFiles --progress-num=$(CMAKE_PROGRESS_3) "Linking CXX executable yolov5" 116 | $(CMAKE_COMMAND) -E cmake_link_script CMakeFiles/yolov5.dir/link.txt --verbose=$(VERBOSE) 117 | 118 | # Rule to build all files generated by this target. 
119 | CMakeFiles/yolov5.dir/build: yolov5 120 | 121 | .PHONY : CMakeFiles/yolov5.dir/build 122 | 123 | CMakeFiles/yolov5.dir/clean: 124 | $(CMAKE_COMMAND) -P CMakeFiles/yolov5.dir/cmake_clean.cmake 125 | .PHONY : CMakeFiles/yolov5.dir/clean 126 | 127 | CMakeFiles/yolov5.dir/depend: 128 | cd /home/nvidia/tensorrtx/yolov5/build && $(CMAKE_COMMAND) -E cmake_depends "Unix Makefiles" /home/nvidia/tensorrtx/yolov5 /home/nvidia/tensorrtx/yolov5 /home/nvidia/tensorrtx/yolov5/build /home/nvidia/tensorrtx/yolov5/build /home/nvidia/tensorrtx/yolov5/build/CMakeFiles/yolov5.dir/DependInfo.cmake --color=$(COLOR) 129 | .PHONY : CMakeFiles/yolov5.dir/depend 130 | 131 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/yolov5.dir/calibrator.cpp.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/CMakeFiles/yolov5.dir/calibrator.cpp.o -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/yolov5.dir/cmake_clean.cmake: -------------------------------------------------------------------------------- 1 | file(REMOVE_RECURSE 2 | "CMakeFiles/yolov5.dir/calibrator.cpp.o" 3 | "CMakeFiles/yolov5.dir/yolov5.cpp.o" 4 | "yolov5.pdb" 5 | "yolov5" 6 | ) 7 | 8 | # Per-language clean rules from dependency scanning. 9 | foreach(lang CXX) 10 | include(CMakeFiles/yolov5.dir/cmake_clean_${lang}.cmake OPTIONAL) 11 | endforeach() 12 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/yolov5.dir/flags.make: -------------------------------------------------------------------------------- 1 | # CMAKE generated file: DO NOT EDIT! 
2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.14 3 | 4 | # compile CXX with /usr/bin/c++ 5 | CXX_FLAGS = -std=c++11 -Wall -Ofast -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED -g -std=c++11 -O2 -pthread -std=gnu++11 6 | 7 | CXX_DEFINES = -DAPI_EXPORTS 8 | 9 | CXX_INCLUDES = -I/home/nvidia/tensorrtx/yolov5/include -I/usr/local/cuda/include -I/usr/include/x86_64-linux-gnu -isystem /usr/include/opencv4 10 | 11 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/yolov5.dir/link.txt: -------------------------------------------------------------------------------- 1 | /usr/bin/c++ -std=c++11 -Wall -Ofast -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED -g -rdynamic CMakeFiles/yolov5.dir/calibrator.cpp.o CMakeFiles/yolov5.dir/yolov5.cpp.o -o yolov5 -L/usr/local/cuda/lib64 -L/usr/lib/x86_64-linux-gnu -Wl,-rpath,/usr/local/cuda/lib64:/usr/lib/x86_64-linux-gnu:/home/nvidia/tensorrtx/yolov5/build -lnvinfer -lcudart libmyplugins.so /usr/lib/aarch64-linux-gnu/libopencv_dnn.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_gapi.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_highgui.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_ml.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_objdetect.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_photo.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_stitching.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_video.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_videoio.so.4.1.1 /usr/local/cuda/lib64/libcudart.so -lnvinfer -lcudart /usr/lib/aarch64-linux-gnu/libopencv_imgcodecs.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_calib3d.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_features2d.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_flann.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_imgproc.so.4.1.1 /usr/lib/aarch64-linux-gnu/libopencv_core.so.4.1.1 2 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/yolov5.dir/progress.make: -------------------------------------------------------------------------------- 1 | CMAKE_PROGRESS_1 = 3 2 | CMAKE_PROGRESS_2 = 4 3 | CMAKE_PROGRESS_3 = 5 4 | 5 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/CMakeFiles/yolov5.dir/yolov5.cpp.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/CMakeFiles/yolov5.dir/yolov5.cpp.o -------------------------------------------------------------------------------- /helmet_tensorrt/build/cmake_install.cmake: -------------------------------------------------------------------------------- 1 | # Install script for directory: /home/nvidia/tensorrtx/yolov5 2 | 3 | # Set the install prefix 4 | if(NOT DEFINED CMAKE_INSTALL_PREFIX) 5 | set(CMAKE_INSTALL_PREFIX "/usr/local") 6 | endif() 7 | string(REGEX REPLACE "/$" "" CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}") 8 | 9 | # Set the install configuration name. 10 | if(NOT DEFINED CMAKE_INSTALL_CONFIG_NAME) 11 | if(BUILD_TYPE) 12 | string(REGEX REPLACE "^[^A-Za-z0-9_]+" "" 13 | CMAKE_INSTALL_CONFIG_NAME "${BUILD_TYPE}") 14 | else() 15 | set(CMAKE_INSTALL_CONFIG_NAME "Debug") 16 | endif() 17 | message(STATUS "Install configuration: \"${CMAKE_INSTALL_CONFIG_NAME}\"") 18 | endif() 19 | 20 | # Set the component getting installed. 
21 | if(NOT CMAKE_INSTALL_COMPONENT) 22 | if(COMPONENT) 23 | message(STATUS "Install component: \"${COMPONENT}\"") 24 | set(CMAKE_INSTALL_COMPONENT "${COMPONENT}") 25 | else() 26 | set(CMAKE_INSTALL_COMPONENT) 27 | endif() 28 | endif() 29 | 30 | # Install shared libraries without execute permission? 31 | if(NOT DEFINED CMAKE_INSTALL_SO_NO_EXE) 32 | set(CMAKE_INSTALL_SO_NO_EXE "1") 33 | endif() 34 | 35 | # Is this installation the result of a crosscompile? 36 | if(NOT DEFINED CMAKE_CROSSCOMPILING) 37 | set(CMAKE_CROSSCOMPILING "FALSE") 38 | endif() 39 | 40 | if(CMAKE_INSTALL_COMPONENT) 41 | set(CMAKE_INSTALL_MANIFEST "install_manifest_${CMAKE_INSTALL_COMPONENT}.txt") 42 | else() 43 | set(CMAKE_INSTALL_MANIFEST "install_manifest.txt") 44 | endif() 45 | 46 | string(REPLACE ";" "\n" CMAKE_INSTALL_MANIFEST_CONTENT 47 | "${CMAKE_INSTALL_MANIFEST_FILES}") 48 | file(WRITE "/home/nvidia/tensorrtx/yolov5/build/${CMAKE_INSTALL_MANIFEST}" 49 | "${CMAKE_INSTALL_MANIFEST_CONTENT}") 50 | -------------------------------------------------------------------------------- /helmet_tensorrt/build/libmyplugins.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/libmyplugins.so -------------------------------------------------------------------------------- /helmet_tensorrt/build/yolov5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/build/yolov5 -------------------------------------------------------------------------------- /helmet_tensorrt/calibrator.cpp: -------------------------------------------------------------------------------- 1 | #include <fstream> 2 | #include <iostream> 3 | #include <iterator> 4 | #include <opencv2/dnn/dnn.hpp> 5 | #include "calibrator.h" 6 | #include "cuda_utils.h" 7 | #include "utils.h" 8 | 9 | Int8EntropyCalibrator2::Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache) 10 | : batchsize_(batchsize) 11 | , input_w_(input_w) 12 | , input_h_(input_h) 13 | , img_idx_(0) 14 | , img_dir_(img_dir) 15 | , calib_table_name_(calib_table_name) 16 | , input_blob_name_(input_blob_name) 17 | , read_cache_(read_cache) 18 | { 19 | input_count_ = 3 * input_w * input_h * batchsize; 20 | CUDA_CHECK(cudaMalloc(&device_input_, input_count_ * sizeof(float))); 21 | read_files_in_dir(img_dir, img_files_); 22 | } 23 | 24 | Int8EntropyCalibrator2::~Int8EntropyCalibrator2() 25 | { 26 | CUDA_CHECK(cudaFree(device_input_)); 27 | } 28 | 29 | int Int8EntropyCalibrator2::getBatchSize() const TRT_NOEXCEPT 30 | { 31 | return batchsize_; 32 | } 33 | 34 | bool Int8EntropyCalibrator2::getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT 35 | { 36 | if (img_idx_ + batchsize_ > (int)img_files_.size()) { 37 | return false; 38 | } 39 | 40 | std::vector<cv::Mat> input_imgs_; 41 | for (int i = img_idx_; i < img_idx_ + batchsize_; i++) { 42 | std::cout << img_files_[i] << " " << i << std::endl; 43 | cv::Mat temp = cv::imread(img_dir_ + img_files_[i]); 44 | if (temp.empty()){ 45 | std::cerr << "Fatal error: image cannot open!"
<< std::endl; 46 | return false; 47 | } 48 | cv::Mat pr_img = preprocess_img(temp, input_w_, input_h_); 49 | input_imgs_.push_back(pr_img); 50 | } 51 | img_idx_ += batchsize_; 52 | cv::Mat blob = cv::dnn::blobFromImages(input_imgs_, 1.0 / 255.0, cv::Size(input_w_, input_h_), cv::Scalar(0, 0, 0), true, false); 53 | 54 | CUDA_CHECK(cudaMemcpy(device_input_, blob.ptr<float>(0), input_count_ * sizeof(float), cudaMemcpyHostToDevice)); 55 | assert(!strcmp(names[0], input_blob_name_)); 56 | bindings[0] = device_input_; 57 | return true; 58 | } 59 | 60 | const void* Int8EntropyCalibrator2::readCalibrationCache(size_t& length) TRT_NOEXCEPT 61 | { 62 | std::cout << "reading calib cache: " << calib_table_name_ << std::endl; 63 | calib_cache_.clear(); 64 | std::ifstream input(calib_table_name_, std::ios::binary); 65 | input >> std::noskipws; 66 | if (read_cache_ && input.good()) 67 | { 68 | std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), std::back_inserter(calib_cache_)); 69 | } 70 | length = calib_cache_.size(); 71 | return length ? calib_cache_.data() : nullptr; 72 | } 73 | 74 | void Int8EntropyCalibrator2::writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT 75 | { 76 | std::cout << "writing calib cache: " << calib_table_name_ << " size: " << length << std::endl; 77 | std::ofstream output(calib_table_name_, std::ios::binary); 78 | output.write(reinterpret_cast<const char*>(cache), length); 79 | } 80 | 81 | -------------------------------------------------------------------------------- /helmet_tensorrt/calibrator.h: -------------------------------------------------------------------------------- 1 | #ifndef ENTROPY_CALIBRATOR_H 2 | #define ENTROPY_CALIBRATOR_H 3 | 4 | #include <NvInfer.h> 5 | #include <string> 6 | #include <vector> 7 | #include "macros.h" 8 | 9 | //! \class Int8EntropyCalibrator2 10 | //! 11 | //! \brief Implements Entropy calibrator 2. 12 | //! CalibrationAlgoType is kENTROPY_CALIBRATION_2. 13 | 
14 | class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 15 | { 16 | public: 17 | Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true); 18 | 19 | virtual ~Int8EntropyCalibrator2(); 20 | int getBatchSize() const TRT_NOEXCEPT override; 21 | bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override; 22 | const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override; 23 | void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override; 24 | 25 | private: 26 | int batchsize_; 27 | int input_w_; 28 | int input_h_; 29 | int img_idx_; 30 | std::string img_dir_; 31 | std::vector<std::string> img_files_; 32 | size_t input_count_; 33 | std::string calib_table_name_; 34 | const char* input_blob_name_; 35 | bool read_cache_; 36 | void* device_input_; 37 | std::vector<char> calib_cache_; 38 | }; 39 | 40 | #endif // ENTROPY_CALIBRATOR_H 41 | -------------------------------------------------------------------------------- /helmet_tensorrt/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef TRTX_CUDA_UTILS_H_ 2 | #define TRTX_CUDA_UTILS_H_ 3 | 4 | #include <cuda_runtime_api.h> 5 | 6 | #ifndef CUDA_CHECK 7 | #define CUDA_CHECK(callstr)\ 8 | {\ 9 | cudaError_t error_code = callstr;\ 10 | if (error_code != cudaSuccess) {\ 11 | std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\ 12 | assert(0);\ 13 | }\ 14 | } 15 | #endif // CUDA_CHECK 16 | 17 | #endif // TRTX_CUDA_UTILS_H_ 18 | 19 | -------------------------------------------------------------------------------- /helmet_tensorrt/gen_wts.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import struct 3 | import sys 4 | from utils.torch_utils import select_device 5 | 6 | # Initialize 7 | device = select_device('cpu') 8 | pt_file = sys.argv[1] 9 | # Load model 10 | model = torch.load(pt_file, map_location=device)['model'].float() # load to FP32 11 | model.to(device).eval() 12 | 13 | with open(pt_file.split('.')[0] + '.wts', 'w') as f: 14 | f.write('{}\n'.format(len(model.state_dict().keys()))) 15 | for k, v in model.state_dict().items(): 16 | vr = v.reshape(-1).cpu().numpy() 17 | f.write('{} {} '.format(k, len(vr))) 18 | for vv in vr: 19 | f.write(' ') 20 | f.write(struct.pack('>f',float(vv)).hex()) 21 | f.write('\n') 22 | -------------------------------------------------------------------------------- /helmet_tensorrt/macros.h: -------------------------------------------------------------------------------- 1 | #ifndef __MACROS_H 2 | #define __MACROS_H 3 | 4 | #ifdef API_EXPORTS 5 | #if defined(_MSC_VER) 6 | #define API __declspec(dllexport) 7 | #else 8 | #define API __attribute__((visibility("default"))) 9 | #endif 10 | #else 11 | 12 | #if defined(_MSC_VER) 13 | #define API __declspec(dllimport) 14 | #else 15 | #define API 16 | #endif 17 | #endif // API_EXPORTS 18 | 19 | #if NV_TENSORRT_MAJOR >= 8 20 | #define TRT_NOEXCEPT noexcept 21 | #define TRT_CONST_ENQUEUE const 22 | #else 23 | #define TRT_NOEXCEPT 24 | #define TRT_CONST_ENQUEUE 25 | #endif 26 | 27 | #endif // __MACROS_H 28 | -------------------------------------------------------------------------------- /helmet_tensorrt/output/1.jpg: --------------------------------------------------------------------------------
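(Format note on gen_wts.py above: it writes a plain-text .wts file whose first line is the number of tensors, followed by one line per tensor of the form `name count hex hex ...`, each value being a big-endian float32 rendered as 8 hex characters. A small sketch of reading such a file back into NumPy arrays follows; the helper name `load_wts` and the file name are illustrative only.)

```python
import struct
import numpy as np


def load_wts(path):
    """Parse a .wts file written by gen_wts.py into {tensor_name: 1-D float32 array}."""
    weights = {}
    with open(path) as f:
        count = int(f.readline())               # first line: number of tensors
        for _ in range(count):
            parts = f.readline().split()
            name, n = parts[0], int(parts[1])
            # each weight is a big-endian float32 stored as 8 hex characters
            vals = [struct.unpack('>f', bytes.fromhex(h))[0] for h in parts[2:2 + n]]
            weights[name] = np.array(vals, dtype=np.float32)
    return weights


# w = load_wts('helmet.wts'); print(len(w), 'tensors')
```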
https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/output/1.jpg -------------------------------------------------------------------------------- /helmet_tensorrt/samples/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_tensorrt/samples/1.jpg -------------------------------------------------------------------------------- /helmet_tensorrt/utils.h: -------------------------------------------------------------------------------- 1 | #ifndef TRTX_YOLOV5_UTILS_H_ 2 | #define TRTX_YOLOV5_UTILS_H_ 3 | 4 | #include <dirent.h> 5 | #include <opencv2/opencv.hpp> 6 | 7 | static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) { 8 | int w, h, x, y; 9 | float r_w = input_w / (img.cols*1.0); 10 | float r_h = input_h / (img.rows*1.0); 11 | if (r_h > r_w) { 12 | w = input_w; 13 | h = r_w * img.rows; 14 | x = 0; 15 | y = (input_h - h) / 2; 16 | } else { 17 | w = r_h * img.cols; 18 | h = input_h; 19 | x = (input_w - w) / 2; 20 | y = 0; 21 | } 22 | cv::Mat re(h, w, CV_8UC3); 23 | cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR); 24 | cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128)); 25 | re.copyTo(out(cv::Rect(x, y, re.cols, re.rows))); 26 | return out; 27 | } 28 | 29 | static inline int read_files_in_dir(const char *p_dir_name, std::vector<std::string> &file_names) { 30 | DIR *p_dir = opendir(p_dir_name); 31 | if (p_dir == nullptr) { 32 | return -1; 33 | } 34 | 35 | struct dirent* p_file = nullptr; 36 | while ((p_file = readdir(p_dir)) != nullptr) { 37 | if (strcmp(p_file->d_name, ".") != 0 && 38 | strcmp(p_file->d_name, "..") != 0) { 39 | //std::string cur_file_name(p_dir_name); 40 | //cur_file_name += "/"; 41 | //cur_file_name += p_file->d_name; 42 | std::string cur_file_name(p_file->d_name); 43 | file_names.push_back(cur_file_name); 44 | } 45 | } 46 | 47 | closedir(p_dir); 48 | return 0; 49 | } 50 | 51 | #endif // TRTX_YOLOV5_UTILS_H_ 52 | 53 | -------------------------------------------------------------------------------- /helmet_tensorrt/yololayer.h: -------------------------------------------------------------------------------- 1 | #ifndef _YOLO_LAYER_H 2 | #define _YOLO_LAYER_H 3 | 4 | #include <vector> 5 | #include <string> 6 | #include <NvInfer.h> 7 | #include "macros.h" 8 | 9 | namespace Yolo 10 | { 11 | static constexpr int CHECK_COUNT = 3; 12 | static constexpr float IGNORE_THRESH = 0.1f; 13 | struct YoloKernel 14 | { 15 | int width; 16 | int height; 17 | float anchors[CHECK_COUNT * 2]; 18 | }; 19 | static constexpr int MAX_OUTPUT_BBOX_COUNT = 1000; 20 | static constexpr int CLASS_NUM = 2; 21 | static constexpr int INPUT_H = 640; // yolov5's input height and width must be divisible by 32.
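(Aside on `preprocess_img` in utils.h above: it is a letterbox resize, scaling the image so it fits inside the network input while keeping its aspect ratio, then padding the remainder with gray (128, 128, 128). Feeding the engine from Python requires the same preprocessing; below is a NumPy/OpenCV sketch of that logic, with the function name `preprocess_img_py` chosen here purely for illustration.)

```python
import cv2
import numpy as np


def preprocess_img_py(img, input_w=640, input_h=640):
    """Letterbox an HWC BGR image to (input_h, input_w), mirroring preprocess_img in utils.h."""
    h, w = img.shape[:2]
    r = min(input_w / w, input_h / h)                          # scale so the whole image fits
    new_w, new_h = int(round(w * r)), int(round(h * r))
    resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    out = np.full((input_h, input_w, 3), 128, dtype=np.uint8)  # gray padding
    x, y = (input_w - new_w) // 2, (input_h - new_h) // 2      # center the resized image
    out[y:y + new_h, x:x + new_w] = resized
    return out
```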
22 | static constexpr int INPUT_W = 640; 23 | 24 | static constexpr int LOCATIONS = 4; 25 | struct alignas(float) Detection { 26 | //center_x center_y w h 27 | float bbox[LOCATIONS]; 28 | float conf; // bbox_conf * cls_conf 29 | float class_id; 30 | }; 31 | } 32 | 33 | namespace nvinfer1 34 | { 35 | class API YoloLayerPlugin : public IPluginV2IOExt 36 | { 37 | public: 38 | YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel); 39 | YoloLayerPlugin(const void* data, size_t length); 40 | ~YoloLayerPlugin(); 41 | 42 | int getNbOutputs() const TRT_NOEXCEPT override 43 | { 44 | return 1; 45 | } 46 | 47 | Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT override; 48 | 49 | int initialize() TRT_NOEXCEPT override; 50 | 51 | virtual void terminate() TRT_NOEXCEPT override {}; 52 | 53 | virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT override { return 0; } 54 | 55 | virtual int enqueue(int batchSize, const void* const* inputs, void*TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT override; 56 | 57 | virtual size_t getSerializationSize() const TRT_NOEXCEPT override; 58 | 59 | virtual void serialize(void* buffer) const TRT_NOEXCEPT override; 60 | 61 | bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const TRT_NOEXCEPT override { 62 | return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT; 63 | } 64 | 65 | const char* getPluginType() const TRT_NOEXCEPT override; 66 | 67 | const char* getPluginVersion() const TRT_NOEXCEPT override; 68 | 69 | void destroy() TRT_NOEXCEPT override; 70 | 71 | IPluginV2IOExt* clone() const TRT_NOEXCEPT override; 72 | 73 | void setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT override; 74 | 75 | const char* getPluginNamespace() const TRT_NOEXCEPT override; 76 | 77 | DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT override; 78 | 79 | bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT override; 80 | 81 | bool canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT override; 82 | 83 | void attachToContext( 84 | cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT override; 85 | 86 | void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT override; 87 | 88 | void detachFromContext() TRT_NOEXCEPT override; 89 | 90 | private: 91 | void forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize = 1); 92 | int mThreadCount = 256; 93 | const char* mPluginNamespace; 94 | int mKernelCount; 95 | int mClassCount; 96 | int mYoloV5NetWidth; 97 | int mYoloV5NetHeight; 98 | int mMaxOutObject; 99 | std::vector<Yolo::YoloKernel> mYoloKernel; 100 | void** mAnchor; 101 | }; 102 | 103 | class API YoloPluginCreator : public IPluginCreator 104 | { 105 | public: 106 | YoloPluginCreator(); 107 | 108 | ~YoloPluginCreator() override = default; 109 | 110 | const char* getPluginName() const TRT_NOEXCEPT override; 111 | 112 | const char* getPluginVersion() const TRT_NOEXCEPT override; 113 | 114 | const PluginFieldCollection* getFieldNames() TRT_NOEXCEPT override; 115 | 116 | IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT override; 117 | 118 | IPluginV2IOExt*
deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT override; 119 | 120 | void setPluginNamespace(const char* libNamespace) TRT_NOEXCEPT override 121 | { 122 | mNamespace = libNamespace; 123 | } 124 | 125 | const char* getPluginNamespace() const TRT_NOEXCEPT override 126 | { 127 | return mNamespace.c_str(); 128 | } 129 | 130 | private: 131 | std::string mNamespace; 132 | static PluginFieldCollection mFC; 133 | static std::vector<PluginField> mPluginAttributes; 134 | }; 135 | REGISTER_TENSORRT_PLUGIN(YoloPluginCreator); 136 | }; 137 | 138 | #endif // _YOLO_LAYER_H 139 | -------------------------------------------------------------------------------- /helmet_yolov5/.dockerignore: -------------------------------------------------------------------------------- 1 | # Repo-specific DockerIgnore ------------------------------------------------------------------------------------------- 2 | #.git 3 | .cache 4 | .idea 5 | runs 6 | output 7 | coco 8 | storage.googleapis.com 9 | 10 | data/samples/* 11 | **/results*.csv 12 | *.jpg 13 | 14 | # Neural Network weights ----------------------------------------------------------------------------------------------- 15 | **/*.pt 16 | **/*.pth 17 | **/*.onnx 18 | **/*.mlmodel 19 | **/*.torchscript 20 | **/*.torchscript.pt 21 | 22 | 23 | # Below Copied From .gitignore ----------------------------------------------------------------------------------------- 24 | # Below Copied From .gitignore ----------------------------------------------------------------------------------------- 25 | 26 | 27 | # GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- 28 | # Byte-compiled / optimized / DLL files 29 | __pycache__/ 30 | *.py[cod] 31 | *$py.class 32 | 33 | # C extensions 34 | *.so 35 | 36 | # Distribution / packaging 37 | .Python 38 | env/ 39 | build/ 40 | develop-eggs/ 41 | dist/ 42 | downloads/ 43 | eggs/ 44 | .eggs/ 45 | lib/ 46 | lib64/ 47 | parts/ 48 | sdist/ 49 | var/ 50 | wheels/ 51 | *.egg-info/ 52 | wandb/ 53 | .installed.cfg 54 | *.egg 55 | 56 | # PyInstaller 57 | # Usually these files are written by a python script from a template 58 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
59 | *.manifest 60 | *.spec 61 | 62 | # Installer logs 63 | pip-log.txt 64 | pip-delete-this-directory.txt 65 | 66 | # Unit test / coverage reports 67 | htmlcov/ 68 | .tox/ 69 | .coverage 70 | .coverage.* 71 | .cache 72 | nosetests.xml 73 | coverage.xml 74 | *.cover 75 | .hypothesis/ 76 | 77 | # Translations 78 | *.mo 79 | *.pot 80 | 81 | # Django stuff: 82 | *.log 83 | local_settings.py 84 | 85 | # Flask stuff: 86 | instance/ 87 | .webassets-cache 88 | 89 | # Scrapy stuff: 90 | .scrapy 91 | 92 | # Sphinx documentation 93 | docs/_build/ 94 | 95 | # PyBuilder 96 | target/ 97 | 98 | # Jupyter Notebook 99 | .ipynb_checkpoints 100 | 101 | # pyenv 102 | .python-version 103 | 104 | # celery beat schedule file 105 | celerybeat-schedule 106 | 107 | # SageMath parsed files 108 | *.sage.py 109 | 110 | # dotenv 111 | .env 112 | 113 | # virtualenv 114 | .venv* 115 | venv*/ 116 | ENV*/ 117 | 118 | # Spyder project settings 119 | .spyderproject 120 | .spyproject 121 | 122 | # Rope project settings 123 | .ropeproject 124 | 125 | # mkdocs documentation 126 | /site 127 | 128 | # mypy 129 | .mypy_cache/ 130 | 131 | 132 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- 133 | 134 | # General 135 | .DS_Store 136 | .AppleDouble 137 | .LSOverride 138 | 139 | # Icon must end with two \r 140 | Icon 141 | Icon? 142 | 143 | # Thumbnails 144 | ._* 145 | 146 | # Files that might appear in the root of a volume 147 | .DocumentRevisions-V100 148 | .fseventsd 149 | .Spotlight-V100 150 | .TemporaryItems 151 | .Trashes 152 | .VolumeIcon.icns 153 | .com.apple.timemachine.donotpresent 154 | 155 | # Directories potentially created on remote AFP share 156 | .AppleDB 157 | .AppleDesktop 158 | Network Trash Folder 159 | Temporary Items 160 | .apdisk 161 | 162 | 163 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore 164 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm 165 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 166 | 167 | # User-specific stuff: 168 | .idea/* 169 | .idea/**/workspace.xml 170 | .idea/**/tasks.xml 171 | .idea/dictionaries 172 | .html # Bokeh Plots 173 | .pg # TensorFlow Frozen Graphs 174 | .avi # videos 175 | 176 | # Sensitive or high-churn files: 177 | .idea/**/dataSources/ 178 | .idea/**/dataSources.ids 179 | .idea/**/dataSources.local.xml 180 | .idea/**/sqlDataSources.xml 181 | .idea/**/dynamic.xml 182 | .idea/**/uiDesigner.xml 183 | 184 | # Gradle: 185 | .idea/**/gradle.xml 186 | .idea/**/libraries 187 | 188 | # CMake 189 | cmake-build-debug/ 190 | cmake-build-release/ 191 | 192 | # Mongo Explorer plugin: 193 | .idea/**/mongoSettings.xml 194 | 195 | ## File-based project format: 196 | *.iws 197 | 198 | ## Plugin-specific files: 199 | 200 | # IntelliJ 201 | out/ 202 | 203 | # mpeltonen/sbt-idea plugin 204 | .idea_modules/ 205 | 206 | # JIRA plugin 207 | atlassian-ide-plugin.xml 208 | 209 | # Cursive Clojure plugin 210 | .idea/replstate.xml 211 | 212 | # Crashlytics plugin (for Android Studio and IntelliJ) 213 | com_crashlytics_export_strings.xml 214 | crashlytics.properties 215 | crashlytics-build.properties 216 | fabric.properties 217 | -------------------------------------------------------------------------------- /helmet_yolov5/.gitattributes: -------------------------------------------------------------------------------- 1 | # this drop notebooks from GitHub language stats 2 | *.ipynb linguist-vendored 3 | 
-------------------------------------------------------------------------------- /helmet_yolov5/.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: glenn-jocher 4 | patreon: ultralytics 5 | open_collective: ultralytics 6 | -------------------------------------------------------------------------------- /helmet_yolov5/.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "🐛 Bug report" 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, 11 | otherwise it is non-actionable, and we can not help you: 12 | 13 | - **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo 14 | - **Common dataset**: coco.yaml or coco128.yaml 15 | - **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments 16 | 17 | If this is a custom dataset/training question you **must include** your `train*.jpg`, `val*.jpg` and `results.png` 18 | figures, or we can not help you. You can generate these with `utils.plot_results()`. 19 | 20 | ## 🐛 Bug 21 | 22 | A clear and concise description of what the bug is. 23 | 24 | ## To Reproduce (REQUIRED) 25 | 26 | Input: 27 | 28 | ``` 29 | import torch 30 | 31 | a = torch.tensor([5]) 32 | c = a / 0 33 | ``` 34 | 35 | Output: 36 | 37 | ``` 38 | Traceback (most recent call last): 39 | File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code 40 | exec(code_obj, self.user_global_ns, self.user_ns) 41 | File "", line 5, in 42 | c = a / 0 43 | RuntimeError: ZeroDivisionError 44 | ``` 45 | 46 | ## Expected behavior 47 | 48 | A clear and concise description of what you expected to happen. 49 | 50 | ## Environment 51 | 52 | If applicable, add screenshots to help explain your problem. 53 | 54 | - OS: [e.g. Ubuntu] 55 | - GPU [e.g. 2080 Ti] 56 | 57 | ## Additional context 58 | 59 | Add any other context about the problem here. 
60 | -------------------------------------------------------------------------------- /helmet_yolov5/.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "🚀 Feature request" 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## 🚀 Feature 11 | 12 | 13 | 14 | ## Motivation 15 | 16 | 18 | 19 | ## Pitch 20 | 21 | 22 | 23 | ## Alternatives 24 | 25 | 26 | 27 | ## Additional context 28 | 29 | 30 | -------------------------------------------------------------------------------- /helmet_yolov5/.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "❓Question" 3 | about: Ask a general question 4 | title: '' 5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## ❔Question 11 | 12 | ## Additional context 13 | -------------------------------------------------------------------------------- /helmet_yolov5/.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | time: "04:00" 8 | open-pull-requests-limit: 10 9 | reviewers: 10 | - glenn-jocher 11 | labels: 12 | - dependencies 13 | -------------------------------------------------------------------------------- /helmet_yolov5/.github/workflows/ci-testing.yml: -------------------------------------------------------------------------------- 1 | name: CI CPU testing 2 | 3 | on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows 4 | push: 5 | branches: [ master, develop ] 6 | pull_request: 7 | # The branches below must be a subset of the branches above 8 | branches: [ master, develop ] 9 | 10 | jobs: 11 | cpu-tests: 12 | 13 | runs-on: ${{ matrix.os }} 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | os: [ ubuntu-latest, macos-latest, windows-latest ] 18 | python-version: [ 3.8 ] 19 | model: [ 'yolov5s' ] # models to test 20 | 21 | # Timeout: https://stackoverflow.com/a/59076067/4521646 22 | timeout-minutes: 50 23 | steps: 24 | - uses: actions/checkout@v2 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v2 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | 30 | # Note: This uses an internal pip API and may not always work 31 | # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow 32 | - name: Get pip cache 33 | id: pip-cache 34 | run: | 35 | python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" 36 | 37 | - name: Cache pip 38 | uses: actions/cache@v1 39 | with: 40 | path: ${{ steps.pip-cache.outputs.dir }} 41 | key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} 42 | restore-keys: | 43 | ${{ runner.os }}-${{ matrix.python-version }}-pip- 44 | 45 | - name: Install dependencies 46 | run: | 47 | python -m pip install --upgrade pip 48 | pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html 49 | pip install -q onnx 50 | python --version 51 | pip --version 52 | pip list 53 | shell: bash 54 | 55 | - name: Download data 56 | run: | 57 | # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip 58 | # unzip -q tmp.zip -d ../ 59 | # rm tmp.zip 60 | 61 | - name: Tests workflow 62 | 
run: | 63 | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories 64 | di=cpu # inference devices # define device 65 | 66 | # train 67 | python train.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di 68 | # detect 69 | python detect.py --weights ${{ matrix.model }}.pt --device $di 70 | python detect.py --weights runs/train/exp/weights/last.pt --device $di 71 | # val 72 | python val.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di 73 | python val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di 74 | 75 | python hubconf.py # hub 76 | python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect 77 | python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt # export 78 | shell: bash 79 | -------------------------------------------------------------------------------- /helmet_yolov5/.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. 2 | # https://github.com/github/codeql-action 3 | 4 | name: "CodeQL" 5 | 6 | on: 7 | schedule: 8 | - cron: '0 0 1 * *' # Runs at 00:00 UTC on the 1st of every month 9 | 10 | jobs: 11 | analyze: 12 | name: Analyze 13 | runs-on: ubuntu-latest 14 | 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | language: [ 'python' ] 19 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 20 | # Learn more: 21 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 22 | 23 | steps: 24 | - name: Checkout repository 25 | uses: actions/checkout@v2 26 | 27 | # Initializes the CodeQL tools for scanning. 28 | - name: Initialize CodeQL 29 | uses: github/codeql-action/init@v1 30 | with: 31 | languages: ${{ matrix.language }} 32 | # If you wish to specify custom queries, you can do so here or in a config file. 33 | # By default, queries listed here will override any specified in a config file. 34 | # Prefix the list here with "+" to use these queries and those in the config file. 35 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 36 | 37 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 38 | # If this step fails, then you should remove it and run the build manually (see below) 39 | - name: Autobuild 40 | uses: github/codeql-action/autobuild@v1 41 | 42 | # ℹ️ Command-line programs to run using the OS shell. 
43 | # 📚 https://git.io/JvXDl 44 | 45 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 46 | # and modify them (or add more) to build your code if your project 47 | # uses a compiled language 48 | 49 | #- run: | 50 | # make bootstrap 51 | # make release 52 | 53 | - name: Perform CodeQL Analysis 54 | uses: github/codeql-action/analyze@v1 55 | -------------------------------------------------------------------------------- /helmet_yolov5/.github/workflows/greetings.yml: -------------------------------------------------------------------------------- 1 | name: Greetings 2 | 3 | on: [ pull_request_target, issues ] 4 | 5 | jobs: 6 | greeting: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/first-interaction@v1 10 | with: 11 | repo-token: ${{ secrets.GITHUB_TOKEN }} 12 | pr-message: | 13 | 👋 Hello @${{ github.actor }}, thank you for submitting a 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to: 14 | - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch: 15 | ```bash 16 | git remote add upstream https://github.com/ultralytics/yolov5.git 17 | git fetch upstream 18 | git checkout feature # <----- replace 'feature' with local branch name 19 | git rebase upstream/master 20 | git push -u origin -f 21 | ``` 22 | - ✅ Verify all Continuous Integration (CI) **checks are passing**. 23 | - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee 24 | 25 | issue-message: | 26 | 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). 27 | 28 | If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. 29 | 30 | If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. 31 | 32 | For business inquiries or professional support requests please visit https://ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. 33 | 34 | ## Requirements 35 | 36 | [**Python>=3.6.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). 
To get started: 37 | ```bash 38 | $ git clone https://github.com/ultralytics/yolov5 39 | $ cd yolov5 40 | $ pip install -r requirements.txt 41 | ``` 42 | 43 | ## Environments 44 | 45 | YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): 46 | 47 | - **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle 48 | - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) 49 | - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) 50 | - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls 51 | 52 | 53 | ## Status 54 | 55 | ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) 56 | 57 | If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. 58 | 59 | -------------------------------------------------------------------------------- /helmet_yolov5/.github/workflows/rebase.yml: -------------------------------------------------------------------------------- 1 | name: Automatic Rebase 2 | # https://github.com/marketplace/actions/automatic-rebase 3 | 4 | on: 5 | issue_comment: 6 | types: [ created ] 7 | 8 | jobs: 9 | rebase: 10 | name: Rebase 11 | if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase') 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout the latest code 15 | uses: actions/checkout@v2 16 | with: 17 | fetch-depth: 0 18 | - name: Automatic Rebase 19 | uses: cirrus-actions/rebase@1.3.1 20 | env: 21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 22 | -------------------------------------------------------------------------------- /helmet_yolov5/.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close stale issues 2 | on: 3 | schedule: 4 | - cron: "0 0 * * *" 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v3 11 | with: 12 | repo-token: ${{ secrets.GITHUB_TOKEN }} 13 | stale-issue-message: | 14 | 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. 
15 | 16 | Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: 17 | - **Wiki** – https://github.com/ultralytics/yolov5/wiki 18 | - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials 19 | - **Docs** – https://docs.ultralytics.com 20 | 21 | Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: 22 | - **Ultralytics HUB** – https://ultralytics.com 23 | - **Vision API** – https://ultralytics.com/yolov5 24 | - **About Us** – https://ultralytics.com/about 25 | - **Join Our Team** – https://ultralytics.com/work 26 | - **Contact Us** – https://ultralytics.com/contact 27 | 28 | Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! 29 | 30 | Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! 31 | 32 | stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' 33 | days-before-stale: 30 34 | days-before-close: 5 35 | exempt-issue-labels: 'documentation,tutorial' 36 | operations-per-run: 100 # The maximum number of operations per run, used to control rate limiting. 37 | -------------------------------------------------------------------------------- /helmet_yolov5/.gitignore: -------------------------------------------------------------------------------- 1 | # Repo-specific GitIgnore ---------------------------------------------------------------------------------------------- 2 | *.jpg 3 | *.jpeg 4 | *.png 5 | *.bmp 6 | *.tif 7 | *.tiff 8 | *.heic 9 | *.JPG 10 | *.JPEG 11 | *.PNG 12 | *.BMP 13 | *.TIF 14 | *.TIFF 15 | *.HEIC 16 | *.mp4 17 | *.mov 18 | *.MOV 19 | *.avi 20 | *.data 21 | *.json 22 | *.cfg 23 | !cfg/yolov3*.cfg 24 | 25 | storage.googleapis.com 26 | runs/* 27 | data/* 28 | !data/hyps/* 29 | !data/images/zidane.jpg 30 | !data/images/bus.jpg 31 | !data/*.sh 32 | 33 | results*.csv 34 | 35 | # Datasets ------------------------------------------------------------------------------------------------------------- 36 | coco/ 37 | coco128/ 38 | VOC/ 39 | 40 | # MATLAB GitIgnore ----------------------------------------------------------------------------------------------------- 41 | *.m~ 42 | *.mat 43 | !targets*.mat 44 | 45 | # Neural Network weights ----------------------------------------------------------------------------------------------- 46 | *.weights 47 | *.pt 48 | *.onnx 49 | *.mlmodel 50 | *.torchscript 51 | darknet53.conv.74 52 | yolov3-tiny.conv.15 53 | 54 | # GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- 55 | # Byte-compiled / optimized / DLL files 56 | __pycache__/ 57 | *.py[cod] 58 | *$py.class 59 | 60 | # C extensions 61 | *.so 62 | 63 | # Distribution / packaging 64 | .Python 65 | env/ 66 | build/ 67 | develop-eggs/ 68 | dist/ 69 | downloads/ 70 | eggs/ 71 | .eggs/ 72 | lib/ 73 | lib64/ 74 | parts/ 75 | sdist/ 76 | var/ 77 | wheels/ 78 | *.egg-info/ 79 | wandb/ 80 | .installed.cfg 81 | *.egg 82 | 83 | 84 | # PyInstaller 85 | # Usually these files are written by a python script from a template 86 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
87 | *.manifest 88 | *.spec 89 | 90 | # Installer logs 91 | pip-log.txt 92 | pip-delete-this-directory.txt 93 | 94 | # Unit test / coverage reports 95 | htmlcov/ 96 | .tox/ 97 | .coverage 98 | .coverage.* 99 | .cache 100 | nosetests.xml 101 | coverage.xml 102 | *.cover 103 | .hypothesis/ 104 | 105 | # Translations 106 | *.mo 107 | *.pot 108 | 109 | # Django stuff: 110 | *.log 111 | local_settings.py 112 | 113 | # Flask stuff: 114 | instance/ 115 | .webassets-cache 116 | 117 | # Scrapy stuff: 118 | .scrapy 119 | 120 | # Sphinx documentation 121 | docs/_build/ 122 | 123 | # PyBuilder 124 | target/ 125 | 126 | # Jupyter Notebook 127 | .ipynb_checkpoints 128 | 129 | # pyenv 130 | .python-version 131 | 132 | # celery beat schedule file 133 | celerybeat-schedule 134 | 135 | # SageMath parsed files 136 | *.sage.py 137 | 138 | # dotenv 139 | .env 140 | 141 | # virtualenv 142 | .venv* 143 | venv*/ 144 | ENV*/ 145 | 146 | # Spyder project settings 147 | .spyderproject 148 | .spyproject 149 | 150 | # Rope project settings 151 | .ropeproject 152 | 153 | # mkdocs documentation 154 | /site 155 | 156 | # mypy 157 | .mypy_cache/ 158 | 159 | 160 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- 161 | 162 | # General 163 | .DS_Store 164 | .AppleDouble 165 | .LSOverride 166 | 167 | # Icon must end with two \r 168 | Icon 169 | Icon? 170 | 171 | # Thumbnails 172 | ._* 173 | 174 | # Files that might appear in the root of a volume 175 | .DocumentRevisions-V100 176 | .fseventsd 177 | .Spotlight-V100 178 | .TemporaryItems 179 | .Trashes 180 | .VolumeIcon.icns 181 | .com.apple.timemachine.donotpresent 182 | 183 | # Directories potentially created on remote AFP share 184 | .AppleDB 185 | .AppleDesktop 186 | Network Trash Folder 187 | Temporary Items 188 | .apdisk 189 | 190 | 191 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore 192 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm 193 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 194 | 195 | # User-specific stuff: 196 | .idea/* 197 | .idea/**/workspace.xml 198 | .idea/**/tasks.xml 199 | .idea/dictionaries 200 | .html # Bokeh Plots 201 | .pg # TensorFlow Frozen Graphs 202 | .avi # videos 203 | 204 | # Sensitive or high-churn files: 205 | .idea/**/dataSources/ 206 | .idea/**/dataSources.ids 207 | .idea/**/dataSources.local.xml 208 | .idea/**/sqlDataSources.xml 209 | .idea/**/dynamic.xml 210 | .idea/**/uiDesigner.xml 211 | 212 | # Gradle: 213 | .idea/**/gradle.xml 214 | .idea/**/libraries 215 | 216 | # CMake 217 | cmake-build-debug/ 218 | cmake-build-release/ 219 | 220 | # Mongo Explorer plugin: 221 | .idea/**/mongoSettings.xml 222 | 223 | ## File-based project format: 224 | *.iws 225 | 226 | ## Plugin-specific files: 227 | 228 | # IntelliJ 229 | out/ 230 | 231 | # mpeltonen/sbt-idea plugin 232 | .idea_modules/ 233 | 234 | # JIRA plugin 235 | atlassian-ide-plugin.xml 236 | 237 | # Cursive Clojure plugin 238 | .idea/replstate.xml 239 | 240 | # Crashlytics plugin (for Android Studio and IntelliJ) 241 | com_crashlytics_export_strings.xml 242 | crashlytics.properties 243 | crashlytics-build.properties 244 | fabric.properties 245 | -------------------------------------------------------------------------------- /helmet_yolov5/Dockerfile: -------------------------------------------------------------------------------- 1 | # Start FROM Nvidia PyTorch image 
https://ngc.nvidia.com/catalog/containers/nvidia:pytorch 2 | FROM nvcr.io/nvidia/pytorch:21.05-py3 3 | 4 | # Install linux packages 5 | RUN apt update && apt install -y zip htop screen libgl1-mesa-glx 6 | 7 | # Install python dependencies 8 | COPY requirements.txt . 9 | RUN python -m pip install --upgrade pip 10 | RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof 11 | RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook 12 | RUN pip install --no-cache -U torch torchvision numpy 13 | # RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html 14 | 15 | # Create working directory 16 | RUN mkdir -p /usr/src/app 17 | WORKDIR /usr/src/app 18 | 19 | # Copy contents 20 | COPY . /usr/src/app 21 | 22 | # Set environment variables 23 | ENV HOME=/usr/src/app 24 | 25 | 26 | # Usage Examples ------------------------------------------------------------------------------------------------------- 27 | 28 | # Build and Push 29 | # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t 30 | 31 | # Pull and Run 32 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t 33 | 34 | # Pull and Run with local directory access 35 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t 36 | 37 | # Kill all 38 | # sudo docker kill $(sudo docker ps -q) 39 | 40 | # Kill all image-based 41 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) 42 | 43 | # Bash into running container 44 | # sudo docker exec -it 5a9b5863d93d bash 45 | 46 | # Bash into stopped container 47 | # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash 48 | 49 | # Clean up 50 | # docker system prune -a --volumes 51 | -------------------------------------------------------------------------------- /helmet_yolov5/data/Argoverse.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 2 | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ 3 | # Example usage: python train.py --data Argoverse.yaml 4 | # parent 5 | # ├── yolov5 6 | # └── datasets 7 | # └── Argoverse ← downloads here 8 | 9 | 10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
11 | path: ../datasets/Argoverse # dataset root dir 12 | train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images 13 | val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images 14 | test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview 15 | 16 | # Classes 17 | nc: 8 # number of classes 18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names 19 | 20 | 21 | # Download script/URL (optional) --------------------------------------------------------------------------------------- 22 | download: | 23 | import json 24 | 25 | from tqdm import tqdm 26 | from utils.general import download, Path 27 | 28 | 29 | def argoverse2yolo(set): 30 | labels = {} 31 | a = json.load(open(set, "rb")) 32 | for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."): 33 | img_id = annot['image_id'] 34 | img_name = a['images'][img_id]['name'] 35 | img_label_name = img_name[:-3] + "txt" 36 | 37 | cls = annot['category_id'] # instance class id 38 | x_center, y_center, width, height = annot['bbox'] 39 | x_center = (x_center + width / 2) / 1920.0 # offset and scale 40 | y_center = (y_center + height / 2) / 1200.0 # offset and scale 41 | width /= 1920.0 # scale 42 | height /= 1200.0 # scale 43 | 44 | img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']] 45 | if not img_dir.exists(): 46 | img_dir.mkdir(parents=True, exist_ok=True) 47 | 48 | k = str(img_dir / img_label_name) 49 | if k not in labels: 50 | labels[k] = [] 51 | labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n") 52 | 53 | for k in labels: 54 | with open(k, "w") as f: 55 | f.writelines(labels[k]) 56 | 57 | 58 | # Download 59 | dir = Path('../datasets/Argoverse') # dataset root dir 60 | urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] 61 | download(urls, dir=dir, delete=False) 62 | 63 | # Convert 64 | annotations_dir = 'Argoverse-HD/annotations/' 65 | (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images' 66 | for d in "train.json", "val.json": 67 | argoverse2yolo(dir / annotations_dir / d) # convert VisDrone annotations to YOLO labels 68 | -------------------------------------------------------------------------------- /helmet_yolov5/data/GlobalWheat2020.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 2 | # Global Wheat 2020 dataset http://www.global-wheat.com/ 3 | # Example usage: python train.py --data GlobalWheat2020.yaml 4 | # parent 5 | # ├── yolov5 6 | # └── datasets 7 | # └── GlobalWheat2020 ← downloads here 8 | 9 | 10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
11 | path: ../datasets/GlobalWheat2020 # dataset root dir 12 | train: # train images (relative to 'path') 3422 images 13 | - images/arvalis_1 14 | - images/arvalis_2 15 | - images/arvalis_3 16 | - images/ethz_1 17 | - images/rres_1 18 | - images/inrae_1 19 | - images/usask_1 20 | val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1) 21 | - images/ethz_1 22 | test: # test images (optional) 1276 images 23 | - images/utokyo_1 24 | - images/utokyo_2 25 | - images/nau_1 26 | - images/uq_1 27 | 28 | # Classes 29 | nc: 1 # number of classes 30 | names: ['wheat_head'] # class names 31 | 32 | 33 | # Download script/URL (optional) --------------------------------------------------------------------------------------- 34 | download: | 35 | from utils.general import download, Path 36 | 37 | # Download 38 | dir = Path(yaml['path']) # dataset root dir 39 | urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', 40 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] 41 | download(urls, dir=dir) 42 | 43 | # Make Directories 44 | for p in 'annotations', 'images', 'labels': 45 | (dir / p).mkdir(parents=True, exist_ok=True) 46 | 47 | # Move 48 | for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \ 49 | 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1': 50 | (dir / p).rename(dir / 'images' / p) # move to /images 51 | f = (dir / p).with_suffix('.json') # json file 52 | if f.exists(): 53 | f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations 54 | -------------------------------------------------------------------------------- /helmet_yolov5/data/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /helmet_yolov5/data/SKU-110K.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 2 | # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 3 | # Example usage: python train.py --data SKU-110K.yaml 4 | # parent 5 | # ├── yolov5 6 | # └── datasets 7 | # └── SKU-110K ← downloads here 8 | 9 | 10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
11 | path: ../datasets/SKU-110K # dataset root dir 12 | train: train.txt # train images (relative to 'path') 8219 images 13 | val: val.txt # val images (relative to 'path') 588 images 14 | test: test.txt # test images (optional) 2936 images 15 | 16 | # Classes 17 | nc: 1 # number of classes 18 | names: ['object'] # class names 19 | 20 | 21 | # Download script/URL (optional) --------------------------------------------------------------------------------------- 22 | download: | 23 | import shutil 24 | from tqdm import tqdm 25 | from utils.general import np, pd, Path, download, xyxy2xywh 26 | 27 | # Download 28 | dir = Path(yaml['path']) # dataset root dir 29 | parent = Path(dir.parent) # download dir 30 | urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz'] 31 | download(urls, dir=parent, delete=False) 32 | 33 | # Rename directories 34 | if dir.exists(): 35 | shutil.rmtree(dir) 36 | (parent / 'SKU110K_fixed').rename(dir) # rename dir 37 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir 38 | 39 | # Convert labels 40 | names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names 41 | for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv': 42 | x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations 43 | images, unique_images = x[:, 0], np.unique(x[:, 0]) 44 | with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f: 45 | f.writelines(f'./images/{s}\n' for s in unique_images) 46 | for im in tqdm(unique_images, desc=f'Converting {dir / d}'): 47 | cls = 0 # single-class dataset 48 | with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f: 49 | for r in x[images == im]: 50 | w, h = r[6], r[7] # image width, height 51 | xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance 52 | f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label 53 | -------------------------------------------------------------------------------- /helmet_yolov5/data/VOC.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 2 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC 3 | # Example usage: python train.py --data VOC.yaml 4 | # parent 5 | # ├── yolov5 6 | # └── datasets 7 | # └── VOC ← downloads here 8 | 9 | 10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
11 | path: ../datasets/VOC 12 | train: # train images (relative to 'path') 16551 images 13 | - images/train2012 14 | - images/train2007 15 | - images/val2012 16 | - images/val2007 17 | val: # val images (relative to 'path') 4952 images 18 | - images/test2007 19 | test: # test images (optional) 20 | - images/test2007 21 | 22 | # Classes 23 | nc: 20 # number of classes 24 | names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 25 | 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names 26 | 27 | 28 | # Download script/URL (optional) --------------------------------------------------------------------------------------- 29 | download: | 30 | import xml.etree.ElementTree as ET 31 | 32 | from tqdm import tqdm 33 | from utils.general import download, Path 34 | 35 | 36 | def convert_label(path, lb_path, year, image_id): 37 | def convert_box(size, box): 38 | dw, dh = 1. / size[0], 1. / size[1] 39 | x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2] 40 | return x * dw, y * dh, w * dw, h * dh 41 | 42 | in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml') 43 | out_file = open(lb_path, 'w') 44 | tree = ET.parse(in_file) 45 | root = tree.getroot() 46 | size = root.find('size') 47 | w = int(size.find('width').text) 48 | h = int(size.find('height').text) 49 | 50 | for obj in root.iter('object'): 51 | cls = obj.find('name').text 52 | if cls in yaml['names'] and not int(obj.find('difficult').text) == 1: 53 | xmlbox = obj.find('bndbox') 54 | bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) 55 | cls_id = yaml['names'].index(cls) # class id 56 | out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') 57 | 58 | 59 | # Download 60 | dir = Path(yaml['path']) # dataset root dir 61 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' 62 | urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images 63 | url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images 64 | url + 'VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images 65 | download(urls, dir=dir / 'images', delete=False) 66 | 67 | # Convert 68 | path = dir / f'images/VOCdevkit' 69 | for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'): 70 | imgs_path = dir / 'images' / f'{image_set}{year}' 71 | lbs_path = dir / 'labels' / f'{image_set}{year}' 72 | imgs_path.mkdir(exist_ok=True, parents=True) 73 | lbs_path.mkdir(exist_ok=True, parents=True) 74 | 75 | image_ids = open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt').read().strip().split() 76 | for id in tqdm(image_ids, desc=f'{image_set}{year}'): 77 | f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path 78 | lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path 79 | f.rename(imgs_path / f.name) # move image 80 | convert_label(path, lb_path, year, id) # convert labels to YOLO format 81 | -------------------------------------------------------------------------------- /helmet_yolov5/data/VisDrone.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 2 | # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset 3 | # Example usage: python train.py --data VisDrone.yaml 4 | # parent 5 | # ├── yolov5 6 | # └── datasets 7 | # └── VisDrone ← downloads here 8 | 9 | 10 
| # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 11 | path: ../datasets/VisDrone # dataset root dir 12 | train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images 13 | val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images 14 | test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images 15 | 16 | # Classes 17 | nc: 10 # number of classes 18 | names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'] 19 | 20 | 21 | # Download script/URL (optional) --------------------------------------------------------------------------------------- 22 | download: | 23 | from utils.general import download, os, Path 24 | 25 | def visdrone2yolo(dir): 26 | from PIL import Image 27 | from tqdm import tqdm 28 | 29 | def convert_box(size, box): 30 | # Convert VisDrone box to YOLO xywh box 31 | dw = 1. / size[0] 32 | dh = 1. / size[1] 33 | return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh 34 | 35 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory 36 | pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}') 37 | for f in pbar: 38 | img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size 39 | lines = [] 40 | with open(f, 'r') as file: # read annotation.txt 41 | for row in [x.split(',') for x in file.read().strip().splitlines()]: 42 | if row[4] == '0': # VisDrone 'ignored regions' class 0 43 | continue 44 | cls = int(row[5]) - 1 45 | box = convert_box(img_size, tuple(map(int, row[:4]))) 46 | lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n") 47 | with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl: 48 | fl.writelines(lines) # write label.txt 49 | 50 | 51 | # Download 52 | dir = Path(yaml['path']) # dataset root dir 53 | urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', 54 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', 55 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', 56 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] 57 | download(urls, dir=dir) 58 | 59 | # Convert 60 | for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev': 61 | visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels 62 | -------------------------------------------------------------------------------- /helmet_yolov5/data/coco.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 2 | # COCO 2017 dataset http://cocodataset.org 3 | # Example usage: python train.py --data coco.yaml 4 | # parent 5 | # ├── yolov5 6 | # └── datasets 7 | # └── coco ← downloads here 8 | 9 | 10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
11 | path: ../datasets/coco # dataset root dir 12 | train: train2017.txt # train images (relative to 'path') 118287 images 13 | val: val2017.txt # train images (relative to 'path') 5000 images 14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 15 | 16 | # Classes 17 | nc: 80 # number of classes 18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 19 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 20 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 21 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 22 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 23 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 24 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 25 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 26 | 'hair drier', 'toothbrush'] # class names 27 | 28 | 29 | # Download script/URL (optional) 30 | download: | 31 | from utils.general import download, Path 32 | 33 | # Download labels 34 | segments = False # segment or box labels 35 | dir = Path(yaml['path']) # dataset root dir 36 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' 37 | urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels 38 | download(urls, dir=dir.parent) 39 | 40 | # Download data 41 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images 42 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images 43 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional) 44 | download(urls, dir=dir / 'images', threads=3) 45 | -------------------------------------------------------------------------------- /helmet_yolov5/data/coco128.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) 3 | # Example usage: python train.py --data coco128.yaml 4 | # parent 5 | # ├── yolov5 6 | # └── datasets 7 | # └── coco128 ← downloads here 8 | 9 | 10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
11 | path: ../datasets/coco128 # dataset root dir 12 | train: images/train2017 # train images (relative to 'path') 128 images 13 | val: images/train2017 # val images (relative to 'path') 128 images 14 | test: # test images (optional) 15 | 16 | # Classes 17 | nc: 80 # number of classes 18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 19 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 20 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 21 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 22 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 23 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 24 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 25 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 26 | 'hair drier', 'toothbrush'] # class names 27 | 28 | 29 | # Download script/URL (optional) 30 | download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip -------------------------------------------------------------------------------- /helmet_yolov5/data/custom_data.yaml: -------------------------------------------------------------------------------- 1 | train: data/Safety_Helmet_Train_dataset/score/images/train 2 | val: data/Safety_Helmet_Train_dataset/score/images/val 3 | 4 | # number of classes 5 | nc: 2 6 | 7 | # class names 8 | names: ['person', 'hat'] 9 | -------------------------------------------------------------------------------- /helmet_yolov5/data/hyps/hyp.finetune.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for VOC finetuning 2 | # python train.py --batch 64 --weights yolov5m.pt --data VOC.yaml --img 512 --epochs 50 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | # Hyperparameter Evolution Results 7 | # Generations: 306 8 | # P R mAP.5 mAP.5:.95 box obj cls 9 | # Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146 10 | 11 | lr0: 0.0032 12 | lrf: 0.12 13 | momentum: 0.843 14 | weight_decay: 0.00036 15 | warmup_epochs: 2.0 16 | warmup_momentum: 0.5 17 | warmup_bias_lr: 0.05 18 | box: 0.0296 19 | cls: 0.243 20 | cls_pw: 0.631 21 | obj: 0.301 22 | obj_pw: 0.911 23 | iou_t: 0.2 24 | anchor_t: 2.91 25 | # anchors: 3.63 26 | fl_gamma: 0.0 27 | hsv_h: 0.0138 28 | hsv_s: 0.664 29 | hsv_v: 0.464 30 | degrees: 0.373 31 | translate: 0.245 32 | scale: 0.898 33 | shear: 0.602 34 | perspective: 0.0 35 | flipud: 0.00856 36 | fliplr: 0.5 37 | mosaic: 1.0 38 | mixup: 0.243 39 | copy_paste: 0.0 40 | -------------------------------------------------------------------------------- /helmet_yolov5/data/hyps/hyp.finetune_objects365.yaml: -------------------------------------------------------------------------------- 1 | lr0: 0.00258 2 | lrf: 0.17 3 | momentum: 0.779 4 | weight_decay: 0.00058 5 | warmup_epochs: 1.33 6 | warmup_momentum: 0.86 7 | warmup_bias_lr: 0.0711 8 | box: 0.0539 9 | cls: 0.299 10 | cls_pw: 0.825 11 | obj: 0.632 12 | obj_pw: 1.0 13 | iou_t: 0.2 14 | anchor_t: 3.44 15 | anchors: 3.2 16 | fl_gamma: 0.0 17 | hsv_h: 0.0188 18 | hsv_s: 0.704 19 | hsv_v: 0.36 20 | degrees: 0.0 21 | 
translate: 0.0902 22 | scale: 0.491 23 | shear: 0.0 24 | perspective: 0.0 25 | flipud: 0.0 26 | fliplr: 0.5 27 | mosaic: 1.0 28 | mixup: 0.0 29 | copy_paste: 0.0 30 | -------------------------------------------------------------------------------- /helmet_yolov5/data/hyps/hyp.scratch-p6.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for COCO training from scratch 2 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) 7 | lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) 8 | momentum: 0.937 # SGD momentum/Adam beta1 9 | weight_decay: 0.0005 # optimizer weight decay 5e-4 10 | warmup_epochs: 3.0 # warmup epochs (fractions ok) 11 | warmup_momentum: 0.8 # warmup initial momentum 12 | warmup_bias_lr: 0.1 # warmup initial bias lr 13 | box: 0.05 # box loss gain 14 | cls: 0.3 # cls loss gain 15 | cls_pw: 1.0 # cls BCELoss positive_weight 16 | obj: 0.7 # obj loss gain (scale with pixels) 17 | obj_pw: 1.0 # obj BCELoss positive_weight 18 | iou_t: 0.20 # IoU training threshold 19 | anchor_t: 4.0 # anchor-multiple threshold 20 | # anchors: 3 # anchors per output layer (0 to ignore) 21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) 22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction) 23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) 24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction) 25 | degrees: 0.0 # image rotation (+/- deg) 26 | translate: 0.1 # image translation (+/- fraction) 27 | scale: 0.9 # image scale (+/- gain) 28 | shear: 0.0 # image shear (+/- deg) 29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 30 | flipud: 0.0 # image flip up-down (probability) 31 | fliplr: 0.5 # image flip left-right (probability) 32 | mosaic: 1.0 # image mosaic (probability) 33 | mixup: 0.0 # image mixup (probability) 34 | copy_paste: 0.0 # segment copy-paste (probability) 35 | -------------------------------------------------------------------------------- /helmet_yolov5/data/hyps/hyp.scratch.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for COCO training from scratch 2 | # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) 7 | lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) 8 | momentum: 0.937 # SGD momentum/Adam beta1 9 | weight_decay: 0.0005 # optimizer weight decay 5e-4 10 | warmup_epochs: 3.0 # warmup epochs (fractions ok) 11 | warmup_momentum: 0.8 # warmup initial momentum 12 | warmup_bias_lr: 0.1 # warmup initial bias lr 13 | box: 0.05 # box loss gain 14 | cls: 0.5 # cls loss gain 15 | cls_pw: 1.0 # cls BCELoss positive_weight 16 | obj: 1.0 # obj loss gain (scale with pixels) 17 | obj_pw: 1.0 # obj BCELoss positive_weight 18 | iou_t: 0.20 # IoU training threshold 19 | anchor_t: 4.0 # anchor-multiple threshold 20 | # anchors: 3 # anchors per output layer (0 to ignore) 21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) 22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction) 23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) 24 | hsv_v: 0.4 # image HSV-Value 
augmentation (fraction) 25 | degrees: 0.0 # image rotation (+/- deg) 26 | translate: 0.1 # image translation (+/- fraction) 27 | scale: 0.5 # image scale (+/- gain) 28 | shear: 0.0 # image shear (+/- deg) 29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 30 | flipud: 0.0 # image flip up-down (probability) 31 | fliplr: 0.5 # image flip left-right (probability) 32 | mosaic: 1.0 # image mosaic (probability) 33 | mixup: 0.0 # image mixup (probability) 34 | copy_paste: 0.0 # segment copy-paste (probability) 35 | -------------------------------------------------------------------------------- /helmet_yolov5/data/images/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_yolov5/data/images/bus.jpg -------------------------------------------------------------------------------- /helmet_yolov5/data/images/zidane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_yolov5/data/images/zidane.jpg -------------------------------------------------------------------------------- /helmet_yolov5/data/scripts/download_weights.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 3 | # Download latest models from https://github.com/ultralytics/yolov5/releases 4 | # Example usage: bash path/to/download_weights.sh 5 | # parent 6 | # └── yolov5 7 | # ├── yolov5s.pt ← downloads here 8 | # ├── yolov5m.pt 9 | # └── ... 10 | 11 | python - <= cls >= 0, f'incorrect class index {cls}' 74 | 75 | # Write YOLO label 76 | if id not in shapes: 77 | shapes[id] = Image.open(file).size 78 | box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True) 79 | with open((labels / id).with_suffix('.txt'), 'a') as f: 80 | f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt 81 | except Exception as e: 82 | print(f'WARNING: skipping one label for {file}: {e}') 83 | 84 | 85 | # Download manually from https://challenge.xviewdataset.org 86 | dir = Path(yaml['path']) # dataset root dir 87 | # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels 88 | # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images 89 | # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels) 90 | # download(urls, dir=dir, delete=False) 91 | 92 | # Convert labels 93 | convert_labels(dir / 'xView_train.geojson') 94 | 95 | # Move images 96 | images = Path(dir / 'images') 97 | images.mkdir(parents=True, exist_ok=True) 98 | Path(dir / 'train_images').rename(dir / 'images' / 'train') 99 | Path(dir / 'val_images').rename(dir / 'images' / 'val') 100 | 101 | # Split 102 | autosplit(dir / 'images' / 'train') 103 | -------------------------------------------------------------------------------- /helmet_yolov5/hubconf.py: -------------------------------------------------------------------------------- 1 | """YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ 2 | 3 | Usage: 4 | import torch 5 | model = torch.hub.load('ultralytics/yolov5', 'yolov5s') 6 | """ 7 | 8 | import torch 9 | 10 | 11 | def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, 
verbose=True, device=None): 12 | """Creates a specified YOLOv5 model 13 | 14 | Arguments: 15 | name (str): name of model, i.e. 'yolov5s' 16 | pretrained (bool): load pretrained weights into the model 17 | channels (int): number of input channels 18 | classes (int): number of model classes 19 | autoshape (bool): apply YOLOv5 .autoshape() wrapper to model 20 | verbose (bool): print all information to screen 21 | device (str, torch.device, None): device to use for model parameters 22 | 23 | Returns: 24 | YOLOv5 pytorch model 25 | """ 26 | from pathlib import Path 27 | 28 | from models.yolo import Model, attempt_load 29 | from utils.general import check_requirements, set_logging 30 | from utils.downloads import attempt_download 31 | from utils.torch_utils import select_device 32 | 33 | file = Path(__file__).absolute() 34 | check_requirements(requirements=file.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) 35 | set_logging(verbose=verbose) 36 | 37 | save_dir = Path('') if str(name).endswith('.pt') else file.parent 38 | path = (save_dir / name).with_suffix('.pt') # checkpoint path 39 | try: 40 | device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device) 41 | 42 | if pretrained and channels == 3 and classes == 80: 43 | model = attempt_load(path, map_location=device) # download/load FP32 model 44 | else: 45 | cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path 46 | model = Model(cfg, channels, classes) # create model 47 | if pretrained: 48 | ckpt = torch.load(attempt_download(path), map_location=device) # load 49 | msd = model.state_dict() # model state_dict 50 | csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 51 | csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter 52 | model.load_state_dict(csd, strict=False) # load 53 | if len(ckpt['model'].names) == classes: 54 | model.names = ckpt['model'].names # set class names attribute 55 | if autoshape: 56 | model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS 57 | return model.to(device) 58 | 59 | except Exception as e: 60 | help_url = 'https://github.com/ultralytics/yolov5/issues/36' 61 | s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' 
% help_url 62 | raise Exception(s) from e 63 | 64 | 65 | def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None): 66 | # YOLOv5 custom or local model 67 | return _create(path, autoshape=autoshape, verbose=verbose, device=device) 68 | 69 | 70 | def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): 71 | # YOLOv5-small model https://github.com/ultralytics/yolov5 72 | return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device) 73 | 74 | 75 | def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): 76 | # YOLOv5-medium model https://github.com/ultralytics/yolov5 77 | return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device) 78 | 79 | 80 | def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): 81 | # YOLOv5-large model https://github.com/ultralytics/yolov5 82 | return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device) 83 | 84 | 85 | def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): 86 | # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 87 | return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device) 88 | 89 | 90 | def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): 91 | # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 92 | return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device) 93 | 94 | 95 | def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): 96 | # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 97 | return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device) 98 | 99 | 100 | def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): 101 | # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 102 | return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device) 103 | 104 | 105 | def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): 106 | # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 107 | return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device) 108 | 109 | 110 | if __name__ == '__main__': 111 | model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained 112 | # model = custom(path='path/to/model.pt') # custom 113 | 114 | # Verify inference 115 | import cv2 116 | import numpy as np 117 | from PIL import Image 118 | from pathlib import Path 119 | 120 | imgs = ['data/images/zidane.jpg', # filename 121 | Path('data/images/zidane.jpg'), # Path 122 | 'https://ultralytics.com/images/zidane.jpg', # URI 123 | cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV 124 | Image.open('data/images/bus.jpg'), # PIL 125 | np.zeros((320, 640, 3))] # numpy 126 | 127 | results = model(imgs) # batched inference 128 | results.print() 129 | results.save() 130 | -------------------------------------------------------------------------------- /helmet_yolov5/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_yolov5/models/__init__.py 
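A minimal usage sketch for the hubconf.py entry points listed above. This is an illustration, not part of the repository: the checkpoint path is a hypothetical output of train.py (for example, a run on custom_data.yaml), and it assumes the script is executed from the helmet_yolov5 directory so that hubconf.py and its relative imports resolve. On recent PyTorch versions the equivalent call is torch.hub.load('.', 'custom', path=..., source='local').

from hubconf import custom, yolov5s

# Hypothetical helmet checkpoint produced by train.py --data custom_data.yaml
model = custom(path='runs/train/exp/weights/best.pt')
# model = yolov5s(pretrained=True)  # stock 80-class COCO model via the same factory

# The autoshaped model accepts file paths, URLs, PIL images, OpenCV/numpy arrays, or lists of them
results = model('data/images/zidane.jpg')
results.print()  # per-image detection summary
results.save()   # save annotated copies of the input images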
-------------------------------------------------------------------------------- /helmet_yolov5/models/experimental.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 experimental modules 2 | 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | 7 | from models.common import Conv, DWConv 8 | from utils.downloads import attempt_download 9 | 10 | 11 | class CrossConv(nn.Module): 12 | # Cross Convolution Downsample 13 | def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): 14 | # ch_in, ch_out, kernel, stride, groups, expansion, shortcut 15 | super().__init__() 16 | c_ = int(c2 * e) # hidden channels 17 | self.cv1 = Conv(c1, c_, (1, k), (1, s)) 18 | self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) 19 | self.add = shortcut and c1 == c2 20 | 21 | def forward(self, x): 22 | return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) 23 | 24 | 25 | class Sum(nn.Module): 26 | # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 27 | def __init__(self, n, weight=False): # n: number of inputs 28 | super().__init__() 29 | self.weight = weight # apply weights boolean 30 | self.iter = range(n - 1) # iter object 31 | if weight: 32 | self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights 33 | 34 | def forward(self, x): 35 | y = x[0] # no weight 36 | if self.weight: 37 | w = torch.sigmoid(self.w) * 2 38 | for i in self.iter: 39 | y = y + x[i + 1] * w[i] 40 | else: 41 | for i in self.iter: 42 | y = y + x[i + 1] 43 | return y 44 | 45 | 46 | class GhostConv(nn.Module): 47 | # Ghost Convolution https://github.com/huawei-noah/ghostnet 48 | def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups 49 | super().__init__() 50 | c_ = c2 // 2 # hidden channels 51 | self.cv1 = Conv(c1, c_, k, s, None, g, act) 52 | self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) 53 | 54 | def forward(self, x): 55 | y = self.cv1(x) 56 | return torch.cat([y, self.cv2(y)], 1) 57 | 58 | 59 | class GhostBottleneck(nn.Module): 60 | # Ghost Bottleneck https://github.com/huawei-noah/ghostnet 61 | def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride 62 | super().__init__() 63 | c_ = c2 // 2 64 | self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw 65 | DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw 66 | GhostConv(c_, c2, 1, 1, act=False)) # pw-linear 67 | self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), 68 | Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() 69 | 70 | def forward(self, x): 71 | return self.conv(x) + self.shortcut(x) 72 | 73 | 74 | class MixConv2d(nn.Module): 75 | # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 76 | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): 77 | super().__init__() 78 | groups = len(k) 79 | if equal_ch: # equal c_ per group 80 | i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices 81 | c_ = [(i == g).sum() for g in range(groups)] # intermediate channels 82 | else: # equal weight.numel() per group 83 | b = [c2] + [0] * groups 84 | a = np.eye(groups + 1, groups, k=-1) 85 | a -= np.roll(a, 1, axis=1) 86 | a *= np.array(k) ** 2 87 | a[0] = 1 88 | c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b 89 | 90 | self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) 91 | self.bn = nn.BatchNorm2d(c2) 92 | self.act = nn.LeakyReLU(0.1, inplace=True) 93 | 94 | def forward(self, x): 
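        # Mixed depth-wise convolution: run each kernel-size branch in self.m on x,
        # concatenate the branch outputs along the channel dimension (c2 channels total),
        # then apply BatchNorm and LeakyReLU; the residual add below assumes the input
        # channel count c1 equals the output channel count c2.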
95 | return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) 96 | 97 | 98 | class Ensemble(nn.ModuleList): 99 | # Ensemble of models 100 | def __init__(self): 101 | super().__init__() 102 | 103 | def forward(self, x, augment=False, profile=False, visualize=False): 104 | y = [] 105 | for module in self: 106 | y.append(module(x, augment, profile, visualize)[0]) 107 | # y = torch.stack(y).max(0)[0] # max ensemble 108 | # y = torch.stack(y).mean(0) # mean ensemble 109 | y = torch.cat(y, 1) # nms ensemble 110 | return y, None # inference, train output 111 | 112 | 113 | def attempt_load(weights, map_location=None, inplace=True): 114 | from models.yolo import Detect, Model 115 | 116 | # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a 117 | model = Ensemble() 118 | for w in weights if isinstance(weights, list) else [weights]: 119 | ckpt = torch.load(attempt_download(w), map_location=map_location) # load 120 | model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model 121 | 122 | # Compatibility updates 123 | for m in model.modules(): 124 | if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]: 125 | m.inplace = inplace # pytorch 1.7.0 compatibility 126 | elif type(m) is Conv: 127 | m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility 128 | 129 | if len(model) == 1: 130 | return model[-1] # return model 131 | else: 132 | print(f'Ensemble created with {weights}\n') 133 | for k in ['names']: 134 | setattr(model, k, getattr(model[-1], k)) 135 | model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride 136 | return model # return ensemble 137 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/anchors.yaml: -------------------------------------------------------------------------------- 1 | # Default YOLOv5 anchors for COCO data 2 | 3 | 4 | # P5 ------------------------------------------------------------------------------------------------------------------- 5 | # P5-640: 6 | anchors_p5_640: 7 | - [10,13, 16,30, 33,23] # P3/8 8 | - [30,61, 62,45, 59,119] # P4/16 9 | - [116,90, 156,198, 373,326] # P5/32 10 | 11 | 12 | # P6 ------------------------------------------------------------------------------------------------------------------- 13 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 14 | anchors_p6_640: 15 | - [9,11, 21,19, 17,41] # P3/8 16 | - [43,32, 39,70, 86,64] # P4/16 17 | - [65,131, 134,130, 120,265] # P5/32 18 | - [282,180, 247,354, 512,387] # P6/64 19 | 20 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 21 | anchors_p6_1280: 22 | - [19,27, 44,40, 38,94] # P3/8 23 | - [96,68, 86,152, 180,137] # P4/16 24 | - [140,301, 303,264, 238,542] # P5/32 25 | - [436,615, 739,380, 925,792] # P6/64 26 | 27 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 28 | anchors_p6_1920: 29 | - [28,41, 67,59, 57,141] # P3/8 30 | - [144,103, 
129,227, 270,205] # P4/16 31 | - [209,452, 455,396, 358,812] # P5/32 32 | - [653,922, 1109,570, 1387,1187] # P6/64 33 | 34 | 35 | # P7 ------------------------------------------------------------------------------------------------------------------- 36 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 37 | anchors_p7_640: 38 | - [11,11, 13,30, 29,20] # P3/8 39 | - [30,46, 61,38, 39,92] # P4/16 40 | - [78,80, 146,66, 79,163] # P5/32 41 | - [149,150, 321,143, 157,303] # P6/64 42 | - [257,402, 359,290, 524,372] # P7/128 43 | 44 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 45 | anchors_p7_1280: 46 | - [19,22, 54,36, 32,77] # P3/8 47 | - [70,83, 138,71, 75,173] # P4/16 48 | - [165,159, 148,334, 375,151] # P5/32 49 | - [334,317, 251,626, 499,474] # P6/64 50 | - [750,326, 534,814, 1079,818] # P7/128 51 | 52 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 53 | anchors_p7_1920: 54 | - [29,34, 81,55, 47,115] # P3/8 55 | - [105,124, 207,107, 113,259] # P4/16 56 | - [247,238, 222,500, 563,227] # P5/32 57 | - [501,476, 376,939, 749,711] # P6/64 58 | - [1126,489, 801,1222, 1618,1227] # P7/128 59 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov3-spp.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # darknet53 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Conv, [32, 3, 1]], # 0 14 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 15 | [-1, 1, Bottleneck, [64]], 16 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 17 | [-1, 2, Bottleneck, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 19 | [-1, 8, Bottleneck, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 21 | [-1, 8, Bottleneck, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 23 | [-1, 4, Bottleneck, [1024]], # 10 24 | ] 25 | 26 | # YOLOv3-SPP head 27 | head: 28 | [[-1, 1, Bottleneck, [1024, False]], 29 | [-1, 1, SPP, [512, [5, 9, 13]]], 30 | [-1, 1, Conv, [1024, 3, 1]], 31 | [-1, 1, Conv, [512, 1, 1]], 32 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 33 | 34 | [-2, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 37 | [-1, 1, Bottleneck, [512, False]], 38 | [-1, 1, Bottleneck, [512, False]], 39 | [-1, 1, Conv, [256, 1, 1]], 40 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 41 | 42 | [-2, 1, Conv, [128, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 45 | [-1, 1, Bottleneck, [256, False]], 46 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 47 | 48 | [[27, 22, 15], 1, Detect, [nc, anchors]], # 
Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov3-tiny.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 6 | - [10,14, 23,27, 37,58] # P4/16 7 | - [81,82, 135,169, 344,319] # P5/32 8 | 9 | # YOLOv3-tiny backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [[-1, 1, Conv, [16, 3, 1]], # 0 13 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 14 | [-1, 1, Conv, [32, 3, 1]], 15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 16 | [-1, 1, Conv, [64, 3, 1]], 17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 18 | [-1, 1, Conv, [128, 3, 1]], 19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 20 | [-1, 1, Conv, [256, 3, 1]], 21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 22 | [-1, 1, Conv, [512, 3, 1]], 23 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 24 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 25 | ] 26 | 27 | # YOLOv3-tiny head 28 | head: 29 | [[-1, 1, Conv, [1024, 3, 1]], 30 | [-1, 1, Conv, [256, 1, 1]], 31 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) 32 | 33 | [-2, 1, Conv, [128, 1, 1]], 34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 35 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 36 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) 37 | 38 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) 39 | ] 40 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov3.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # darknet53 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Conv, [32, 3, 1]], # 0 14 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 15 | [-1, 1, Bottleneck, [64]], 16 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 17 | [-1, 2, Bottleneck, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 19 | [-1, 8, Bottleneck, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 21 | [-1, 8, Bottleneck, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 23 | [-1, 4, Bottleneck, [1024]], # 10 24 | ] 25 | 26 | # YOLOv3 head 27 | head: 28 | [[-1, 1, Bottleneck, [1024, False]], 29 | [-1, 1, Conv, [512, [1, 1]]], 30 | [-1, 1, Conv, [1024, 3, 1]], 31 | [-1, 1, Conv, [512, 1, 1]], 32 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 33 | 34 | [-2, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 37 | [-1, 1, Bottleneck, [512, False]], 38 | [-1, 1, Bottleneck, [512, False]], 39 | [-1, 1, Conv, [256, 1, 1]], 40 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 41 | 42 | [-2, 1, Conv, [128, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 45 | [-1, 1, Bottleneck, [256, False]], 46 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 47 | 48 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5-bifpn.yaml: 
-------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # YOLOv5 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 9, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]] 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 1, SPP, [1024, [5, 9, 13]]], 22 | [-1, 3, C3, [1024, False]], # 9 23 | ] 24 | 25 | # YOLOv5 BiFPN head 26 | head: 27 | [[-1, 1, Conv, [512, 1, 1]], 28 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 29 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 30 | [-1, 3, C3, [512, False]], # 13 31 | 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 35 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 36 | 37 | [-1, 1, Conv, [256, 3, 2]], 38 | [[-1, 14, 6], 1, Concat, [1]], # cat P4 39 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 40 | 41 | [-1, 1, Conv, [512, 3, 2]], 42 | [[-1, 10], 1, Concat, [1]], # cat head P5 43 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 44 | 45 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 46 | ] 47 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5-fpn.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # YOLOv5 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, Bottleneck, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 9, BottleneckCSP, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, BottleneckCSP, [512]], 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 1, SPP, [1024, [5, 9, 13]]], 22 | [-1, 6, BottleneckCSP, [1024]], # 9 23 | ] 24 | 25 | # YOLOv5 FPN head 26 | head: 27 | [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) 28 | 29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 30 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 31 | [-1, 1, Conv, [512, 1, 1]], 32 | [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) 33 | 34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 35 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 36 | [-1, 1, Conv, [256, 1, 1]], 37 | [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) 38 | 39 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 40 | ] 41 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5-p2.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 3 6 | 7 | # YOLOv5 backbone 8 | backbone: 9 | # [from, number, module, args] 10 | [[-1, 1, Focus, 
[64, 3]], # 0-P1/2 11 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 12 | [-1, 3, C3, [128]], 13 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 14 | [-1, 9, C3, [256]], 15 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 16 | [-1, 9, C3, [512]], 17 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 18 | [-1, 1, SPP, [1024, [5, 9, 13]]], 19 | [-1, 3, C3, [1024, False]], # 9 20 | ] 21 | 22 | # YOLOv5 head 23 | head: 24 | [[-1, 1, Conv, [512, 1, 1]], 25 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 26 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 27 | [-1, 3, C3, [512, False]], # 13 28 | 29 | [-1, 1, Conv, [256, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 32 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 33 | 34 | [-1, 1, Conv, [128, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 2], 1, Concat, [1]], # cat backbone P2 37 | [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) 38 | 39 | [-1, 1, Conv, [128, 3, 2]], 40 | [[-1, 18], 1, Concat, [1]], # cat head P3 41 | [-1, 3, C3, [256, False]], # 24 (P3/8-small) 42 | 43 | [-1, 1, Conv, [256, 3, 2]], 44 | [[-1, 14], 1, Concat, [1]], # cat head P4 45 | [-1, 3, C3, [512, False]], # 27 (P4/16-medium) 46 | 47 | [-1, 1, Conv, [512, 3, 2]], 48 | [[-1, 10], 1, Concat, [1]], # cat head P5 49 | [-1, 3, C3, [1024, False]], # 30 (P5/32-large) 50 | 51 | [[24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 52 | ] 53 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5-p6.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 3 6 | 7 | # YOLOv5 backbone 8 | backbone: 9 | # [from, number, module, args] 10 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 11 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 12 | [-1, 3, C3, [128]], 13 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 14 | [-1, 9, C3, [256]], 15 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 16 | [-1, 9, C3, [512]], 17 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 18 | [-1, 3, C3, [768]], 19 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 20 | [-1, 1, SPP, [1024, [3, 5, 7]]], 21 | [-1, 3, C3, [1024, False]], # 11 22 | ] 23 | 24 | # YOLOv5 head 25 | head: 26 | [[-1, 1, Conv, [768, 1, 1]], 27 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 28 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 29 | [-1, 3, C3, [768, False]], # 15 30 | 31 | [-1, 1, Conv, [512, 1, 1]], 32 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 33 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 34 | [-1, 3, C3, [512, False]], # 19 35 | 36 | [-1, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 39 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 40 | 41 | [-1, 1, Conv, [256, 3, 2]], 42 | [[-1, 20], 1, Concat, [1]], # cat head P4 43 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 44 | 45 | [-1, 1, Conv, [512, 3, 2]], 46 | [[-1, 16], 1, Concat, [1]], # cat head P5 47 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 48 | 49 | [-1, 1, Conv, [768, 3, 2]], 50 | [[-1, 12], 1, Concat, [1]], # cat head P6 51 | [-1, 3, C3, [1024, False]], # 32 (P5/64-xlarge) 52 | 53 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 54 | ] 55 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5-p7.yaml: 
-------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 3 6 | 7 | # YOLOv5 backbone 8 | backbone: 9 | # [from, number, module, args] 10 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 11 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 12 | [-1, 3, C3, [128]], 13 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 14 | [-1, 9, C3, [256]], 15 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 16 | [-1, 9, C3, [512]], 17 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 18 | [-1, 3, C3, [768]], 19 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 20 | [-1, 3, C3, [1024]], 21 | [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 22 | [-1, 1, SPP, [1280, [3, 5]]], 23 | [-1, 3, C3, [1280, False]], # 13 24 | ] 25 | 26 | # YOLOv5 head 27 | head: 28 | [[-1, 1, Conv, [1024, 1, 1]], 29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 30 | [[-1, 10], 1, Concat, [1]], # cat backbone P6 31 | [-1, 3, C3, [1024, False]], # 17 32 | 33 | [-1, 1, Conv, [768, 1, 1]], 34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 35 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 36 | [-1, 3, C3, [768, False]], # 21 37 | 38 | [-1, 1, Conv, [512, 1, 1]], 39 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 40 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 41 | [-1, 3, C3, [512, False]], # 25 42 | 43 | [-1, 1, Conv, [256, 1, 1]], 44 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 45 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 46 | [-1, 3, C3, [256, False]], # 29 (P3/8-small) 47 | 48 | [-1, 1, Conv, [256, 3, 2]], 49 | [[-1, 26], 1, Concat, [1]], # cat head P4 50 | [-1, 3, C3, [512, False]], # 32 (P4/16-medium) 51 | 52 | [-1, 1, Conv, [512, 3, 2]], 53 | [[-1, 22], 1, Concat, [1]], # cat head P5 54 | [-1, 3, C3, [768, False]], # 35 (P5/32-large) 55 | 56 | [-1, 1, Conv, [768, 3, 2]], 57 | [[-1, 18], 1, Concat, [1]], # cat head P6 58 | [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) 59 | 60 | [-1, 1, Conv, [1024, 3, 2]], 61 | [[-1, 14], 1, Concat, [1]], # cat head P7 62 | [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) 63 | 64 | [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) 65 | ] 66 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5-panet.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # YOLOv5 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, BottleneckCSP, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 9, BottleneckCSP, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, BottleneckCSP, [512]], 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 1, SPP, [1024, [5, 9, 13]]], 22 | [-1, 3, BottleneckCSP, [1024, False]], # 9 23 | ] 24 | 25 | # YOLOv5 PANet head 26 | head: 27 | [[-1, 1, Conv, [512, 1, 1]], 28 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 29 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 30 | [-1, 3, BottleneckCSP, [512, False]], # 13 31 | 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 35 | [-1, 
3, BottleneckCSP, [256, False]], # 17 (P3/8-small) 36 | 37 | [-1, 1, Conv, [256, 3, 2]], 38 | [[-1, 14], 1, Concat, [1]], # cat head P4 39 | [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) 40 | 41 | [-1, 1, Conv, [512, 3, 2]], 42 | [[-1, 10], 1, Concat, [1]], # cat head P5 43 | [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) 44 | 45 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 46 | ] 47 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5l6.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 6 | - [19,27, 44,40, 38,94] # P3/8 7 | - [96,68, 86,152, 180,137] # P4/16 8 | - [140,301, 303,264, 238,542] # P5/32 9 | - [436,615, 739,380, 925,792] # P6/64 10 | 11 | # YOLOv5 backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 15 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 16 | [-1, 3, C3, [128]], 17 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 18 | [-1, 9, C3, [256]], 19 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 20 | [-1, 9, C3, [512]], 21 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 22 | [-1, 3, C3, [768]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 24 | [-1, 1, SPP, [1024, [3, 5, 7]]], 25 | [-1, 3, C3, [1024, False]], # 11 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [[-1, 1, Conv, [768, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 33 | [-1, 3, C3, [768, False]], # 15 34 | 35 | [-1, 1, Conv, [512, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 38 | [-1, 3, C3, [512, False]], # 19 39 | 40 | [-1, 1, Conv, [256, 1, 1]], 41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 42 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 43 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 44 | 45 | [-1, 1, Conv, [256, 3, 2]], 46 | [[-1, 20], 1, Concat, [1]], # cat head P4 47 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 48 | 49 | [-1, 1, Conv, [512, 3, 2]], 50 | [[-1, 16], 1, Concat, [1]], # cat head P5 51 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 52 | 53 | [-1, 1, Conv, [768, 3, 2]], 54 | [[-1, 12], 1, Concat, [1]], # cat head P6 55 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 56 | 57 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 58 | ] 59 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5m6.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.67 # model depth multiple 4 | width_multiple: 0.75 # layer channel multiple 5 | anchors: 6 | - [19,27, 44,40, 38,94] # P3/8 7 | - [96,68, 86,152, 180,137] # P4/16 8 | - [140,301, 303,264, 238,542] # P5/32 9 | - [436,615, 739,380, 925,792] # P6/64 10 | 11 | # YOLOv5 backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 15 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 16 | [-1, 3, C3, [128]], 17 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 18 | [-1, 9, C3, [256]], 19 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 20 | [-1, 9, C3, [512]], 21 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 22 | [-1, 3, C3, [768]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 24 | [-1, 1, SPP, [1024, [3, 5, 7]]], 25 | [-1, 3, C3, [1024, 
False]], # 11 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [[-1, 1, Conv, [768, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 33 | [-1, 3, C3, [768, False]], # 15 34 | 35 | [-1, 1, Conv, [512, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 38 | [-1, 3, C3, [512, False]], # 19 39 | 40 | [-1, 1, Conv, [256, 1, 1]], 41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 42 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 43 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 44 | 45 | [-1, 1, Conv, [256, 3, 2]], 46 | [[-1, 20], 1, Concat, [1]], # cat head P4 47 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 48 | 49 | [-1, 1, Conv, [512, 3, 2]], 50 | [[-1, 16], 1, Concat, [1]], # cat head P5 51 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 52 | 53 | [-1, 1, Conv, [768, 3, 2]], 54 | [[-1, 12], 1, Concat, [1]], # cat head P6 55 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 56 | 57 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 58 | ] 59 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5s-transformer.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # YOLOv5 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 9, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]], 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 1, SPP, [1024, [5, 9, 13]]], 22 | [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module 23 | ] 24 | 25 | # YOLOv5 head 26 | head: 27 | [[-1, 1, Conv, [512, 1, 1]], 28 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 29 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 30 | [-1, 3, C3, [512, False]], # 13 31 | 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 35 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 36 | 37 | [-1, 1, Conv, [256, 3, 2]], 38 | [[-1, 14], 1, Concat, [1]], # cat head P4 39 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 40 | 41 | [-1, 1, Conv, [512, 3, 2]], 42 | [[-1, 10], 1, Concat, [1]], # cat head P5 43 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 44 | 45 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 46 | ] 47 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5s6.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | width_multiple: 0.50 # layer channel multiple 5 | anchors: 6 | - [19,27, 44,40, 38,94] # P3/8 7 | - [96,68, 86,152, 180,137] # P4/16 8 | - [140,301, 303,264, 238,542] # P5/32 9 | - [436,615, 739,380, 925,792] # P6/64 10 | 11 | # YOLOv5 backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 15 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 16 | [-1, 3, C3, [128]], 17 | [-1, 1, Conv, [256, 3, 2]], # 
3-P3/8 18 | [-1, 9, C3, [256]], 19 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 20 | [-1, 9, C3, [512]], 21 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 22 | [-1, 3, C3, [768]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 24 | [-1, 1, SPP, [1024, [3, 5, 7]]], 25 | [-1, 3, C3, [1024, False]], # 11 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [[-1, 1, Conv, [768, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 33 | [-1, 3, C3, [768, False]], # 15 34 | 35 | [-1, 1, Conv, [512, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 38 | [-1, 3, C3, [512, False]], # 19 39 | 40 | [-1, 1, Conv, [256, 1, 1]], 41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 42 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 43 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 44 | 45 | [-1, 1, Conv, [256, 3, 2]], 46 | [[-1, 20], 1, Concat, [1]], # cat head P4 47 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 48 | 49 | [-1, 1, Conv, [512, 3, 2]], 50 | [[-1, 16], 1, Concat, [1]], # cat head P5 51 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 52 | 53 | [-1, 1, Conv, [768, 3, 2]], 54 | [[-1, 12], 1, Concat, [1]], # cat head P6 55 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 56 | 57 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 58 | ] 59 | -------------------------------------------------------------------------------- /helmet_yolov5/models/hub/yolov5x6.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.33 # model depth multiple 4 | width_multiple: 1.25 # layer channel multiple 5 | anchors: 6 | - [19,27, 44,40, 38,94] # P3/8 7 | - [96,68, 86,152, 180,137] # P4/16 8 | - [140,301, 303,264, 238,542] # P5/32 9 | - [436,615, 739,380, 925,792] # P6/64 10 | 11 | # YOLOv5 backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 15 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 16 | [-1, 3, C3, [128]], 17 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 18 | [-1, 9, C3, [256]], 19 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 20 | [-1, 9, C3, [512]], 21 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 22 | [-1, 3, C3, [768]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 24 | [-1, 1, SPP, [1024, [3, 5, 7]]], 25 | [-1, 3, C3, [1024, False]], # 11 26 | ] 27 | 28 | # YOLOv5 head 29 | head: 30 | [[-1, 1, Conv, [768, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 33 | [-1, 3, C3, [768, False]], # 15 34 | 35 | [-1, 1, Conv, [512, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 38 | [-1, 3, C3, [512, False]], # 19 39 | 40 | [-1, 1, Conv, [256, 1, 1]], 41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 42 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 43 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 44 | 45 | [-1, 1, Conv, [256, 3, 2]], 46 | [[-1, 20], 1, Concat, [1]], # cat head P4 47 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 48 | 49 | [-1, 1, Conv, [512, 3, 2]], 50 | [[-1, 16], 1, Concat, [1]], # cat head P5 51 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 52 | 53 | [-1, 1, Conv, [768, 3, 2]], 54 | [[-1, 12], 1, Concat, [1]], # cat head P6 55 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 56 | 57 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 58 | ] 59 | -------------------------------------------------------------------------------- 
/helmet_yolov5/models/yolov5l.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # YOLOv5 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 9, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]], 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 1, SPP, [1024, [5, 9, 13]]], 22 | [-1, 3, C3, [1024, False]], # 9 23 | ] 24 | 25 | # YOLOv5 head 26 | head: 27 | [[-1, 1, Conv, [512, 1, 1]], 28 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 29 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 30 | [-1, 3, C3, [512, False]], # 13 31 | 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 35 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 36 | 37 | [-1, 1, Conv, [256, 3, 2]], 38 | [[-1, 14], 1, Concat, [1]], # cat head P4 39 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 40 | 41 | [-1, 1, Conv, [512, 3, 2]], 42 | [[-1, 10], 1, Concat, [1]], # cat head P5 43 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 44 | 45 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 46 | ] 47 | -------------------------------------------------------------------------------- /helmet_yolov5/models/yolov5m.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.67 # model depth multiple 4 | width_multiple: 0.75 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # YOLOv5 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 9, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]], 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 1, SPP, [1024, [5, 9, 13]]], 22 | [-1, 3, C3, [1024, False]], # 9 23 | ] 24 | 25 | # YOLOv5 head 26 | head: 27 | [[-1, 1, Conv, [512, 1, 1]], 28 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 29 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 30 | [-1, 3, C3, [512, False]], # 13 31 | 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 35 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 36 | 37 | [-1, 1, Conv, [256, 3, 2]], 38 | [[-1, 14], 1, Concat, [1]], # cat head P4 39 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 40 | 41 | [-1, 1, Conv, [512, 3, 2]], 42 | [[-1, 10], 1, Concat, [1]], # cat head P5 43 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 44 | 45 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 46 | ] 47 | -------------------------------------------------------------------------------- /helmet_yolov5/models/yolov5s.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 2 # number of classes 3 | depth_multiple: 0.33 # model depth multiple 4 | 
width_multiple: 0.50 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # YOLOv5 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 9, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]], 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 1, SPP, [1024, [5, 9, 13]]], 22 | [-1, 3, C3, [1024, False]], # 9 23 | ] 24 | 25 | # YOLOv5 head 26 | head: 27 | [[-1, 1, Conv, [512, 1, 1]], 28 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 29 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 30 | [-1, 3, C3, [512, False]], # 13 31 | 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 35 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 36 | 37 | [-1, 1, Conv, [256, 3, 2]], 38 | [[-1, 14], 1, Concat, [1]], # cat head P4 39 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 40 | 41 | [-1, 1, Conv, [512, 3, 2]], 42 | [[-1, 10], 1, Concat, [1]], # cat head P5 43 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 44 | 45 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 46 | ] 47 | -------------------------------------------------------------------------------- /helmet_yolov5/models/yolov5x.yaml: -------------------------------------------------------------------------------- 1 | # Parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.33 # model depth multiple 4 | width_multiple: 1.25 # layer channel multiple 5 | anchors: 6 | - [10,13, 16,30, 33,23] # P3/8 7 | - [30,61, 62,45, 59,119] # P4/16 8 | - [116,90, 156,198, 373,326] # P5/32 9 | 10 | # YOLOv5 backbone 11 | backbone: 12 | # [from, number, module, args] 13 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 9, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]], 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 1, SPP, [1024, [5, 9, 13]]], 22 | [-1, 3, C3, [1024, False]], # 9 23 | ] 24 | 25 | # YOLOv5 head 26 | head: 27 | [[-1, 1, Conv, [512, 1, 1]], 28 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 29 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 30 | [-1, 3, C3, [512, False]], # 13 31 | 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 35 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 36 | 37 | [-1, 1, Conv, [256, 3, 2]], 38 | [[-1, 14], 1, Concat, [1]], # cat head P4 39 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 40 | 41 | [-1, 1, Conv, [512, 3, 2]], 42 | [[-1, 10], 1, Concat, [1]], # cat head P5 43 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 44 | 45 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 46 | ] 47 | -------------------------------------------------------------------------------- /helmet_yolov5/requirements.txt: -------------------------------------------------------------------------------- 1 | # pip install -r requirements.txt 2 | 3 | # base ---------------------------------------- 4 | matplotlib>=3.2.2 5 | numpy>=1.18.5 6 | opencv-python>=4.1.2 7 | Pillow 8 | PyYAML>=5.3.1 9 | scipy>=1.4.1 10 | torch>=1.7.0 11 | torchvision>=0.8.1 12 | tqdm>=4.41.0 13 | 14 | # logging 
------------------------------------- 15 | tensorboard>=2.4.1 16 | # wandb 17 | 18 | # plotting ------------------------------------ 19 | seaborn>=0.11.0 20 | pandas 21 | 22 | # export -------------------------------------- 23 | # coremltools>=4.1 24 | # onnx>=1.9.0 25 | # scikit-learn==0.19.2 # for coreml quantization 26 | 27 | # extras -------------------------------------- 28 | # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 29 | # pycocotools>=2.0 # COCO mAP 30 | # albumentations>=1.0.3 31 | thop # FLOPs computation 32 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_yolov5/utils/__init__.py -------------------------------------------------------------------------------- /helmet_yolov5/utils/activations.py: -------------------------------------------------------------------------------- 1 | # Activation functions 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | 8 | # SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- 9 | class SiLU(nn.Module): # export-friendly version of nn.SiLU() 10 | @staticmethod 11 | def forward(x): 12 | return x * torch.sigmoid(x) 13 | 14 | 15 | class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() 16 | @staticmethod 17 | def forward(x): 18 | # return x * F.hardsigmoid(x) # for torchscript and CoreML 19 | return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX 20 | 21 | 22 | # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- 23 | class Mish(nn.Module): 24 | @staticmethod 25 | def forward(x): 26 | return x * F.softplus(x).tanh() 27 | 28 | 29 | class MemoryEfficientMish(nn.Module): 30 | class F(torch.autograd.Function): 31 | @staticmethod 32 | def forward(ctx, x): 33 | ctx.save_for_backward(x) 34 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) 35 | 36 | @staticmethod 37 | def backward(ctx, grad_output): 38 | x = ctx.saved_tensors[0] 39 | sx = torch.sigmoid(x) 40 | fx = F.softplus(x).tanh() 41 | return grad_output * (fx + x * sx * (1 - fx * fx)) 42 | 43 | def forward(self, x): 44 | return self.F.apply(x) 45 | 46 | 47 | # FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- 48 | class FReLU(nn.Module): 49 | def __init__(self, c1, k=3): # ch_in, kernel 50 | super().__init__() 51 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) 52 | self.bn = nn.BatchNorm2d(c1) 53 | 54 | def forward(self, x): 55 | return torch.max(x, self.bn(self.conv(x))) 56 | 57 | 58 | # ACON https://arxiv.org/pdf/2009.04759.pdf ---------------------------------------------------------------------------- 59 | class AconC(nn.Module): 60 | r""" ACON activation (activate or not). 61 | AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter 62 | according to "Activate or Not: Learning Customized Activation" . 
63 | """ 64 | 65 | def __init__(self, c1): 66 | super().__init__() 67 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) 68 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) 69 | self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) 70 | 71 | def forward(self, x): 72 | dpx = (self.p1 - self.p2) * x 73 | return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x 74 | 75 | 76 | class MetaAconC(nn.Module): 77 | r""" ACON activation (activate or not). 78 | MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network 79 | according to "Activate or Not: Learning Customized Activation" . 80 | """ 81 | 82 | def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r 83 | super().__init__() 84 | c2 = max(r, c1 // r) 85 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) 86 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) 87 | self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) 88 | self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) 89 | # self.bn1 = nn.BatchNorm2d(c2) 90 | # self.bn2 = nn.BatchNorm2d(c1) 91 | 92 | def forward(self, x): 93 | y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) 94 | # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 95 | # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable 96 | beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed 97 | dpx = (self.p1 - self.p2) * x 98 | return dpx * torch.sigmoid(beta * dpx) + self.p2 * x 99 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/aws/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/helmet_yolov5/utils/aws/__init__.py -------------------------------------------------------------------------------- /helmet_yolov5/utils/aws/mime.sh: -------------------------------------------------------------------------------- 1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ 2 | # This script will run on every instance restart, not only on first start 3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- 4 | 5 | Content-Type: multipart/mixed; boundary="//" 6 | MIME-Version: 1.0 7 | 8 | --// 9 | Content-Type: text/cloud-config; charset="us-ascii" 10 | MIME-Version: 1.0 11 | Content-Transfer-Encoding: 7bit 12 | Content-Disposition: attachment; filename="cloud-config.txt" 13 | 14 | #cloud-config 15 | cloud_final_modules: 16 | - [scripts-user, always] 17 | 18 | --// 19 | Content-Type: text/x-shellscript; charset="us-ascii" 20 | MIME-Version: 1.0 21 | Content-Transfer-Encoding: 7bit 22 | Content-Disposition: attachment; filename="userdata.txt" 23 | 24 | #!/bin/bash 25 | # --- paste contents of userdata.sh here --- 26 | --// 27 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/aws/resume.py: -------------------------------------------------------------------------------- 1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings 2 | # Usage: $ python utils/aws/resume.py 3 | 4 | import os 5 | import sys 6 | from pathlib import Path 7 | 8 | import torch 9 | import yaml 10 | 11 | sys.path.append('./') # to run '$ python *.py' files in subdirectories 12 | 13 | port = 0 # --master_port 14 | path = Path('').resolve() 15 | for last in path.rglob('*/**/last.pt'): 16 | ckpt = 
torch.load(last) 17 | if ckpt['optimizer'] is None: 18 | continue 19 | 20 | # Load opt.yaml 21 | with open(last.parent.parent / 'opt.yaml') as f: 22 | opt = yaml.safe_load(f) 23 | 24 | # Get device count 25 | d = opt['device'].split(',') # devices 26 | nd = len(d) # number of devices 27 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel 28 | 29 | if ddp: # multi-GPU 30 | port += 1 31 | cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' 32 | else: # single-GPU 33 | cmd = f'python train.py --resume {last}' 34 | 35 | cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread 36 | print(cmd) 37 | os.system(cmd) 38 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/aws/userdata.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html 3 | # This script will run only once on first instance start (for a re-start script see mime.sh) 4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir 5 | # Use >300 GB SSD 6 | 7 | cd home/ubuntu 8 | if [ ! -d yolov5 ]; then 9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker 10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 11 | cd yolov5 12 | bash data/scripts/get_coco.sh && echo "COCO done." & 13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & 14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & 15 | wait && echo "All tasks done." # finish background tasks 16 | else 17 | echo "Running re-start script." # resume interrupted runs 18 | i=0 19 | list=$(sudo docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour' 20 | while IFS= read -r id; do 21 | ((i++)) 22 | echo "restarting container $i: $id" 23 | sudo docker start $id 24 | # sudo docker exec -it $id python train.py --resume # single-GPU 25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario 26 | done <<<"$list" 27 | fi 28 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/callbacks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | class Callbacks: 4 | """" 5 | Handles all registered callbacks for YOLOv5 Hooks 6 | """ 7 | 8 | _callbacks = { 9 | 'on_pretrain_routine_start': [], 10 | 'on_pretrain_routine_end': [], 11 | 12 | 'on_train_start': [], 13 | 'on_train_epoch_start': [], 14 | 'on_train_batch_start': [], 15 | 'optimizer_step': [], 16 | 'on_before_zero_grad': [], 17 | 'on_train_batch_end': [], 18 | 'on_train_epoch_end': [], 19 | 20 | 'on_val_start': [], 21 | 'on_val_batch_start': [], 22 | 'on_val_image_end': [], 23 | 'on_val_batch_end': [], 24 | 'on_val_end': [], 25 | 26 | 'on_fit_epoch_end': [], # fit = train + val 27 | 'on_model_save': [], 28 | 'on_train_end': [], 29 | 30 | 'teardown': [], 31 | } 32 | 33 | def __init__(self): 34 | return 35 | 36 | def register_action(self, hook, name='', callback=None): 37 | """ 38 | Register a new action to a callback hook 39 | 40 | Args: 41 | hook The callback hook name to register the action to 42 | name The name of the action 43 | callback The callback to fire 44 | """ 45 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" 46 | assert callable(callback), f"callback '{callback}' is not callable" 47 | self._callbacks[hook].append({'name': name, 'callback': callback}) 48 | 49 | def get_registered_actions(self, hook=None): 50 | """" 51 | Returns all the registered actions by callback hook 52 | 53 | Args: 54 | hook The name of the hook to check, defaults to all 55 | """ 56 | if hook: 57 | return self._callbacks[hook] 58 | else: 59 | return self._callbacks 60 | 61 | def run_callbacks(self, hook, *args, **kwargs): 62 | """ 63 | Loop through the registered actions and fire all callbacks 64 | """ 65 | for logger in self._callbacks[hook]: 66 | # print(f"Running callbacks.{logger['callback'].__name__}()") 67 | logger['callback'](*args, **kwargs) 68 | 69 | def on_pretrain_routine_start(self, *args, **kwargs): 70 | """ 71 | Fires all registered callbacks at the start of each pretraining routine 72 | """ 73 | self.run_callbacks('on_pretrain_routine_start', *args, **kwargs) 74 | 75 | def on_pretrain_routine_end(self, *args, **kwargs): 76 | """ 77 | Fires all registered callbacks at the end of each pretraining routine 78 | """ 79 | self.run_callbacks('on_pretrain_routine_end', *args, **kwargs) 80 | 81 | def on_train_start(self, *args, **kwargs): 82 | """ 83 | Fires all registered callbacks at the start of each training 84 | """ 85 | self.run_callbacks('on_train_start', *args, **kwargs) 86 | 87 | def on_train_epoch_start(self, *args, **kwargs): 88 | """ 89 | Fires all registered callbacks at the start of each training epoch 90 | """ 91 | self.run_callbacks('on_train_epoch_start', *args, **kwargs) 92 | 93 | def on_train_batch_start(self, *args, **kwargs): 94 | """ 95 | Fires all registered callbacks at the start of each training batch 96 | """ 97 | self.run_callbacks('on_train_batch_start', *args, **kwargs) 98 | 99 | def optimizer_step(self, *args, **kwargs): 100 | """ 101 | Fires all registered callbacks on each 
optimizer step 102 | """ 103 | self.run_callbacks('optimizer_step', *args, **kwargs) 104 | 105 | def on_before_zero_grad(self, *args, **kwargs): 106 | """ 107 | Fires all registered callbacks before zero grad 108 | """ 109 | self.run_callbacks('on_before_zero_grad', *args, **kwargs) 110 | 111 | def on_train_batch_end(self, *args, **kwargs): 112 | """ 113 | Fires all registered callbacks at the end of each training batch 114 | """ 115 | self.run_callbacks('on_train_batch_end', *args, **kwargs) 116 | 117 | def on_train_epoch_end(self, *args, **kwargs): 118 | """ 119 | Fires all registered callbacks at the end of each training epoch 120 | """ 121 | self.run_callbacks('on_train_epoch_end', *args, **kwargs) 122 | 123 | def on_val_start(self, *args, **kwargs): 124 | """ 125 | Fires all registered callbacks at the start of the validation 126 | """ 127 | self.run_callbacks('on_val_start', *args, **kwargs) 128 | 129 | def on_val_batch_start(self, *args, **kwargs): 130 | """ 131 | Fires all registered callbacks at the start of each validation batch 132 | """ 133 | self.run_callbacks('on_val_batch_start', *args, **kwargs) 134 | 135 | def on_val_image_end(self, *args, **kwargs): 136 | """ 137 | Fires all registered callbacks at the end of each val image 138 | """ 139 | self.run_callbacks('on_val_image_end', *args, **kwargs) 140 | 141 | def on_val_batch_end(self, *args, **kwargs): 142 | """ 143 | Fires all registered callbacks at the end of each validation batch 144 | """ 145 | self.run_callbacks('on_val_batch_end', *args, **kwargs) 146 | 147 | def on_val_end(self, *args, **kwargs): 148 | """ 149 | Fires all registered callbacks at the end of the validation 150 | """ 151 | self.run_callbacks('on_val_end', *args, **kwargs) 152 | 153 | def on_fit_epoch_end(self, *args, **kwargs): 154 | """ 155 | Fires all registered callbacks at the end of each fit (train+val) epoch 156 | """ 157 | self.run_callbacks('on_fit_epoch_end', *args, **kwargs) 158 | 159 | def on_model_save(self, *args, **kwargs): 160 | """ 161 | Fires all registered callbacks after each model save 162 | """ 163 | self.run_callbacks('on_model_save', *args, **kwargs) 164 | 165 | def on_train_end(self, *args, **kwargs): 166 | """ 167 | Fires all registered callbacks at the end of training 168 | """ 169 | self.run_callbacks('on_train_end', *args, **kwargs) 170 | 171 | def teardown(self, *args, **kwargs): 172 | """ 173 | Fires all registered callbacks before teardown 174 | """ 175 | self.run_callbacks('teardown', *args, **kwargs) 176 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/downloads.py: -------------------------------------------------------------------------------- 1 | # Download utils 2 | 3 | import os 4 | import platform 5 | import subprocess 6 | import time 7 | import urllib 8 | from pathlib import Path 9 | 10 | import requests 11 | import torch 12 | 13 | 14 | def gsutil_getsize(url=''): 15 | # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du 16 | s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') 17 | return eval(s.split(' ')[0]) if len(s) else 0 # bytes 18 | 19 | 20 | def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): 21 | # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes 22 | file = Path(file) 23 | assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" 24 | try: # url1 25 | print(f'Downloading {url} to {file}...') 26 | 
torch.hub.download_url_to_file(url, str(file)) 27 | assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check 28 | except Exception as e: # url2 29 | file.unlink(missing_ok=True) # remove partial downloads 30 | print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') 31 | os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail 32 | finally: 33 | if not file.exists() or file.stat().st_size < min_bytes: # check 34 | file.unlink(missing_ok=True) # remove partial downloads 35 | print(f"ERROR: {assert_msg}\n{error_msg}") 36 | print('') 37 | 38 | 39 | def attempt_download(file, repo='ultralytics/yolov5'): # from utils.google_utils import *; attempt_download() 40 | # Attempt file download if does not exist 41 | file = Path(str(file).strip().replace("'", '')) 42 | 43 | if not file.exists(): 44 | # URL specified 45 | name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. 46 | if str(file).startswith(('http:/', 'https:/')): # download 47 | url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ 48 | name = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 49 | safe_download(file=name, url=url, min_bytes=1E5) 50 | return name 51 | 52 | # GitHub assets 53 | file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) 54 | try: 55 | response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api 56 | assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] 57 | tag = response['tag_name'] # i.e. 'v1.0' 58 | except: # fallback plan 59 | assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 60 | 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] 61 | try: 62 | tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] 63 | except: 64 | tag = 'v5.0' # current release 65 | 66 | if name in assets: 67 | safe_download(file, 68 | url=f'https://github.com/{repo}/releases/download/{tag}/{name}', 69 | # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) 70 | min_bytes=1E5, 71 | error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') 72 | 73 | return str(file) 74 | 75 | 76 | def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): 77 | # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download() 78 | t = time.time() 79 | file = Path(file) 80 | cookie = Path('cookie') # gdrive cookie 81 | print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') 82 | file.unlink(missing_ok=True) # remove existing file 83 | cookie.unlink(missing_ok=True) # remove existing cookie 84 | 85 | # Attempt file download 86 | out = "NUL" if platform.system() == "Windows" else "/dev/null" 87 | os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') 88 | if os.path.exists('cookie'): # large file 89 | s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' 90 | else: # small file 91 | s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' 92 | r = os.system(s) # execute, capture return 93 | cookie.unlink(missing_ok=True) # remove existing cookie 94 | 95 | # Error check 96 | if r != 0: 97 | file.unlink(missing_ok=True) # remove partial 98 | print('Download error ') # raise Exception('Download error') 99 | return r 100 | 101 | # Unzip if archive 102 | if file.suffix == '.zip': 103 | print('unzipping... ', end='') 104 | os.system(f'unzip -q {file}') # unzip 105 | file.unlink() # remove zip to free space 106 | 107 | print(f'Done ({time.time() - t:.1f}s)') 108 | return r 109 | 110 | 111 | def get_token(cookie="./cookie"): 112 | with open(cookie) as f: 113 | for line in f: 114 | if "download" in line: 115 | return line.split()[-1] 116 | return "" 117 | 118 | # Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- 119 | # 120 | # 121 | # def upload_blob(bucket_name, source_file_name, destination_blob_name): 122 | # # Uploads a file to a bucket 123 | # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python 124 | # 125 | # storage_client = storage.Client() 126 | # bucket = storage_client.get_bucket(bucket_name) 127 | # blob = bucket.blob(destination_blob_name) 128 | # 129 | # blob.upload_from_filename(source_file_name) 130 | # 131 | # print('File {} uploaded to {}.'.format( 132 | # source_file_name, 133 | # destination_blob_name)) 134 | # 135 | # 136 | # def download_blob(bucket_name, source_blob_name, destination_file_name): 137 | # # Uploads a blob from a bucket 138 | # storage_client = storage.Client() 139 | # bucket = storage_client.get_bucket(bucket_name) 140 | # blob = bucket.blob(source_blob_name) 141 | # 142 | # blob.download_to_filename(destination_file_name) 143 | # 144 | # print('Blob {} downloaded to {}.'.format( 145 | # source_blob_name, 146 | # destination_file_name)) 147 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/flask_rest_api/README.md: -------------------------------------------------------------------------------- 1 | # Flask REST API 2 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). 3 | 4 | ## Requirements 5 | 6 | [Flask](https://palletsprojects.com/p/flask/) is required. 
Install with: 7 | ```shell 8 | $ pip install Flask 9 | ``` 10 | 11 | ## Run 12 | 13 | After Flask installation run: 14 | 15 | ```shell 16 | $ python3 restapi.py --port 5000 17 | ``` 18 | 19 | Then use [curl](https://curl.se/) to perform a request: 20 | 21 | ```shell 22 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' 23 | ``` 24 | 25 | The model inference results are returned as a JSON response: 26 | 27 | ```json 28 | [ 29 | { 30 | "class": 0, 31 | "confidence": 0.8900438547, 32 | "height": 0.9318675399, 33 | "name": "person", 34 | "width": 0.3264600933, 35 | "xcenter": 0.7438579798, 36 | "ycenter": 0.5207948685 37 | }, 38 | { 39 | "class": 0, 40 | "confidence": 0.8440024257, 41 | "height": 0.7155083418, 42 | "name": "person", 43 | "width": 0.6546785235, 44 | "xcenter": 0.427829951, 45 | "ycenter": 0.6334488392 46 | }, 47 | { 48 | "class": 27, 49 | "confidence": 0.3771208823, 50 | "height": 0.3902671337, 51 | "name": "tie", 52 | "width": 0.0696444362, 53 | "xcenter": 0.3675483763, 54 | "ycenter": 0.7991207838 55 | }, 56 | { 57 | "class": 27, 58 | "confidence": 0.3527112305, 59 | "height": 0.1540903747, 60 | "name": "tie", 61 | "width": 0.0336618312, 62 | "xcenter": 0.7814827561, 63 | "ycenter": 0.5065554976 64 | } 65 | ] 66 | ``` 67 | 68 | An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py` 69 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/flask_rest_api/example_request.py: -------------------------------------------------------------------------------- 1 | """Perform test request""" 2 | import pprint 3 | 4 | import requests 5 | 6 | DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" 7 | TEST_IMAGE = "zidane.jpg" 8 | 9 | image_data = open(TEST_IMAGE, "rb").read() 10 | 11 | response = requests.post(DETECTION_URL, files={"image": image_data}).json() 12 | 13 | pprint.pprint(response) 14 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/flask_rest_api/restapi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Run a rest API exposing the yolov5s object detection model 3 | """ 4 | import argparse 5 | import io 6 | 7 | import torch 8 | from PIL import Image 9 | from flask import Flask, request 10 | 11 | app = Flask(__name__) 12 | 13 | DETECTION_URL = "/v1/object-detection/yolov5s" 14 | 15 | 16 | @app.route(DETECTION_URL, methods=["POST"]) 17 | def predict(): 18 | if not request.method == "POST": 19 | return 20 | 21 | if request.files.get("image"): 22 | image_file = request.files["image"] 23 | image_bytes = image_file.read() 24 | 25 | img = Image.open(io.BytesIO(image_bytes)) 26 | 27 | results = model(img, size=640) # reduce size=320 for faster inference 28 | return results.pandas().xyxy[0].to_json(orient="records") 29 | 30 | 31 | if __name__ == "__main__": 32 | parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") 33 | parser.add_argument("--port", default=5000, type=int, help="port number") 34 | args = parser.parse_args() 35 | 36 | model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache 37 | app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat 38 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/google_app_engine/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM gcr.io/google-appengine/python 2 | 3 | # Create a virtualenv for dependencies. This isolates these packages from 4 | # system-level packages. 5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2. 6 | RUN virtualenv /env -p python3 7 | 8 | # Setting these environment variables are the same as running 9 | # source /env/bin/activate. 10 | ENV VIRTUAL_ENV /env 11 | ENV PATH /env/bin:$PATH 12 | 13 | RUN apt-get update && apt-get install -y python-opencv 14 | 15 | # Copy the application's requirements.txt and run pip to install all 16 | # dependencies into the virtualenv. 17 | ADD requirements.txt /app/requirements.txt 18 | RUN pip install -r /app/requirements.txt 19 | 20 | # Add the application source code. 21 | ADD . /app 22 | 23 | # Run a WSGI server to serve the application. gunicorn must be declared as 24 | # a dependency in requirements.txt. 25 | CMD gunicorn -b :$PORT main:app 26 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/google_app_engine/additional_requirements.txt: -------------------------------------------------------------------------------- 1 | # add these requirements in your app on top of the existing ones 2 | pip==19.2 3 | Flask==1.0.2 4 | gunicorn==19.9.0 5 | -------------------------------------------------------------------------------- /helmet_yolov5/utils/google_app_engine/app.yaml: -------------------------------------------------------------------------------- 1 | runtime: custom 2 | env: flex 3 | 4 | service: yolov5app 5 | 6 | liveness_check: 7 | initial_delay_sec: 600 8 | 9 | manual_scaling: 10 | instances: 1 11 | resources: 12 | cpu: 1 13 | memory_gb: 4 14 | disk_size_gb: 20 -------------------------------------------------------------------------------- /helmet_yolov5/utils/loggers/__init__.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 experiment logging utils 2 | import warnings 3 | from threading import Thread 4 | 5 | import torch 6 | from torch.utils.tensorboard import SummaryWriter 7 | 8 | from utils.general import colorstr, emojis 9 | from utils.loggers.wandb.wandb_utils import WandbLogger 10 | from utils.plots import plot_images, plot_results 11 | from utils.torch_utils import de_parallel 12 | 13 | LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases 14 | 15 | try: 16 | import wandb 17 | 18 | assert hasattr(wandb, '__version__') # verify package import not local dir 19 | except (ImportError, AssertionError): 20 | wandb = None 21 | 22 | 23 | class Loggers(): 24 | # YOLOv5 Loggers class 25 | def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): 26 | self.save_dir = save_dir 27 | self.weights = weights 28 | self.opt = opt 29 | self.hyp = hyp 30 | self.logger = logger # for printing results to console 31 | self.include = include 32 | self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss 33 | 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics 34 | 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 35 | 'x/lr0', 'x/lr1', 'x/lr2'] # params 36 | for k in LOGGERS: 37 | setattr(self, k, None) # init empty logger dictionary 38 | self.csv = True # always log to csv 39 | 40 | # Message 41 | if not wandb: 42 | prefix = colorstr('Weights & Biases: ') 43 | s = f"{prefix}run 'pip install wandb' to 
automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)" 44 | print(emojis(s)) 45 | 46 | # TensorBoard 47 | s = self.save_dir 48 | if 'tb' in self.include and not self.opt.evolve: 49 | prefix = colorstr('TensorBoard: ') 50 | self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") 51 | self.tb = SummaryWriter(str(s)) 52 | 53 | # W&B 54 | if wandb and 'wandb' in self.include: 55 | wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') 56 | run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None 57 | self.opt.hyp = self.hyp # add hyperparameters 58 | self.wandb = WandbLogger(self.opt, run_id) 59 | else: 60 | self.wandb = None 61 | 62 | def on_pretrain_routine_end(self): 63 | # Callback runs on pre-train routine end 64 | paths = self.save_dir.glob('*labels*.jpg') # training labels 65 | if self.wandb: 66 | self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) 67 | 68 | def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): 69 | # Callback runs on train batch end 70 | if plots: 71 | if ni == 0: 72 | with warnings.catch_warnings(): 73 | warnings.simplefilter('ignore') # suppress jit trace warning 74 | self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) 75 | if ni < 3: 76 | f = self.save_dir / f'train_batch{ni}.jpg' # filename 77 | Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() 78 | if self.wandb and ni == 10: 79 | files = sorted(self.save_dir.glob('train*.jpg')) 80 | self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) 81 | 82 | def on_train_epoch_end(self, epoch): 83 | # Callback runs on train epoch end 84 | if self.wandb: 85 | self.wandb.current_epoch = epoch + 1 86 | 87 | def on_val_image_end(self, pred, predn, path, names, im): 88 | # Callback runs on val image end 89 | if self.wandb: 90 | self.wandb.val_one_image(pred, predn, path, names, im) 91 | 92 | def on_val_end(self): 93 | # Callback runs on val end 94 | if self.wandb: 95 | files = sorted(self.save_dir.glob('val*.jpg')) 96 | self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) 97 | 98 | def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): 99 | # Callback runs at the end of each fit (train+val) epoch 100 | x = {k: v for k, v in zip(self.keys, vals)} # dict 101 | if self.csv: 102 | file = self.save_dir / 'results.csv' 103 | n = len(x) + 1 # number of cols 104 | s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header 105 | with open(file, 'a') as f: 106 | f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') 107 | 108 | if self.tb: 109 | for k, v in x.items(): 110 | self.tb.add_scalar(k, v, epoch) 111 | 112 | if self.wandb: 113 | self.wandb.log(x) 114 | self.wandb.end_epoch(best_result=best_fitness == fi) 115 | 116 | def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): 117 | # Callback runs on model save event 118 | if self.wandb: 119 | if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: 120 | self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) 121 | 122 | def on_train_end(self, last, best, plots, epoch): 123 | # Callback runs on training end 124 | if plots: 125 | plot_results(file=self.save_dir / 'results.csv') # save results.png 
126 | files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] 127 | files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter 128 | 129 | if self.tb: 130 | from PIL import Image 131 | import numpy as np 132 | for f in files: 133 | self.tb.add_image(f.stem, np.asarray(Image.open(f)), epoch, dataformats='HWC') 134 | 135 | if self.wandb: 136 | self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) 137 | # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model 138 | wandb.log_artifact(str(best if best.exists() else last), type='model', 139 | name='run_' + self.wandb.wandb_run.id + '_model', 140 | aliases=['latest', 'best', 'stripped']) 141 | self.wandb.finish_run() 142 | -------------------------------------------------------------------------------- /result_img/results.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/result_img/results.jpg -------------------------------------------------------------------------------- /result_img/results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/result_img/results.png -------------------------------------------------------------------------------- /result_img/val_batch2_pred.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yurizzzzz/Helmet-Detection-YoloV5/f451003a1dce4de69c2d48be056248091f547cef/result_img/val_batch2_pred.jpg -------------------------------------------------------------------------------- /utils/crawl_webImg.py: -------------------------------------------------------------------------------- 1 | import re 2 | import requests 3 | from urllib import error 4 | from bs4 import BeautifulSoup 5 | import os 6 | 7 | num = 0 8 | numPicture = 0 9 | file = '' 10 | List = [] 11 | 12 | 13 | def Find(url, A): 14 | global List 15 | print('Counting the total number of images, please wait.....') 16 | t = 0 17 | i = 1 18 | s = 0 19 | while t < 1000: 20 | Url = url + str(t) 21 | try: 22 | Result = A.get(Url, timeout=7, allow_redirects=False) 23 | except BaseException: 24 | t = t + 60 25 | continue 26 | else: 27 | result = Result.text 28 | pic_url = re.findall('"objURL":"(.*?)",', result, re.S) 29 | s += len(pic_url) 30 | if len(pic_url) == 0: 31 | break 32 | else: 33 | List.append(pic_url) 34 | t = t + 60 35 | return s 36 | 37 | 38 | def recommend(url): 39 | Re = [] 40 | try: 41 | html = requests.get(url, allow_redirects=False) 42 | except error.HTTPError as e: 43 | return 44 | else: 45 | html.encoding = 'utf-8' 46 | bsObj = BeautifulSoup(html.text, 'html.parser') 47 | div = bsObj.find('div', id='topRS') 48 | if div is not None: 49 | listA = div.findAll('a') 50 | for i in listA: 51 | if i is not None: 52 | Re.append(i.get_text()) 53 | return Re 54 | 55 | 56 | def dowmloadPicture(html, keyword): 57 | global num 58 | # t =0 59 | pic_url = re.findall('"objURL":"(.*?)",', html, re.S) 60 | print('Found images for keyword: ' + keyword + ', starting download...') 61 | for each in pic_url: 62 | print('Downloading image No.' + str(num + 1) + ', image URL: ' + str(each)) 63 | try: 64 | if each is not None: 65 | pic = requests.get(each, timeout=7) 66 | else: 67 | continue 68 | except BaseException: 69 | print('Error, this image cannot be downloaded') 70 | continue 71 | else: 72 | string = file + r'\\' +
keyword + '_' + str(num) + '.jpg' 73 | fp = open(string, 'wb') 74 | fp.write(pic.content) 75 | fp.close() 76 | num += 1 77 | if num >= numPicture: 78 | return 79 | 80 | 81 | if __name__ == '__main__': 82 | headers = { 83 | 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2', 84 | 'Connection': 'keep-alive', 85 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0', 86 | 'Upgrade-Insecure-Requests': '1' 87 | } 88 | 89 | A = requests.Session() 90 | A.headers = headers 91 | 92 | word = input("Enter a search keyword (e.g. a person or place name): ") 93 | # add = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=%E5%BC%A0%E5%A4%A9%E7%88%B1&pn=120' 94 | url = 'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn=' 95 | 96 | tot = Find(url, A) 97 | Recommend = recommend(url) 98 | print('Detection finished: the %s category has %d images in total' % (word, tot)) 99 | numPicture = int(input('Enter the number of images to download ')) 100 | file = input('Enter a folder name to create for storing the images') 101 | y = os.path.exists(file) 102 | if y == 1: 103 | print('That folder already exists, please enter a new name') 104 | file = input('Enter a folder name to create for storing the images') 105 | os.mkdir(file) 106 | else: 107 | os.mkdir(file) 108 | t = 0 109 | tmp = url 110 | while t < numPicture: 111 | try: 112 | url = tmp + str(t) 113 | 114 | result = A.get(url, timeout=10, allow_redirects=False) 115 | except error.HTTPError as e: 116 | print('Network error, please check your connection and retry') 117 | t = t + 60 118 | else: 119 | dowmloadPicture(result.text, word) 120 | t = t + 60 121 | 122 | print('Search finished, thanks for using this script') 123 | print('Related keywords you may like') 124 | for re in Recommend: 125 | print(re, end=' ') -------------------------------------------------------------------------------- /utils/gen_yolo_format.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2020/7/29 20:29 3 | # @Author : PeterH 4 | # @Email : peterhuang0323@outlook.com 5 | # @File : data_cfg.py 6 | # @Software: PyCharm 7 | # @Brief : Generate the images and labels for the train, validation and test splits 8 | 9 | import os 10 | import shutil 11 | from pathlib import Path 12 | from shutil import copyfile 13 | 14 | from PIL import Image, ImageDraw 15 | from xml.dom.minidom import parse 16 | import numpy as np 17 | from tqdm import tqdm 18 | 19 | FILE_ROOT = Path(r"C:\Users\dell\Desktop") 20 | 21 | # original dataset 22 | IMAGE_SET_ROOT = FILE_ROOT.joinpath(r"VOC\ImageSets\Main") # path of the train/val/test split files 23 | IMAGE_PATH = FILE_ROOT.joinpath(r"VOC\JPEGImages") # location of the images 24 | ANNOTATIONS_PATH = FILE_ROOT.joinpath(r"VOC\Annotations") # location of the dataset annotation (xml) files 25 | LABELS_ROOT = FILE_ROOT.joinpath(r"VOC\Labels") # location of the normalized labels 26 | 27 | # new dataset laid out in the format YOLO expects 28 | DEST_IMAGES_PATH = Path(r"Safety_Helmet_Train_dataset\score\images") # target image path, split into train/test/val 29 | DEST_LABELS_PATH = Path(r"Safety_Helmet_Train_dataset\score\labels") # target label path, split into train/test/val 30 | 31 | 32 | def cord_converter(size, box): 33 | """ 34 | Convert the xml annotation boxes into darknet-style coordinates 35 | :param size: image size: [w,h] 36 | :param box: box coordinates [top-left x, top-left y, bottom-right x, bottom-right y] 37 | :return: converted [x,y,w,h] 38 | """ 39 | 40 | x1 = int(box[0]) 41 | y1 = int(box[1]) 42 | x2 = int(box[2]) 43 | y2 = int(box[3]) 44 | 45 | dw = np.float32(1. / int(size[0])) 46 | dh = np.float32(1.
/ int(size[1])) 47 | 48 | w = x2 - x1 49 | h = y2 - y1 50 | x = x1 + (w / 2) 51 | y = y1 + (h / 2) 52 | 53 | x = x * dw 54 | w = w * dw 55 | y = y * dh 56 | h = h * dh 57 | return [x, y, w, h] 58 | 59 | 60 | def save_label_file(img_jpg_file_name, size, img_box): 61 | """ 62 | Save the parsed label file 63 | :param img_jpg_file_name: 64 | :param size: 65 | :param img_box: 66 | :return: 67 | """ 68 | save_file_name = LABELS_ROOT.joinpath(img_jpg_file_name).with_suffix('.txt') 69 | with open(save_file_name, "a+") as f: 70 | for box in img_box: 71 | if box[0] == 'reflective_clothes': # map the xml class names to YOLO class ids 72 | cls_num = 0 73 | elif box[0] == 'other_clothes': 74 | cls_num = 1 75 | else: 76 | continue 77 | new_box = cord_converter(size, box[1:]) # convert coordinates 78 | f.write(f"{cls_num} {new_box[0]} {new_box[1]} {new_box[2]} {new_box[3]}\n") 79 | 80 | 81 | def test_dataset_box_feature(file_name, point_array): 82 | """ 83 | Visually check the dataset bounding boxes on a sample image 84 | :param file_name: image file name 85 | :param point_array: all boxes [sx1, sy1, sx2, sy2] 86 | :return: None 87 | """ 88 | im = Image.open(IMAGE_PATH.joinpath(file_name).with_suffix(".jpg")) 89 | im_draw = ImageDraw.Draw(im) 90 | for box in point_array: 91 | x1 = box[1] 92 | y1 = box[2] 93 | x2 = box[3] 94 | y2 = box[4] 95 | im_draw.rectangle((x1, y1, x2, y2), outline='red') 96 | 97 | im.show() 98 | 99 | 100 | def get_xml_data(img_xml_file: Path): 101 | """ 102 | Parse the xml annotation data 103 | :param img_xml_file: xml file path 104 | :return: 105 | """ 106 | dom = parse(str(img_xml_file)) 107 | xml_root = dom.documentElement 108 | img_name = xml_root.getElementsByTagName("filename")[0].childNodes[0].data 109 | img_size = xml_root.getElementsByTagName("size")[0] 110 | objects = xml_root.getElementsByTagName("object") 111 | img_w = img_size.getElementsByTagName("width")[0].childNodes[0].data 112 | img_h = img_size.getElementsByTagName("height")[0].childNodes[0].data 113 | img_c = img_size.getElementsByTagName("depth")[0].childNodes[0].data 114 | img_box = [] 115 | for box in objects: 116 | cls_name = box.getElementsByTagName("name")[0].childNodes[0].data 117 | x1 = int(box.getElementsByTagName("xmin")[0].childNodes[0].data) 118 | y1 = int(box.getElementsByTagName("ymin")[0].childNodes[0].data) 119 | x2 = int(box.getElementsByTagName("xmax")[0].childNodes[0].data) 120 | y2 = int(box.getElementsByTagName("ymax")[0].childNodes[0].data) 121 | img_box.append([cls_name, x1, y1, x2, y2]) 122 | 123 | # test_dataset_box_feature(img_xml_file.name, img_box) 124 | save_label_file(img_xml_file.name, [img_w, img_h], img_box) 125 | 126 | 127 | def copy_data(img_set_source, img_labels_root, imgs_source, dataset_type): 128 | """ 129 | Copy the label files and images into the final dataset folders 130 | :param img_set_source: root path of the original dataset split files 131 | :param img_labels_root: root path of the generated txt labels 132 | :param imgs_source: 133 | :param dataset_type: which dataset split to generate 134 | :return: 135 | """ 136 | file_name = img_set_source.joinpath(dataset_type).with_suffix(".txt") # get the image list for this split 137 | 138 | # create the target image and label folders if they do not exist 139 | os.makedirs(FILE_ROOT.joinpath(DEST_IMAGES_PATH, dataset_type), exist_ok=True) 140 | os.makedirs(FILE_ROOT.joinpath(DEST_LABELS_PATH, dataset_type), exist_ok=True) 141 | 142 | with open(file_name, encoding="UTF-8") as f: 143 | for img_name in tqdm(f.read().splitlines()): 144 | img_sor_file = imgs_source.joinpath(img_name).with_suffix('.jpg') 145 | label_sor_file = img_labels_root.joinpath(img_name).with_suffix('.txt') 146 | 147 | # copy the image 148 | dict_file = FILE_ROOT.joinpath(DEST_IMAGES_PATH, dataset_type, img_name).with_suffix('.jpg') 149 | copyfile(img_sor_file, dict_file) 150 | 151
| # copy the label 152 | dict_file = FILE_ROOT.joinpath(DEST_LABELS_PATH, dataset_type, img_name).with_suffix('.txt') 153 | copyfile(label_sor_file, dict_file) 154 | 155 | 156 | if __name__ == '__main__': 157 | root = ANNOTATIONS_PATH # location of the dataset xml annotations 158 | 159 | if LABELS_ROOT.exists(): 160 | # clean the label folder 161 | print("Cleaning Label dir for safety generating label, pls wait...") 162 | shutil.rmtree(LABELS_ROOT) 163 | print("Cleaning Label dir done!") 164 | LABELS_ROOT.mkdir(exist_ok=True) # create the Label folder 165 | 166 | # generate labels 167 | print("Generating Label files...") 168 | with tqdm(total=len(os.listdir(root))) as p_bar: 169 | for file in root.iterdir(): 170 | p_bar.update(1) 171 | get_xml_data(file) 172 | 173 | # split the files into train, val and test 174 | for dataset_input_type in ["train", "val", "test"]: 175 | print(f"Copying data {dataset_input_type}, pls wait...") 176 | copy_data(IMAGE_SET_ROOT, LABELS_ROOT, IMAGE_PATH, dataset_input_type) 177 | -------------------------------------------------------------------------------- /utils/generate_txt.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | 4 | trainval_percent = 1.0 5 | train_percent = 0.8 6 | xmlfilepath = 'C:\\Users\\dell\\Desktop\\VOC\\Annotations' 7 | txtsavepath = 'C:\\Users\\dell\\Desktop\\VOC\\ImageSets\\Main' 8 | total_xml = os.listdir(xmlfilepath) 9 | 10 | num = len(total_xml) 11 | list = range(num) 12 | tv = int(num * trainval_percent) 13 | tr = int(tv * train_percent) 14 | trainval = random.sample(list, tv) 15 | train = random.sample(trainval, tr) 16 | 17 | ftrainval = open('C:\\Users\\dell\\Desktop\\VOC\\ImageSets\\Main\\trainval.txt', 'w') 18 | ftest = open('C:\\Users\\dell\\Desktop\\VOC\\ImageSets\\Main\\test.txt', 'w') 19 | ftrain = open('C:\\Users\\dell\\Desktop\\VOC\\ImageSets\\Main\\train.txt', 'w') 20 | fval = open('C:\\Users\\dell\\Desktop\\VOC\\ImageSets\\Main\\val.txt', 'w') 21 | 22 | for i in list: 23 | name = total_xml[i][:-4] + '\n' 24 | if i in trainval: 25 | ftrainval.write(name) 26 | if i in train: 27 | ftrain.write(name) 28 | else: 29 | fval.write(name) 30 | else: 31 | ftest.write(name) 32 | 33 | ftrainval.close() 34 | ftrain.close() 35 | fval.close() 36 | ftest.close() -------------------------------------------------------------------------------- /utils/remove_empty.py: -------------------------------------------------------------------------------- 1 | import os 2 | from tqdm import tqdm 3 | 4 | for i in range(1083): 5 | filename = '{:0>4s}'.format(str(i)) 6 | size = os.path.getsize('C:/Users/dell/Desktop/VOC2021/labels/' + filename + 7 | '.txt') 8 | if size == 0: 9 | os.remove('C:/Users/dell/Desktop/VOC2021/labels/' + filename + '.txt') 10 | os.remove('C:/Users/dell/Desktop/VOC2021/JPEGImages/' + filename + '.jpg') 11 | os.remove('C:/Users/dell/Desktop/VOC2021/Annotations/' + filename + '.xml') 12 | -------------------------------------------------------------------------------- /utils/rename.py: -------------------------------------------------------------------------------- 1 | import os 2 | from tqdm import tqdm 3 | 4 | train_list = [ 5 | file 6 | for file in os.listdir('C:/Users/dell/Desktop/datasets/val/Annotations') 7 | if file.endswith('.xml') 8 | ] 9 | train_list = tqdm(train_list) 10 | j = 0 11 | 12 | for i in train_list: 13 | os.rename( 14 | 'C:/Users/dell/Desktop/datasets/val/Annotations/' + i, 15 | 'C:/Users/dell/Desktop/datasets/val/Annotations/' + 16 | '{:0>4s}'.format(str(j)) + '.xml') 17 | j += 1 18 | 19 | # for i in range(100): 20 | # s
= str(i + 1) 21 | # s = s.zfill(6) 22 | # os.rename('C:/Users/dell/Desktop/HR/DIV2K_' + s + '.png', 'C:/Users/dell/Desktop/HR/' + str(i+1) + '.jpg') 23 | --------------------------------------------------------------------------------
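
For reference, the VOC-to-YOLO label conversion performed by `cord_converter()` in `utils/gen_yolo_format.py` boils down to turning absolute corner coordinates into normalized center/size values. Below is a minimal, self-contained sketch of that arithmetic; the helper name `voc_to_yolo` and the 640x480 example image are illustrative assumptions, not part of the repository.

```python
# Sketch of the darknet/YOLO coordinate conversion used by cord_converter():
# pixel corners [x1, y1, x2, y2] become normalized [x_center, y_center, w, h].
def voc_to_yolo(size, box):
    w_img, h_img = size          # image width and height in pixels
    x1, y1, x2, y2 = box         # top-left and bottom-right corners
    x = (x1 + x2) / 2.0 / w_img  # normalized box center x
    y = (y1 + y2) / 2.0 / h_img  # normalized box center y
    w = (x2 - x1) / w_img        # normalized box width
    h = (y2 - y1) / h_img        # normalized box height
    return [x, y, w, h]

# Assumed example: a 100x100 px box at (100, 100) in a 640x480 image.
print(voc_to_yolo([640, 480], [100, 100, 200, 200]))
# -> [0.234375, 0.3125, 0.15625, 0.2083...]
```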