├── demo
│ ├── TensorRT
│ │ ├── cpp
│ │ │ ├── .idea
│ │ │ │ ├── .name
│ │ │ │ ├── cpp.iml
│ │ │ │ ├── misc.xml
│ │ │ │ ├── .gitignore
│ │ │ │ └── modules.xml
│ │ │ ├── cmake-build-debug
│ │ │ │ ├── CMakeFiles
│ │ │ │ │ ├── progress.marks
│ │ │ │ │ ├── yolox.dir
│ │ │ │ │ │ ├── objects1.rsp
│ │ │ │ │ │ ├── progress.make
│ │ │ │ │ │ ├── objects.a
│ │ │ │ │ │ ├── yolox.cpp.obj
│ │ │ │ │ │ ├── CXX.includecache
│ │ │ │ │ │ ├── includes_CXX.rsp
│ │ │ │ │ │ ├── cmake_clean.cmake
│ │ │ │ │ │ ├── flags.make
│ │ │ │ │ │ ├── link.txt
│ │ │ │ │ │ ├── DependInfo.cmake
│ │ │ │ │ │ ├── linklibs.rsp
│ │ │ │ │ │ └── build.make
│ │ │ │ │ ├── clion-environment.txt
│ │ │ │ │ ├── cmake.check_cache
│ │ │ │ │ ├── 3.20.2
│ │ │ │ │ │ ├── CompilerIdC
│ │ │ │ │ │ │ └── a.exe
│ │ │ │ │ │ ├── CompilerIdCXX
│ │ │ │ │ │ │ └── a.exe
│ │ │ │ │ │ ├── CMakeDetermineCompilerABI_C.bin
│ │ │ │ │ │ ├── CMakeDetermineCompilerABI_CXX.bin
│ │ │ │ │ │ ├── CMakeRCCompiler.cmake
│ │ │ │ │ │ ├── CMakeSystem.cmake
│ │ │ │ │ │ └── CMakeCCompiler.cmake
│ │ │ │ │ ├── TargetDirectories.txt
│ │ │ │ │ ├── CMakeDirectoryInformation.cmake
│ │ │ │ │ ├── clion-log.txt
│ │ │ │ │ ├── Makefile.cmake
│ │ │ │ │ └── Makefile2
│ │ │ │ ├── yolox.exe
│ │ │ │ ├── Testing
│ │ │ │ │ └── Temporary
│ │ │ │ │   └── LastTest.log
│ │ │ │ ├── cmake_install.cmake
│ │ │ │ └── Makefile
│ │ │ ├── README.md
│ │ │ └── CMakeLists.txt
│ │ └── python
│ │   └── README.md
│ ├── ncnn
│ │ ├── android
│ │ │ ├── settings.gradle
│ │ │ ├── gradle
│ │ │ │ └── wrapper
│ │ │ │   ├── gradle-wrapper.jar
│ │ │ │   └── gradle-wrapper.properties
│ │ │ ├── app
│ │ │ │ ├── src
│ │ │ │ │ └── main
│ │ │ │ │   ├── res
│ │ │ │ │   │ ├── values
│ │ │ │ │   │ │ └── strings.xml
│ │ │ │ │   │ └── layout
│ │ │ │ │   │   └── main.xml
│ │ │ │ │   ├── jni
│ │ │ │ │   │ └── CMakeLists.txt
│ │ │ │ │   ├── AndroidManifest.xml
│ │ │ │ │   └── java
│ │ │ │ │     └── com
│ │ │ │ │       └── megvii
│ │ │ │ │         └── yoloXncnn
│ │ │ │ │           └── yoloXncnn.java
│ │ │ │ └── build.gradle
│ │ │ ├── build.gradle
│ │ │ ├── README.md
│ │ │ ├── gradlew.bat
│ │ │ └── gradlew
│ │ └── cpp
│ │   └── README.md
│ ├── OpenVINO
│ │ ├── README.md
│ │ ├── cpp
│ │ │ ├── CMakeLists.txt
│ │ │ └── README.md
│ │ └── python
│ │   └── README.md
│ ├── MegEngine
│ │ ├── python
│ │ │ ├── models
│ │ │ │ ├── __init__.py
│ │ │ │ ├── yolox.py
│ │ │ │ ├── yolo_fpn.py
│ │ │ │ └── yolo_pafpn.py
│ │ │ ├── README.md
│ │ │ ├── dump.py
│ │ │ ├── coco_classes.py
│ │ │ ├── build.py
│ │ │ ├── convert_weights.py
│ │ │ ├── process.py
│ │ │ └── visualize.py
│ │ └── cpp
│ │   └── build.sh
│ └── ONNXRuntime
│   ├── onnx_inference.py
│   └── README.md
├── .gitattributes
├── assets
│ ├── demo.png
│ ├── dog.jpg
│ ├── logo.png
│ └── git_fig.png
├── yolox
│ ├── __pycache__
│ │ └── __init__.cpython-38.pyc
│ ├── exp
│ │ ├── __pycache__
│ │ │ ├── build.cpython-38.pyc
│ │ │ ├── __init__.cpython-38.pyc
│ │ │ ├── base_exp.cpython-38.pyc
│ │ │ └── yolox_base.cpython-38.pyc
│ │ ├── __init__.py
│ │ ├── build.py
│ │ └── base_exp.py
│ ├── utils
│ │ ├── __pycache__
│ │ │ ├── dist.cpython-38.pyc
│ │ │ ├── ema.cpython-38.pyc
│ │ │ ├── boxes.cpython-38.pyc
│ │ │ ├── logger.cpython-38.pyc
│ │ │ ├── metric.cpython-38.pyc
│ │ │ ├── __init__.cpython-38.pyc
│ │ │ ├── setup_env.cpython-38.pyc
│ │ │ ├── visualize.cpython-38.pyc
│ │ │ ├── checkpoint.cpython-38.pyc
│ │ │ ├── demo_utils.cpython-38.pyc
│ │ │ ├── lr_scheduler.cpython-38.pyc
│ │ │ ├── model_utils.cpython-38.pyc
│ │ │ └── allreduce_norm.cpython-38.pyc
│ │ ├── __init__.py
│ │ ├── checkpoint.py
│ │ ├── setup_env.py
│ │ ├── ema.py
│ │ ├── logger.py
│ │ ├── demo_utils.py
│ │ ├── allreduce_norm.py
│ │ ├── metric.py
│ │ ├── model_utils.py
│ │ ├── visualize.py
│ │ └── boxes.py
│ ├── data
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-38.pyc
│ │ │ ├── samplers.cpython-38.pyc
│ │ │ ├── data_augment.cpython-38.pyc
│ │ │ ├── dataloading.cpython-38.pyc
│ │ │ └── data_prefetcher.cpython-38.pyc
│ │ ├── datasets
│ │ │ ├── __pycache__
│ │ │ │ ├── coco.cpython-38.pyc
│ │ │ │ ├── voc.cpython-38.pyc
│ │ │ │ ├── __init__.cpython-38.pyc
│ │ │ │ ├── my_classes.cpython-38.pyc
│ │ │ │ ├── voc_classes.cpython-38.pyc
│ │ │ │ ├── coco_classes.cpython-38.pyc
│ │ │ │ ├── datasets_wrapper.cpython-38.pyc
│ │ │ │ └── mosaicdetection.cpython-38.pyc
│ │ │ ├── __init__.py
│ │ │ ├── my_classes.py
│ │ │ ├── voc_classes.py
│ │ │ ├── coco_classes.py
│ │ │ └── datasets_wrapper.py
│ │ ├── __init__.py
│ │ ├── data_prefetcher.py
│ │ └── samplers.py
│ ├── models
│ │ ├── __pycache__
│ │ │ ├── losses.cpython-38.pyc
│ │ │ ├── yolox.cpython-38.pyc
│ │ │ ├── __init__.cpython-38.pyc
│ │ │ ├── darknet.cpython-38.pyc
│ │ │ ├── yolo_fpn.cpython-38.pyc
│ │ │ ├── yolo_head.cpython-38.pyc
│ │ │ ├── yolo_pafpn.cpython-38.pyc
│ │ │ └── network_blocks.cpython-38.pyc
│ │ ├── __init__.py
│ │ ├── yolox.py
│ │ ├── losses.py
│ │ ├── yolo_fpn.py
│ │ └── yolo_pafpn.py
│ ├── __init__.py
│ ├── evaluators
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-38.pyc
│ │ │ ├── voc_eval.cpython-38.pyc
│ │ │ ├── coco_evaluator.cpython-38.pyc
│ │ │ └── voc_evaluator.cpython-38.pyc
│ │ └── __init__.py
│ ├── core
│ │ ├── __init__.py
│ │ └── launch.py
│ └── layers
│   ├── __init__.py
│   └── csrc
│     ├── vision.cpp
│     └── cocoeval
│       └── cocoeval.h
├── exps
│ ├── default
│ │ ├── __pycache__
│ │ │ ├── yolox_l.cpython-38.pyc
│ │ │ └── yolox_m.cpython-38.pyc
│ │ ├── yolox_l.py
│ │ ├── yolox_m.py
│ │ ├── yolox_s.py
│ │ ├── yolox_x.py
│ │ ├── yolox_tiny.py
│ │ ├── nano.py
│ │ └── yolov3.py
│ └── example
│   └── yolox_voc
│     └── yolox_voc_s.py
├── .idea
│ ├── misc.xml
│ ├── .gitignore
│ ├── inspectionProfiles
│ │ └── profiles_settings.xml
│ ├── modules.xml
│ └── YOLOX-main.iml
├── requirements.txt
├── setup.cfg
├── generate_data.py
├── datasets
│ └── README.md
├── README.md
├── setup.py
├── tools
│ ├── trt.py
│ └── export_onnx.py
└── train.py
/demo/TensorRT/cpp/.idea/.name:
--------------------------------------------------------------------------------
1 | yolox_cao
--------------------------------------------------------------------------------
/demo/ncnn/android/settings.gradle:
--------------------------------------------------------------------------------
1 | include ':app'
2 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/progress.marks:
--------------------------------------------------------------------------------
1 | 2
2 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/assets/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/assets/demo.png
--------------------------------------------------------------------------------
/assets/dog.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/assets/dog.jpg
--------------------------------------------------------------------------------
/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/assets/logo.png
--------------------------------------------------------------------------------
/demo/OpenVINO/README.md:
--------------------------------------------------------------------------------
1 | ## YOLOX for OpenVINO
2 |
3 | * [C++ Demo](./cpp)
4 | * [Python Demo](./python)
--------------------------------------------------------------------------------
/assets/git_fig.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/assets/git_fig.png
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/objects1.rsp:
--------------------------------------------------------------------------------
1 | CMakeFiles/yolox.dir/yolox.cpp.obj
2 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/progress.make:
--------------------------------------------------------------------------------
1 | CMAKE_PROGRESS_1 = 1
2 | CMAKE_PROGRESS_2 = 2
3 |
4 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/.idea/cpp.iml:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/clion-environment.txt:
--------------------------------------------------------------------------------
1 | ToolSet: w64 6.0 (local)@C:\Program Files\mingw64
2 | Options:
3 |
4 | Options:
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/cmake.check_cache:
--------------------------------------------------------------------------------
1 | # This file is generated by cmake for dependency checking of the CMakeCache.txt file
2 |
--------------------------------------------------------------------------------
/yolox/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/exp/__pycache__/build.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/exp/__pycache__/build.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/dist.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/dist.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/ema.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/ema.cpython-38.pyc
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/yolox.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/demo/TensorRT/cpp/cmake-build-debug/yolox.exe
--------------------------------------------------------------------------------
/yolox/data/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/__pycache__/samplers.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/__pycache__/samplers.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/exp/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/exp/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/exp/__pycache__/base_exp.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/exp/__pycache__/base_exp.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/models/__pycache__/losses.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/models/__pycache__/losses.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/models/__pycache__/yolox.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/models/__pycache__/yolox.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/boxes.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/boxes.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/logger.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/logger.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/metric.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/metric.cpython-38.pyc
--------------------------------------------------------------------------------
/exps/default/__pycache__/yolox_l.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/exps/default/__pycache__/yolox_l.cpython-38.pyc
--------------------------------------------------------------------------------
/exps/default/__pycache__/yolox_m.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/exps/default/__pycache__/yolox_m.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/exp/__pycache__/yolox_base.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/exp/__pycache__/yolox_base.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/models/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/models/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/models/__pycache__/darknet.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/models/__pycache__/darknet.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/models/__pycache__/yolo_fpn.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/models/__pycache__/yolo_fpn.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/setup_env.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/setup_env.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/visualize.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/visualize.cpython-38.pyc
--------------------------------------------------------------------------------
/demo/ncnn/android/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/demo/ncnn/android/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/yolox/data/__pycache__/data_augment.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/__pycache__/data_augment.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/__pycache__/dataloading.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/__pycache__/dataloading.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/coco.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/datasets/__pycache__/coco.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/voc.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/datasets/__pycache__/voc.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/models/__pycache__/yolo_head.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/models/__pycache__/yolo_head.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/models/__pycache__/yolo_pafpn.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/models/__pycache__/yolo_pafpn.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/checkpoint.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/checkpoint.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/demo_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/demo_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/lr_scheduler.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/lr_scheduler.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/model_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/model_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 |
4 | from .utils import configure_module
5 |
6 | configure_module()
7 |
8 | __version__ = "0.1.0"
9 |
--------------------------------------------------------------------------------
/yolox/data/__pycache__/data_prefetcher.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/__pycache__/data_prefetcher.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/evaluators/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/evaluators/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/evaluators/__pycache__/voc_eval.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/evaluators/__pycache__/voc_eval.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/utils/__pycache__/allreduce_norm.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/utils/__pycache__/allreduce_norm.cpython-38.pyc
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/demo/ncnn/android/app/src/main/res/values/strings.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <resources>
3 |     <string name="app_name">yoloXncnn</string>
4 | </resources>
5 |
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/datasets/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/models/__pycache__/network_blocks.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/models/__pycache__/network_blocks.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/my_classes.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/datasets/__pycache__/my_classes.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/voc_classes.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/datasets/__pycache__/voc_classes.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/evaluators/__pycache__/coco_evaluator.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/evaluators/__pycache__/coco_evaluator.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/evaluators/__pycache__/voc_evaluator.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/evaluators/__pycache__/voc_evaluator.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/coco_classes.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/datasets/__pycache__/coco_classes.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/core/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | from .launch import launch
6 | from .trainer import Trainer
7 |
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/datasets_wrapper.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/datasets/__pycache__/datasets_wrapper.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/mosaicdetection.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/yolox/data/datasets/__pycache__/mosaicdetection.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/layers/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | from .fast_coco_eval_api import COCOeval_opt
6 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/objects.a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/objects.a
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/Testing/Temporary/LastTest.log:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/demo/TensorRT/cpp/cmake-build-debug/Testing/Temporary/LastTest.log
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CompilerIdC/a.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CompilerIdC/a.exe
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/yolox.cpp.obj:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/yolox.cpp.obj
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CompilerIdCXX/a.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CompilerIdCXX/a.exe
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Datasource local storage ignored files
5 | /dataSources/
6 | /dataSources.local.xml
7 | # Editor-based HTTP Client requests
8 | /httpRequests/
9 |
--------------------------------------------------------------------------------
/yolox/evaluators/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | from .coco_evaluator import COCOEvaluator
6 | from .voc_evaluator import VOCEvaluator
7 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Datasource local storage ignored files
5 | /dataSources/
6 | /dataSources.local.xml
7 | # Editor-based HTTP Client requests
8 | /httpRequests/
9 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CMakeDetermineCompilerABI_C.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CMakeDetermineCompilerABI_C.bin
--------------------------------------------------------------------------------
/yolox/exp/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | from .base_exp import BaseExp
6 | from .build import get_exp
7 | from .yolox_base import Exp
8 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CMakeDetermineCompilerABI_CXX.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunanlin13174/YOLOX-train-your-data/HEAD/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CMakeDetermineCompilerABI_CXX.bin
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/CXX.includecache:
--------------------------------------------------------------------------------
1 | #IncludeRegexLine: ^[ ]*[#%][ ]*(include|import)[ ]*[<"]([^">]+)([">])
2 |
3 | #IncludeRegexScan: ^.*$
4 |
5 | #IncludeRegexComplain: ^$
6 |
7 | #IncludeRegexTransform:
8 |
9 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/TargetDirectories.txt:
--------------------------------------------------------------------------------
1 | E:/YOLOX-main/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/edit_cache.dir
2 | E:/YOLOX-main/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir
3 | E:/YOLOX-main/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/rebuild_cache.dir
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/demo/ncnn/android/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Sun Aug 25 10:34:48 CST 2019
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-5.4.1-all.zip
7 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # TODO: Update with exact module version
2 | numpy
3 | torch>=1.7
4 | opencv_python
5 | loguru
6 | scikit-image
7 | tqdm
8 | torchvision
9 | Pillow
10 | thop
11 | ninja
12 | tabulate
13 | tensorboard
14 |
15 | # verified versions
16 | onnx==1.8.1
17 | onnxruntime==1.8.0
18 | onnx-simplifier==0.3.5
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/includes_CXX.rsp:
--------------------------------------------------------------------------------
1 | -IE:/YOLOX-main/demo/TensorRT/cpp/include -IC:/PROGRA~1/NVIDIA~2/CUDA/v11.1/include -IC:/PROGRA~1/NVIDIA~2/CUDA/v11.1/bin -ID:/TensorRT-7.2.3.4/include -isystem D:/opencv/build/x64/MinGW/install/include -isystem D:/opencv/build/x64/MinGW/install/include/opencv
2 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CMakeRCCompiler.cmake:
--------------------------------------------------------------------------------
1 | set(CMAKE_RC_COMPILER "C:/Program Files/mingw64/bin/windres.exe")
2 | set(CMAKE_RC_COMPILER_ARG1 "")
3 | set(CMAKE_RC_COMPILER_LOADED 1)
4 | set(CMAKE_RC_SOURCE_FILE_EXTENSIONS rc;RC)
5 | set(CMAKE_RC_OUTPUT_EXTENSION .obj)
6 | set(CMAKE_RC_COMPILER_ENV_VAR "RC")
7 |
--------------------------------------------------------------------------------
/demo/MegEngine/python/models/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | from .darknet import CSPDarknet, Darknet
6 | from .yolo_fpn import YOLOFPN
7 | from .yolo_head import YOLOXHead
8 | from .yolo_pafpn import YOLOPAFPN
9 | from .yolox import YOLOX
10 |
--------------------------------------------------------------------------------
/yolox/data/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | from .coco import COCODataset
6 | from .datasets_wrapper import ConcatDataset, Dataset, MixConcatDataset
7 | from .mosaicdetection import MosaicDetection
8 | from .voc import VOCDetection
9 | from .my_classes import MY_CLASSES
--------------------------------------------------------------------------------
/yolox/models/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | from .darknet import CSPDarknet, Darknet
6 | from .losses import IOUloss
7 | from .yolo_fpn import YOLOFPN
8 | from .yolo_head import YOLOXHead
9 | from .yolo_pafpn import YOLOPAFPN
10 | from .yolox import YOLOX
11 |
--------------------------------------------------------------------------------
/yolox/data/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | from .data_augment import TrainTransform, ValTransform
6 | from .data_prefetcher import DataPrefetcher
7 | from .dataloading import DataLoader, get_yolox_datadir
8 | from .datasets import *
9 | from .samplers import InfiniteSampler, YoloBatchSampler
10 |
--------------------------------------------------------------------------------
/demo/ncnn/android/app/src/main/jni/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | project(yoloXncnn)
2 |
3 | cmake_minimum_required(VERSION 3.4.1)
4 |
5 | set(ncnn_DIR ${CMAKE_SOURCE_DIR}/ncnn-20210525-android-vulkan/${ANDROID_ABI}/lib/cmake/ncnn)
6 | find_package(ncnn REQUIRED)
7 |
8 | add_library(yoloXncnn SHARED yoloXncnn_jni.cpp)
9 |
10 | target_link_libraries(yoloXncnn
11 | ncnn
12 |
13 | jnigraphics
14 | )
15 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/cmake_clean.cmake:
--------------------------------------------------------------------------------
1 | file(REMOVE_RECURSE
2 | "CMakeFiles/yolox.dir/yolox.cpp.obj"
3 | "libyolox.dll.a"
4 | "yolox.exe"
5 | "yolox.exe.manifest"
6 | "yolox.pdb"
7 | )
8 |
9 | # Per-language clean rules from dependency scanning.
10 | foreach(lang CXX)
11 | include(CMakeFiles/yolox.dir/cmake_clean_${lang}.cmake OPTIONAL)
12 | endforeach()
13 |
--------------------------------------------------------------------------------
/demo/ncnn/android/build.gradle:
--------------------------------------------------------------------------------
1 | // Top-level build file where you can add configuration options common to all sub-projects/modules.
2 | buildscript {
3 | repositories {
4 | jcenter()
5 | google()
6 | }
7 | dependencies {
8 | classpath 'com.android.tools.build:gradle:3.5.0'
9 | }
10 | }
11 |
12 | allprojects {
13 | repositories {
14 | jcenter()
15 | google()
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/exps/default/yolox_l.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import os
6 |
7 | from yolox.exp import Exp as MyExp
8 |
9 |
10 | class Exp(MyExp):
11 | def __init__(self):
12 | super(Exp, self).__init__()
13 | self.depth = 1.0
14 | self.width = 1.0
15 | self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
16 |
--------------------------------------------------------------------------------
/exps/default/yolox_m.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import os
6 |
7 | from yolox.exp import Exp as MyExp
8 |
9 |
10 | class Exp(MyExp):
11 | def __init__(self):
12 | super(Exp, self).__init__()
13 | self.depth = 0.67
14 | self.width = 0.75
15 | self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
16 |
--------------------------------------------------------------------------------
/exps/default/yolox_s.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import os
6 |
7 | from yolox.exp import Exp as MyExp
8 |
9 |
10 | class Exp(MyExp):
11 | def __init__(self):
12 | super(Exp, self).__init__()
13 | self.depth = 0.33
14 | self.width = 0.50
15 | self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
16 |
--------------------------------------------------------------------------------
/exps/default/yolox_x.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import os
6 |
7 | from yolox.exp import Exp as MyExp
8 |
9 |
10 | class Exp(MyExp):
11 | def __init__(self):
12 | super(Exp, self).__init__()
13 | self.depth = 1.33
14 | self.width = 1.25
15 | self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
16 |
--------------------------------------------------------------------------------
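
Note: the default exp files above differ only in their depth/width multipliers (l: 1.0/1.0, m: 0.67/0.75, s: 0.33/0.50, x: 1.33/1.25). As a minimal sketch of how such a file is consumed — assuming the upstream YOLOX helpers `get_exp` and `Exp.get_model()` exported from `yolox.exp` — one of them can be loaded and turned into a model like this:

```python
from yolox.exp import get_exp

# Load an experiment description from one of the files above; get_exp also
# accepts a canonical name such as "yolox-s" instead of a file path.
exp = get_exp("exps/default/yolox_s.py", None)

print(exp.exp_name, exp.depth, exp.width)  # -> yolox_s 0.33 0.5
model = exp.get_model()  # builds the YOLOX network scaled by these multipliers
```
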
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/flags.make:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "MinGW Makefiles" Generator, CMake Version 3.20
3 |
4 | # compile CXX with C:/Program Files/mingw64/bin/g++.exe
5 | CXX_DEFINES =
6 |
7 | CXX_INCLUDES = @CMakeFiles/yolox.dir/includes_CXX.rsp
8 |
9 | CXX_FLAGS = -std=c++11 -Wall -Ofast -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED -g -std=c++11 -O2 -pthread -std=gnu++11
10 |
11 |
--------------------------------------------------------------------------------
/yolox/data/datasets/my_classes.py:
--------------------------------------------------------------------------------
1 | MY_CLASSES = (
2 |     'short_sleeved_shirt',  # class ids run 0 ~ 37
3 |     'long_sleeved_shirt',
4 |     'short_sleeved_outwear',
5 |     'long_sleeved_outwear',
6 |     'vest',
7 |     'sling',
8 |     'shorts',
9 |     'trousers',
10 |     'skirt',
11 |     'short_sleeved_dress',
12 |     'long_sleeved_dress',
13 |     'vest_dress',
14 |     'sling_dress',
15 |     'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12',
16 |     'a13', 'a14', 'a15', 'a16', 'a17', 'a18', 'a19', 'a20', 'a21', 'a22', 'a23', 'a24',
17 | )
16 |
--------------------------------------------------------------------------------
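
Since the `class_id` field in data.txt indexes into this tuple, a quick sanity check of the mapping helps when preparing annotations; a minimal sketch (the import path matches the tree above):

```python
from yolox.data.datasets.my_classes import MY_CLASSES

num_classes = len(MY_CLASSES)  # 38 entries here; must match the exp's num_classes
print(num_classes)
print(MY_CLASSES[1])  # class_id 1 -> 'long_sleeved_shirt'
```
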
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CMakeSystem.cmake:
--------------------------------------------------------------------------------
1 | set(CMAKE_HOST_SYSTEM "Windows-10.0.19042")
2 | set(CMAKE_HOST_SYSTEM_NAME "Windows")
3 | set(CMAKE_HOST_SYSTEM_VERSION "10.0.19042")
4 | set(CMAKE_HOST_SYSTEM_PROCESSOR "AMD64")
5 |
6 |
7 |
8 | set(CMAKE_SYSTEM "Windows-10.0.19042")
9 | set(CMAKE_SYSTEM_NAME "Windows")
10 | set(CMAKE_SYSTEM_VERSION "10.0.19042")
11 | set(CMAKE_SYSTEM_PROCESSOR "AMD64")
12 |
13 | set(CMAKE_CROSSCOMPILING "FALSE")
14 |
15 | set(CMAKE_SYSTEM_LOADED 1)
16 |
--------------------------------------------------------------------------------
/.idea/YOLOX-main.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/yolox/utils/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | from .allreduce_norm import *
6 | from .boxes import *
7 | from .checkpoint import load_ckpt, save_checkpoint
8 | from .demo_utils import *
9 | from .dist import *
10 | from .ema import ModelEMA
11 | from .logger import setup_logger
12 | from .lr_scheduler import LRScheduler
13 | from .metric import *
14 | from .model_utils import *
15 | from .setup_env import *
16 | from .visualize import *
17 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/link.txt:
--------------------------------------------------------------------------------
1 | "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe" -E rm -f CMakeFiles\yolox.dir/objects.a
2 | C:\PROGRA~1\mingw64\bin\ar.exe cr CMakeFiles\yolox.dir/objects.a @CMakeFiles\yolox.dir\objects1.rsp
3 | C:\PROGRA~1\mingw64\bin\G__~1.EXE -std=c++11 -Wall -Ofast -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED -g -Wl,--whole-archive CMakeFiles\yolox.dir/objects.a -Wl,--no-whole-archive -o yolox.exe -Wl,--out-implib,libyolox.dll.a -Wl,--major-image-version,0,--minor-image-version,0 @CMakeFiles\yolox.dir\linklibs.rsp
4 |
--------------------------------------------------------------------------------
/yolox/data/datasets/voc_classes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | # VOC_CLASSES = ( '__background__', # always index 0
6 | VOC_CLASSES = (
7 | "aeroplane",
8 | "bicycle",
9 | "bird",
10 | "boat",
11 | "bottle",
12 | "bus",
13 | "car",
14 | "cat",
15 | "chair",
16 | "cow",
17 | "diningtable",
18 | "dog",
19 | "horse",
20 | "motorbike",
21 | "person",
22 | "pottedplant",
23 | "sheep",
24 | "sofa",
25 | "train",
26 | "tvmonitor",
27 | )
28 |
--------------------------------------------------------------------------------
/exps/default/yolox_tiny.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import os
6 |
7 | from yolox.exp import Exp as MyExp
8 |
9 |
10 | class Exp(MyExp):
11 | def __init__(self):
12 | super(Exp, self).__init__()
13 | self.depth = 0.33
14 | self.width = 0.375
15 | self.scale = (0.5, 1.5)
16 | self.random_size = (10, 20)
17 | self.test_size = (416, 416)
18 | self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
19 | self.enable_mixup = False
20 |
--------------------------------------------------------------------------------
/yolox/layers/csrc/vision.cpp:
--------------------------------------------------------------------------------
1 | #include "cocoeval/cocoeval.h"
2 |
3 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
4 | m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate");
5 | m.def(
6 | "COCOevalEvaluateImages",
7 | &COCOeval::EvaluateImages,
8 | "COCOeval::EvaluateImages");
9 |   pybind11::class_<COCOeval::InstanceAnnotation>(m, "InstanceAnnotation")
10 |       .def(pybind11::init<uint64_t, double, double, bool, bool>());
11 |   pybind11::class_<COCOeval::ImageEvaluation>(m, "ImageEvaluation")
12 | .def(pybind11::init<>());
13 | }
14 |
--------------------------------------------------------------------------------
/demo/OpenVINO/cpp/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.4.1)
2 | set(CMAKE_CXX_STANDARD 14)
3 |
4 | project(yolox_openvino_demo)
5 |
6 | find_package(OpenCV REQUIRED)
7 | find_package(InferenceEngine REQUIRED)
8 | find_package(ngraph REQUIRED)
9 |
10 | include_directories(
11 | ${OpenCV_INCLUDE_DIRS}
12 | ${CMAKE_CURRENT_SOURCE_DIR}
13 | ${CMAKE_CURRENT_BINARY_DIR}
14 | )
15 |
16 | add_executable(yolox_openvino yolox_openvino.cpp)
17 |
18 | target_link_libraries(
19 | yolox_openvino
20 | ${InferenceEngine_LIBRARIES}
21 | ${NGRAPH_LIBRARIES}
22 | ${OpenCV_LIBS}
23 | )
--------------------------------------------------------------------------------
/demo/ncnn/android/app/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'com.android.application'
2 |
3 | android {
4 | compileSdkVersion 24
5 | buildToolsVersion "29.0.2"
6 |
7 | defaultConfig {
8 | applicationId "com.megvii.yoloXncnn"
9 | archivesBaseName = "$applicationId"
10 |
11 | ndk {
12 | moduleName "ncnn"
13 | abiFilters "armeabi-v7a", "arm64-v8a"
14 | }
15 | minSdkVersion 24
16 | }
17 |
18 | externalNativeBuild {
19 | cmake {
20 | version "3.10.2"
21 | path file('src/main/jni/CMakeLists.txt')
22 | }
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [isort]
2 | line_length = 100
3 | multi_line_output = 3
4 | balanced_wrapping = True
5 | known_standard_library = setuptools
6 | known_third_party = tqdm,loguru
7 | known_data_processing = cv2,numpy,scipy,PIL,matplotlib,scikit_image
8 | known_datasets = pycocotools
9 | known_deeplearning = torch,torchvision,caffe2,onnx,apex,timm,thop,torch2trt,tensorrt,openvino,onnxruntime
10 | known_myself = yolox
11 | sections = FUTURE,STDLIB,THIRDPARTY,data_processing,datasets,deeplearning,myself,FIRSTPARTY,LOCALFOLDER
12 | no_lines_before=STDLIB,THIRDPARTY,datasets
13 | default_section = FIRSTPARTY
14 |
15 | [flake8]
16 | max-line-length = 100
17 | max-complexity = 18
18 | exclude = __init__.py
19 |
--------------------------------------------------------------------------------
/generate_data.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import cv2
4 |
5 | data_dir = '/home/meprint/sunanlin_folder/data_copy'
6 |
7 | with open('data_copy.txt', 'w') as f:
8 |     for folder in os.listdir(data_dir):
9 |         folder_path = os.path.join(data_dir, folder)
10 |         if not os.path.isdir(folder_path):
11 |             continue
12 |         for img_name in os.listdir(folder_path):
13 |             if "(1)" not in img_name:
14 |                 img_path = os.path.join(folder_path, img_name)
15 |                 img = cv2.imread(img_path)
16 |                 if img is None:  # unreadable image, skip it
17 |                     continue
18 |                 h, w, _ = img.shape
19 |                 if h == 0 or w == 0:  # skip degenerate images instead of aborting the folder
20 |                     continue
21 |                 # one full-image box per line: img_path x1,y1,x2,y2,class (folder name as class)
22 |                 bbox = '0,0,' + str(w) + ',' + str(h)
23 |                 f.write(img_path + ' ' + bbox + ',' + folder + '\n')
--------------------------------------------------------------------------------
/demo/ncnn/android/app/src/main/AndroidManifest.xml:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/yoloXncnn.java:
--------------------------------------------------------------------------------
1 | // Copyright (C) Megvii, Inc. and its affiliates. All rights reserved.
2 |
3 | package com.megvii.yoloXncnn;
4 |
5 | import android.content.res.AssetManager;
6 | import android.graphics.Bitmap;
7 |
8 | public class YOLOXncnn
9 | {
10 | public native boolean Init(AssetManager mgr);
11 |
12 | public class Obj
13 | {
14 | public float x;
15 | public float y;
16 | public float w;
17 | public float h;
18 | public String label;
19 | public float prob;
20 | }
21 |
22 | public native Obj[] Detect(Bitmap bitmap, boolean use_gpu);
23 |
24 | static {
25 | System.loadLibrary("yoloXncnn");
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/CMakeDirectoryInformation.cmake:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "MinGW Makefiles" Generator, CMake Version 3.20
3 |
4 | # Relative path conversion top directories.
5 | set(CMAKE_RELATIVE_PATH_TOP_SOURCE "E:/YOLOX-main/demo/TensorRT/cpp")
6 | set(CMAKE_RELATIVE_PATH_TOP_BINARY "E:/YOLOX-main/demo/TensorRT/cpp/cmake-build-debug")
7 |
8 | # Force unix paths in dependencies.
9 | set(CMAKE_FORCE_UNIX_PATHS 1)
10 |
11 |
12 | # The C and CXX include file regular expressions for this directory.
13 | set(CMAKE_C_INCLUDE_REGEX_SCAN "^.*$")
14 | set(CMAKE_C_INCLUDE_REGEX_COMPLAIN "^$")
15 | set(CMAKE_CXX_INCLUDE_REGEX_SCAN ${CMAKE_C_INCLUDE_REGEX_SCAN})
16 | set(CMAKE_CXX_INCLUDE_REGEX_COMPLAIN ${CMAKE_C_INCLUDE_REGEX_COMPLAIN})
17 |
--------------------------------------------------------------------------------
/datasets/README.md:
--------------------------------------------------------------------------------
1 | # Prepare datasets
2 |
3 | If you keep your datasets in a separate directory, you can point YOLOX at it with the OS environment variable `YOLOX_DATADIR`. Under this directory, YOLOX will look for datasets in the structure described below, if needed.
4 | ```
5 | $YOLOX_DATADIR/
6 | COCO/
7 | ```
8 | You can set the location for builtin datasets by
9 | ```shell
10 | export YOLOX_DATADIR=/path/to/your/datasets
11 | ```
12 | If `YOLOX_DATADIR` is not set, the default value of dataset directory is `./datasets` relative to your current working directory.
13 |
14 | ## Expected dataset structure for [COCO detection](https://cocodataset.org/#download):
15 |
16 | ```
17 | COCO/
18 | annotations/
19 | instances_{train,val}2017.json
20 | {train,val}2017/
21 | # image files that are mentioned in the corresponding json
22 | ```
23 |
24 | You can use the 2014 version of the dataset as well.
25 |
--------------------------------------------------------------------------------
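
The `YOLOX_DATADIR` lookup described above boils down to an environment-variable check with a `./datasets` fallback; a minimal sketch of that resolution logic (a simplified stand-in for `get_yolox_datadir` in `yolox/data/dataloading.py`, not the exact library code):

```python
import os


def get_datadir() -> str:
    """Return $YOLOX_DATADIR if set, else ./datasets in the working directory."""
    return os.getenv("YOLOX_DATADIR", os.path.join(os.getcwd(), "datasets"))


# Datasets are then expected under this root, e.g. $YOLOX_DATADIR/COCO
coco_dir = os.path.join(get_datadir(), "COCO")
```
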
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/clion-log.txt:
--------------------------------------------------------------------------------
1 | "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe" -DCMAKE_BUILD_TYPE=Debug -DCMAKE_DEPENDS_USE_COMPILER=FALSE -G "CodeBlocks - MinGW Makefiles" E:\YOLOX-main\demo\TensorRT\cpp
2 | CMake Deprecation Warning at CMakeLists.txt:3 (cmake_minimum_required):
3 | Compatibility with CMake < 2.8.12 will be removed from a future version of
4 | CMake.
5 |
6 | Update the VERSION argument value or use a ... suffix to tell
7 | CMake that the project does not need compatibility with older versions.
8 |
9 |
10 | -- OpenCV ARCH: x64
11 | -- OpenCV RUNTIME: mingw
12 | -- OpenCV STATIC: OFF
13 | -- Found OpenCV 3.4.8 in D:/opencv/build/x64/MinGW/install/x64/mingw/lib
14 | -- You might need to add D:/opencv/build/x64/MinGW/install/x64/mingw/bin to your PATH to be able to run your applications.
15 | -- Configuring done
16 | -- Generating done
17 | -- Build files have been written to: E:/YOLOX-main/demo/TensorRT/cpp/cmake-build-debug
18 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # YOLOX train your data
2 |
3 | You need to generate a data.txt in the following format **(one image per line)**.
4 |
5 | ## Prepare a data.txt like this:
6 |
7 |     img_path1 x1,y1,x2,y2,class_id x1,y1,x2,y2,class_id2
8 |     img_path2 x1,y1,x2,y2,class_id
9 |     img_path3 ..........
10 |
11 | ### Note:
12 |
13 | **x1,y1,x2,y2 are ints in pixel coordinates (0~img_w, 0~img_h), not normalized to 0~1!**
14 |
15 | **img_path must be an absolute path; be careful with the " " and "," separators in data.txt. An example:**
16 |
17 |     /home/sal/images/000010.jpg 0,190,466,516,1
18 |     /home/sal/images/000011.jpg 284,548,458,851,7 256,393,369,608,1
19 |
20 | ## Train
21 |
22 | **i. step 1**: before training, edit yolox/exp/yolox_base.py to fit your needs; some explanations are added in it. **In particular, set the data.txt path there.**
23 |
24 | **ii. step 2**: adjust the train.py params, just as in https://github.com/Megvii-BaseDetection/YOLOX.git. Once done, simply run: **python train.py**
25 |
26 | **iii. star the repo**
--------------------------------------------------------------------------------
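
To catch annotation mistakes early, a small validation sketch for the data.txt format described above (a hypothetical helper, not part of this repo; it assumes image paths contain no spaces, as the README warns):

```python
def parse_line(line: str):
    """Split 'img_path x1,y1,x2,y2,class_id ...' into (path, list of box tuples)."""
    img_path, *box_strs = line.strip().split(' ')
    boxes = []
    for s in box_strs:
        x1, y1, x2, y2, cls = map(int, s.split(','))
        assert 0 <= x1 < x2 and 0 <= y1 < y2, f"bad box {s} for {img_path}"
        boxes.append((x1, y1, x2, y2, cls))
    return img_path, boxes


with open('data.txt') as f:
    for line in f:
        if line.strip():
            print(parse_line(line))
```
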
/demo/MegEngine/python/README.md:
--------------------------------------------------------------------------------
1 | # YOLOX-Python-MegEngine
2 |
3 | Python version of YOLOX object detection based on [MegEngine](https://github.com/MegEngine/MegEngine).
4 |
5 | ## Tutorial
6 |
7 | ### Step1: install requirements
8 |
9 | ```
10 | python3 -m pip install megengine -f https://megengine.org.cn/whl/mge.html
11 | ```
12 |
13 | ### Step2: convert checkpoint weights from torch's .pth file
14 |
15 | ```
16 | python3 convert_weights.py -w yolox_s.pth.tar -o yolox_s_mge.pkl
17 | ```
18 |
19 | ### Step3: run demo
20 |
21 | This part is the same as torch's python demo, but there is no need to specify the device.
22 |
23 | ```
24 | python3 demo.py image -n yolox-s -c yolox_s_mge.pkl --path ../../../assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result
25 | ```
26 |
27 | ### [Optional]Step4: dump model for cpp inference
28 |
29 | > **Note**: result model is dumped with `optimize_for_inference` and `enable_fuse_conv_bias_nonlinearity`.
30 |
31 | ```
32 | python3 dump.py -n yolox-s -c yolox_s_mge.pkl --dump_path yolox_s.mge
33 | ```
34 |
--------------------------------------------------------------------------------
/demo/MegEngine/python/models/yolox.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- encoding: utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import megengine.module as M
6 |
7 | from .yolo_head import YOLOXHead
8 | from .yolo_pafpn import YOLOPAFPN
9 |
10 |
11 | class YOLOX(M.Module):
12 | """
13 | YOLOX model module. The module list is defined by create_yolov3_modules function.
14 | The network returns loss values from three YOLO layers during training
15 | and detection results during test.
16 | """
17 |
18 | def __init__(self, backbone=None, head=None):
19 | super().__init__()
20 | if backbone is None:
21 | backbone = YOLOPAFPN()
22 | if head is None:
23 | head = YOLOXHead(80)
24 |
25 | self.backbone = backbone
26 | self.head = head
27 |
28 | def forward(self, x):
29 | # fpn output content features of [dark3, dark4, dark5]
30 | fpn_outs = self.backbone(x)
31 | assert not self.training
32 | outputs = self.head(fpn_outs)
33 |
34 | return outputs
35 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/README.md:
--------------------------------------------------------------------------------
1 | # YOLOX-TensorRT in C++
2 |
3 | As YOLOX models are easy to convert to TensorRT using the [torch2trt gitrepo](https://github.com/NVIDIA-AI-IOT/torch2trt),
4 | our C++ demo does not include model converting or constructing, unlike other TensorRT demos.
5 |
6 |
7 | ## Step 1: Prepare serialized engine file
8 |
9 | Follow the trt [python demo README](../python/README.md) to convert and save the serialized engine file.
10 |
11 | Check the 'model_trt.engine' file generated in Step 1, which will be saved automatically in the current demo dir.
12 |
13 |
14 | ## Step 2: build the demo
15 |
16 | Please follow the [TensorRT Installation Guide](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html) to install TensorRT.
17 |
18 | Install opencv with ```sudo apt-get install libopencv-dev```.
19 |
20 | build the demo:
21 |
22 | ```shell
23 | mkdir build
24 | cd build
25 | cmake ..
26 | make
27 | ```
28 |
29 | Then run the demo:
30 |
31 | ```shell
32 | ./yolox ../model_trt.engine -i ../../../../assets/dog.jpg
33 | ```
34 |
35 | or
36 |
37 | ```shell
38 | ./yolox <path/to/your/engine_file> -i <path/to/image>
39 | ```
40 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/DependInfo.cmake:
--------------------------------------------------------------------------------
1 |
2 | # Consider dependencies only in project.
3 | set(CMAKE_DEPENDS_IN_PROJECT_ONLY OFF)
4 |
5 | # The set of languages for which implicit dependencies are needed:
6 | set(CMAKE_DEPENDS_LANGUAGES
7 | "CXX"
8 | )
9 | # The set of files for implicit dependencies of each language:
10 | set(CMAKE_DEPENDS_CHECK_CXX
11 | "E:/YOLOX-main/demo/TensorRT/cpp/yolox.cpp" "E:/YOLOX-main/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/yolox.cpp.obj"
12 | )
13 | set(CMAKE_CXX_COMPILER_ID "GNU")
14 |
15 | # The include file search paths:
16 | set(CMAKE_CXX_TARGET_INCLUDE_PATH
17 | "../include"
18 | "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1/include"
19 | "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1/bin"
20 | "D:/TensorRT-7.2.3.4/include"
21 | "D:/opencv/build/x64/MinGW/install/include"
22 | "D:/opencv/build/x64/MinGW/install/include/opencv"
23 | )
24 |
25 | # The set of dependency files which are needed:
26 | set(CMAKE_DEPENDS_DEPENDENCY_FILES
27 | )
28 |
29 | # Targets to which this target links.
30 | set(CMAKE_TARGET_LINKED_INFO_FILES
31 | )
32 |
33 | # Fortran module output directory.
34 | set(CMAKE_Fortran_TARGET_MODULE_DIR "")
35 |
--------------------------------------------------------------------------------
/demo/ncnn/android/README.md:
--------------------------------------------------------------------------------
1 | # YOLOX-Android-ncnn
2 |
3 | Android app of YOLOX object detection, based on [ncnn](https://github.com/Tencent/ncnn)
4 |
5 |
6 | ## Tutorial
7 |
8 | ### Step1
9 |
10 | Download ncnn-android-vulkan.zip from [releases of ncnn](https://github.com/Tencent/ncnn/releases). This repo uses
11 | [20210525 release](https://github.com/Tencent/ncnn/releases/download/20210525/ncnn-20210525-android-vulkan.zip) for building.
12 |
13 | ### Step2
14 |
15 | After downloading, please extract your zip file. Then, there are two ways to finish this step:
16 | * put your extracted directory into **app/src/main/jni**
17 | * change the **ncnn_DIR** path in **app/src/main/jni/CMakeLists.txt** to your extracted directory
18 |
19 | ### Step3
20 | Download example param and bin file from [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ESXBH_GSSmFMszWJ6YG2VkQB5cWDfqVWXgk0D996jH0rpQ?e=qzEqUh) or [github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_s_ncnn.tar.gz). Unzip the file to **app/src/main/assets**.
21 |
22 | ### Step4
23 | Open this project with Android Studio, build it and enjoy!
24 |
25 | ## Reference
26 |
27 | * [ncnn-android-yolov5](https://github.com/nihui/ncnn-android-yolov5)
28 |
--------------------------------------------------------------------------------
/demo/ncnn/android/app/src/main/res/layout/main.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/exps/default/nano.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import os
6 | import torch.nn as nn
7 |
8 | from yolox.exp import Exp as MyExp
9 |
10 |
11 | class Exp(MyExp):
12 | def __init__(self):
13 | super(Exp, self).__init__()
14 | self.depth = 0.33
15 | self.width = 0.25
16 | self.scale = (0.5, 1.5)
17 | self.random_size = (10, 20)
18 | self.test_size = (416, 416)
19 | self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
20 | self.enable_mixup = False
21 |
22 | def get_model(self, sublinear=False):
23 |
24 | def init_yolo(M):
25 | for m in M.modules():
26 | if isinstance(m, nn.BatchNorm2d):
27 | m.eps = 1e-3
28 | m.momentum = 0.03
29 | if "model" not in self.__dict__:
30 | from yolox.models import YOLOX, YOLOPAFPN, YOLOXHead
31 | in_channels = [256, 512, 1024]
32 |             # The NANO model uses depthwise = True, which is the main difference.
33 | backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels, depthwise=True)
34 | head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, depthwise=True)
35 | self.model = YOLOX(backbone, head)
36 |
37 | self.model.apply(init_yolo)
38 | self.model.head.initialize_biases(1e-2)
39 | return self.model
40 |
--------------------------------------------------------------------------------
/demo/MegEngine/python/dump.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import argparse
6 |
7 | import megengine as mge
8 | import numpy as np
9 | from megengine import jit
10 |
11 | from build import build_and_load
12 |
13 |
14 | def make_parser():
15 | parser = argparse.ArgumentParser("YOLOX Demo Dump")
16 | parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name")
17 | parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
18 | parser.add_argument(
19 | "--dump_path", default="model.mge", help="path to save the dumped model"
20 | )
21 | return parser
22 |
23 |
24 | def dump_static_graph(model, graph_name="model.mge"):
25 | model.eval()
26 | model.head.decode_in_inference = False
27 |
28 | data = mge.Tensor(np.random.random((1, 3, 640, 640)))
29 |
30 | @jit.trace(capture_as_const=True)
31 | def pred_func(data):
32 | outputs = model(data)
33 | return outputs
34 |
35 | pred_func(data)
36 | pred_func.dump(
37 | graph_name,
38 | arg_names=["data"],
39 | optimize_for_inference=True,
40 | enable_fuse_conv_bias_nonlinearity=True,
41 | )
42 |
43 |
44 | def main(args):
45 | model = build_and_load(args.ckpt, name=args.name)
46 | dump_static_graph(model, args.dump_path)
47 |
48 |
49 | if __name__ == "__main__":
50 | args = make_parser().parse_args()
51 | main(args)
52 |
--------------------------------------------------------------------------------
/yolox/utils/checkpoint.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 | import os
5 | import shutil
6 | from loguru import logger
7 |
8 | import torch
9 |
10 |
11 | def load_ckpt(model, ckpt):
12 | model_state_dict = model.state_dict()
13 | load_dict = {}
14 | for key_model, v in model_state_dict.items():
15 | if key_model not in ckpt:
16 | logger.warning(
17 | "{} is not in the ckpt. Please double check and see if this is desired.".format(
18 | key_model
19 | )
20 | )
21 | continue
22 | v_ckpt = ckpt[key_model]
23 | if v.shape != v_ckpt.shape:
24 | logger.warning(
25 | "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
26 | key_model, v_ckpt.shape, key_model, v.shape
27 | )
28 | )
29 | continue
30 | load_dict[key_model] = v_ckpt
31 |
32 | model.load_state_dict(load_dict, strict=False)
33 | return model
34 |
35 |
36 | def save_checkpoint(state, is_best, save_dir, model_name=""):
37 | if not os.path.exists(save_dir):
38 | os.makedirs(save_dir)
39 | filename = os.path.join(save_dir, model_name + "_ckpt.pth.tar")
40 | torch.save(state, filename)
41 | if is_best:
42 | best_filename = os.path.join(save_dir, "best_ckpt.pth.tar")
43 | shutil.copyfile(filename, best_filename)
44 |
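A minimal usage sketch for the two helpers above (the checkpoint path and the surrounding `model` are illustrative, not part of this file):

```python
# Hypothetical usage of load_ckpt / save_checkpoint.
ckpt = torch.load("./YOLOX_outputs/yolox_s/yolox_s_ckpt.pth.tar", map_location="cpu")
model = load_ckpt(model, ckpt["model"])  # warns on missing or shape-mismatched keys

state = {"model": model.state_dict()}
save_checkpoint(state, is_best=False, save_dir="./YOLOX_outputs/yolox_s", model_name="yolox_s")
```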
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/linklibs.rsp:
--------------------------------------------------------------------------------
1 | -LC:/PROGRA~1/NVIDIA~2/CUDA/v11.1/lib/x64 -LC:/PROGRA~1/NVIDIA~2/CUDA/v11.1/bin/lib/x64 -LD:/TensorRT-7.2.3.4/lib -lnvinfer -lcudart D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_dnn348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_highgui348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_ml348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_objdetect348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_shape348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_stitching348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_superres348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_videostab348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_calib3d348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_features2d348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_flann348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_photo348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_video348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_videoio348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_imgcodecs348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_imgproc348.dll.a D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_core348.dll.a -lkernel32 -luser32 -lgdi32 -lwinspool -lshell32 -lole32 -loleaut32 -luuid -lcomdlg32 -ladvapi32
2 |
--------------------------------------------------------------------------------
/yolox/models/yolox.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding: utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import torch.nn as nn
6 |
7 | from .yolo_head import YOLOXHead
8 | from .yolo_pafpn import YOLOPAFPN
9 |
10 |
11 | class YOLOX(nn.Module):
12 | """
13 | YOLOX model module. The module list is defined by create_yolov3_modules function.
14 | The network returns loss values from three YOLO layers during training
15 | and detection results during test.
16 | """
17 |
18 | def __init__(self, backbone=None, head=None):
19 | super().__init__()
20 | if backbone is None:
21 | backbone = YOLOPAFPN()
22 | if head is None:
23 | head = YOLOXHead(80)
24 |
25 | self.backbone = backbone
26 | self.head = head
27 |
28 | def forward(self, x, targets=None):
29 | # fpn output content features of [dark3, dark4, dark5]
30 | fpn_outs = self.backbone(x)
31 |
32 | if self.training:
33 | assert targets is not None
34 | loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
35 | fpn_outs, targets, x
36 | )
37 | outputs = {
38 | "total_loss": loss,
39 | "iou_loss": iou_loss,
40 | "l1_loss": l1_loss,
41 | "conf_loss": conf_loss,
42 | "cls_loss": cls_loss,
43 | "num_fg": num_fg,
44 | }
45 | else:
46 | outputs = self.head(fpn_outs)
47 |
48 | return outputs
49 |
--------------------------------------------------------------------------------
/yolox/exp/build.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import importlib
6 | import os
7 | import sys
8 |
9 |
10 | def get_exp_by_file(exp_file):
11 | try:
12 | sys.path.append(os.path.dirname(exp_file))
13 | current_exp = importlib.import_module(os.path.basename(exp_file).split(".")[0])
14 | exp = current_exp.Exp()
15 | except Exception:
16 |         raise ImportError("{} doesn't contain a class named 'Exp'".format(exp_file))
17 | return exp
18 |
19 |
20 | def get_exp_by_name(exp_name):
21 | import yolox
22 | yolox_path = os.path.dirname(os.path.dirname(yolox.__file__))
23 | filedict = {
24 | "yolox-s": "yolox_s.py",
25 | "yolox-m": "yolox_m.py",
26 | "yolox-l": "yolox_l.py",
27 | "yolox-x": "yolox_x.py",
28 | "yolox-tiny": "yolox_tiny.py",
29 | "yolox-nano": "nano.py",
30 | "yolov3": "yolov3.py",
31 | }
32 | filename = filedict[exp_name]
33 | exp_path = os.path.join(yolox_path, "exps", "default", filename)
34 | return get_exp_by_file(exp_path)
35 |
36 |
37 | def get_exp(exp_file, exp_name):
38 | """
39 | get Exp object by file or name. If exp_file and exp_name
40 | are both provided, get Exp by exp_file.
41 |
42 | Args:
43 | exp_file (str): file path of experiment.
44 |         exp_name (str): name of experiment, e.g. "yolox-s".
45 |     """
46 |     assert exp_file is not None or exp_name is not None, "please provide exp file or exp name."
47 | if exp_file is not None:
48 | return get_exp_by_file(exp_file)
49 | else:
50 | return get_exp_by_name(exp_name)
51 |
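A short usage sketch for `get_exp` (the custom file path is illustrative):

```python
from yolox.exp import get_exp

exp = get_exp(exp_file=None, exp_name="yolox-s")  # look up a built-in exp by name
# exp = get_exp(exp_file="exps/default/nano.py", exp_name=None)  # or load from a file
model = exp.get_model()
```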
--------------------------------------------------------------------------------
/yolox/data/datasets/coco_classes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | COCO_CLASSES = (
6 | "person",
7 | "bicycle",
8 | "car",
9 | "motorcycle",
10 | "airplane",
11 | "bus",
12 | "train",
13 | "truck",
14 | "boat",
15 | "traffic light",
16 | "fire hydrant",
17 | "stop sign",
18 | "parking meter",
19 | "bench",
20 | "bird",
21 | "cat",
22 | "dog",
23 | "horse",
24 | "sheep",
25 | "cow",
26 | "elephant",
27 | "bear",
28 | "zebra",
29 | "giraffe",
30 | "backpack",
31 | "umbrella",
32 | "handbag",
33 | "tie",
34 | "suitcase",
35 | "frisbee",
36 | "skis",
37 | "snowboard",
38 | "sports ball",
39 | "kite",
40 | "baseball bat",
41 | "baseball glove",
42 | "skateboard",
43 | "surfboard",
44 | "tennis racket",
45 | "bottle",
46 | "wine glass",
47 | "cup",
48 | "fork",
49 | "knife",
50 | "spoon",
51 | "bowl",
52 | "banana",
53 | "apple",
54 | "sandwich",
55 | "orange",
56 | "broccoli",
57 | "carrot",
58 | "hot dog",
59 | "pizza",
60 | "donut",
61 | "cake",
62 | "chair",
63 | "couch",
64 | "potted plant",
65 | "bed",
66 | "dining table",
67 | "toilet",
68 | "tv",
69 | "laptop",
70 | "mouse",
71 | "remote",
72 | "keyboard",
73 | "cell phone",
74 | "microwave",
75 | "oven",
76 | "toaster",
77 | "sink",
78 | "refrigerator",
79 | "book",
80 | "clock",
81 | "vase",
82 | "scissors",
83 | "teddy bear",
84 | "hair drier",
85 | "toothbrush",
86 | )
87 |
--------------------------------------------------------------------------------
/demo/MegEngine/python/coco_classes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | COCO_CLASSES = (
6 | "person",
7 | "bicycle",
8 | "car",
9 | "motorcycle",
10 | "airplane",
11 | "bus",
12 | "train",
13 | "truck",
14 | "boat",
15 | "traffic light",
16 | "fire hydrant",
17 | "stop sign",
18 | "parking meter",
19 | "bench",
20 | "bird",
21 | "cat",
22 | "dog",
23 | "horse",
24 | "sheep",
25 | "cow",
26 | "elephant",
27 | "bear",
28 | "zebra",
29 | "giraffe",
30 | "backpack",
31 | "umbrella",
32 | "handbag",
33 | "tie",
34 | "suitcase",
35 | "frisbee",
36 | "skis",
37 | "snowboard",
38 | "sports ball",
39 | "kite",
40 | "baseball bat",
41 | "baseball glove",
42 | "skateboard",
43 | "surfboard",
44 | "tennis racket",
45 | "bottle",
46 | "wine glass",
47 | "cup",
48 | "fork",
49 | "knife",
50 | "spoon",
51 | "bowl",
52 | "banana",
53 | "apple",
54 | "sandwich",
55 | "orange",
56 | "broccoli",
57 | "carrot",
58 | "hot dog",
59 | "pizza",
60 | "donut",
61 | "cake",
62 | "chair",
63 | "couch",
64 | "potted plant",
65 | "bed",
66 | "dining table",
67 | "toilet",
68 | "tv",
69 | "laptop",
70 | "mouse",
71 | "remote",
72 | "keyboard",
73 | "cell phone",
74 | "microwave",
75 | "oven",
76 | "toaster",
77 | "sink",
78 | "refrigerator",
79 | "book",
80 | "clock",
81 | "vase",
82 | "scissors",
83 | "teddy bear",
84 | "hair drier",
85 | "toothbrush",
86 | )
87 |
--------------------------------------------------------------------------------
/demo/TensorRT/python/README.md:
--------------------------------------------------------------------------------
1 | # YOLOX-TensorRT in Python
2 |
3 | This tutorial includes a Python demo for TensorRT.
4 |
5 | ## Install TensorRT Toolkit
6 |
7 | Please follow the [TensorRT Installation Guide](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html) and [torch2trt gitrepo](https://github.com/NVIDIA-AI-IOT/torch2trt) to install TensorRT and torch2trt.
8 |
9 | ## Convert model
10 |
11 | YOLOX models can be easily converted to TensorRT models using torch2trt.
12 |
13 | If you want to convert our model, use the flag -n to specify a model name:
14 | ```shell
15 | python tools/trt.py -n <YOLOX_MODEL_NAME> -c <YOLOX_CHECKPOINT>
16 | ```
17 | For example:
18 | ```shell
19 | python tools/trt.py -n yolox-s -c your_ckpt.pth.tar
20 | ```
21 | <YOLOX_MODEL_NAME> can be: yolox-nano, yolox-tiny, yolox-s, yolox-m, yolox-l, yolox-x.
22 |
23 | If you want to convert your customized model, use the flag -f to specify your exp file:
24 | ```shell
25 | python tools/trt.py -f <YOLOX_EXP_FILE> -c <YOLOX_CHECKPOINT>
26 | ```
27 | For example:
28 | ```shell
29 | python tools/trt.py -f /path/to/your/yolox/exps/yolox_s.py -c your_ckpt.pth.tar
30 | ```
31 | *yolox_s.py* can be any exp file modified by you.
32 |
33 | The converted model and the serialized engine file (for the C++ demo) will be saved in your experiment output dir.
34 |
35 | ## Demo
36 |
37 | The TensorRT Python demo is merged into our PyTorch demo file, so you can run the PyTorch demo command with the ```--trt``` flag.
38 |
39 | ```shell
40 | python tools/demo.py image -n yolox-s --trt --save_result
41 | ```
42 | or
43 | ```shell
44 | python tools/demo.py image -f exps/default/yolox_s.py --trt --save_result
45 | ```
46 |
47 |
--------------------------------------------------------------------------------
/demo/MegEngine/python/build.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 |
4 | import megengine as mge
5 | import megengine.module as M
6 | from megengine import jit
7 |
8 | from models.yolo_fpn import YOLOFPN
9 | from models.yolo_head import YOLOXHead
10 | from models.yolo_pafpn import YOLOPAFPN
11 | from models.yolox import YOLOX
12 |
13 |
14 | def build_yolox(name="yolox-s"):
15 | num_classes = 80
16 |
17 | # value meaning: depth, width
18 | param_dict = {
19 | "yolox-nano": (0.33, 0.25),
20 | "yolox-tiny": (0.33, 0.375),
21 | "yolox-s": (0.33, 0.50),
22 | "yolox-m": (0.67, 0.75),
23 | "yolox-l": (1.0, 1.0),
24 | "yolox-x": (1.33, 1.25),
25 | }
26 | if name == "yolov3":
27 | depth = 1.0
28 | width = 1.0
29 | backbone = YOLOFPN()
30 | head = YOLOXHead(num_classes, width, in_channels=[128, 256, 512], act="lrelu")
31 | model = YOLOX(backbone, head)
32 | else:
33 | assert name in param_dict
34 | kwargs = {}
35 | depth, width = param_dict[name]
36 | if name == "yolox-nano":
37 | kwargs["depthwise"] = True
38 | in_channels = [256, 512, 1024]
39 | backbone = YOLOPAFPN(depth, width, in_channels=in_channels, **kwargs)
40 | head = YOLOXHead(num_classes, width, in_channels=in_channels, **kwargs)
41 | model = YOLOX(backbone, head)
42 |
43 | for m in model.modules():
44 | if isinstance(m, M.BatchNorm2d):
45 | m.eps = 1e-3
46 |
47 | return model
48 |
49 |
50 | def build_and_load(weight_file, name="yolox-s"):
51 | model = build_yolox(name)
52 | model_weights = mge.load(weight_file)
53 | model.load_state_dict(model_weights, strict=False)
54 | return model
55 |
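A minimal sketch tying `build_and_load` to the weight file produced by convert_weights.py (the file name follows the README above):

```python
# e.g. after: python3 convert_weights.py -w yolox_s.pth.tar -o yolox_s_mge.pkl
model = build_and_load("yolox_s_mge.pkl", name="yolox-s")
model.eval()
```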
--------------------------------------------------------------------------------
/yolox/utils/setup_env.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import os
6 | import subprocess
7 |
8 | import cv2
9 |
10 | __all__ = ["configure_nccl", "configure_module"]
11 |
12 |
13 | def configure_nccl():
14 | """Configure multi-machine environment variables of NCCL."""
15 | os.environ["NCCL_LAUNCH_MODE"] = "PARALLEL"
16 | os.environ["NCCL_IB_HCA"] = subprocess.getoutput(
17 | "pushd /sys/class/infiniband/ > /dev/null; for i in mlx5_*; "
18 | "do cat $i/ports/1/gid_attrs/types/* 2>/dev/null "
19 | "| grep v >/dev/null && echo $i ; done; popd > /dev/null"
20 | )
21 | os.environ["NCCL_IB_GID_INDEX"] = "3"
22 | os.environ["NCCL_IB_TC"] = "106"
23 |
24 |
25 | def configure_module(ulimit_value=8192):
26 | """
27 | Configure pytorch module environment. setting of ulimit and cv2 will be set.
28 |
29 | Args:
30 | ulimit_value(int): default open file number on linux. Default value: 8192.
31 | """
32 | # system setting
33 | try:
34 | import resource
35 | rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
36 | resource.setrlimit(resource.RLIMIT_NOFILE, (ulimit_value, rlimit[1]))
37 | except Exception:
38 |         # An exception might be raised on Windows, or when rlimit already hits its maximum.
39 |         # However, setting the rlimit value might not be necessary anyway.
40 | pass
41 |
42 | # cv2
43 | # multiprocess might be harmful on performance of torch dataloader
44 | os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled"
45 | try:
46 | cv2.setNumThreads(0)
47 | cv2.ocl.setUseOpenCL(False)
48 | except Exception:
49 |         # cv2 version mismatch might raise exceptions.
50 | pass
51 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/cmake_install.cmake:
--------------------------------------------------------------------------------
1 | # Install script for directory: E:/YOLOX-main/demo/TensorRT/cpp
2 |
3 | # Set the install prefix
4 | if(NOT DEFINED CMAKE_INSTALL_PREFIX)
5 | set(CMAKE_INSTALL_PREFIX "C:/Program Files (x86)/yolox")
6 | endif()
7 | string(REGEX REPLACE "/$" "" CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")
8 |
9 | # Set the install configuration name.
10 | if(NOT DEFINED CMAKE_INSTALL_CONFIG_NAME)
11 | if(BUILD_TYPE)
12 | string(REGEX REPLACE "^[^A-Za-z0-9_]+" ""
13 | CMAKE_INSTALL_CONFIG_NAME "${BUILD_TYPE}")
14 | else()
15 | set(CMAKE_INSTALL_CONFIG_NAME "Debug")
16 | endif()
17 | message(STATUS "Install configuration: \"${CMAKE_INSTALL_CONFIG_NAME}\"")
18 | endif()
19 |
20 | # Set the component getting installed.
21 | if(NOT CMAKE_INSTALL_COMPONENT)
22 | if(COMPONENT)
23 | message(STATUS "Install component: \"${COMPONENT}\"")
24 | set(CMAKE_INSTALL_COMPONENT "${COMPONENT}")
25 | else()
26 | set(CMAKE_INSTALL_COMPONENT)
27 | endif()
28 | endif()
29 |
30 | # Is this installation the result of a crosscompile?
31 | if(NOT DEFINED CMAKE_CROSSCOMPILING)
32 | set(CMAKE_CROSSCOMPILING "FALSE")
33 | endif()
34 |
35 | # Set default install directory permissions.
36 | if(NOT DEFINED CMAKE_OBJDUMP)
37 | set(CMAKE_OBJDUMP "C:/Program Files/mingw64/bin/objdump.exe")
38 | endif()
39 |
40 | if(CMAKE_INSTALL_COMPONENT)
41 | set(CMAKE_INSTALL_MANIFEST "install_manifest_${CMAKE_INSTALL_COMPONENT}.txt")
42 | else()
43 | set(CMAKE_INSTALL_MANIFEST "install_manifest.txt")
44 | endif()
45 |
46 | string(REPLACE ";" "\n" CMAKE_INSTALL_MANIFEST_CONTENT
47 | "${CMAKE_INSTALL_MANIFEST_FILES}")
48 | file(WRITE "E:/YOLOX-main/demo/TensorRT/cpp/cmake-build-debug/${CMAKE_INSTALL_MANIFEST}"
49 | "${CMAKE_INSTALL_MANIFEST_CONTENT}")
50 |
--------------------------------------------------------------------------------
/yolox/models/losses.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding: utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import torch
6 | import torch.nn as nn
7 |
8 |
9 | class IOUloss(nn.Module):
10 | def __init__(self, reduction="none", loss_type="iou"):
11 | super(IOUloss, self).__init__()
12 | self.reduction = reduction
13 | self.loss_type = loss_type
14 |
15 | def forward(self, pred, target):
16 | assert pred.shape[0] == target.shape[0]
17 |
18 | pred = pred.view(-1, 4)
19 | target = target.view(-1, 4)
20 | tl = torch.max(
21 | (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
22 | )
23 | br = torch.min(
24 | (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
25 | )
26 |
27 | area_p = torch.prod(pred[:, 2:], 1)
28 | area_g = torch.prod(target[:, 2:], 1)
29 |
30 | en = (tl < br).type(tl.type()).prod(dim=1)
31 | area_i = torch.prod(br - tl, 1) * en
32 | iou = (area_i) / (area_p + area_g - area_i + 1e-16)
33 |
34 | if self.loss_type == "iou":
35 | loss = 1 - iou ** 2
36 | elif self.loss_type == "giou":
37 | c_tl = torch.min(
38 | (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
39 | )
40 | c_br = torch.max(
41 | (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
42 | )
43 | area_c = torch.prod(c_br - c_tl, 1)
44 | giou = iou - (area_c - area_i) / area_c.clamp(1e-16)
45 | loss = 1 - giou.clamp(min=-1.0, max=1.0)
46 |
47 | if self.reduction == "mean":
48 | loss = loss.mean()
49 | elif self.reduction == "sum":
50 | loss = loss.sum()
51 |
52 | return loss
53 |
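As the corner computation above implies, `IOUloss` expects boxes in (cx, cy, w, h) form. A tiny sketch with made-up values:

```python
import torch

pred = torch.tensor([[50.0, 50.0, 20.0, 20.0]])    # (cx, cy, w, h)
target = torch.tensor([[55.0, 55.0, 20.0, 20.0]])
criterion = IOUloss(reduction="mean", loss_type="iou")
loss = criterion(pred, target)  # 1 - IoU**2 for this pair
```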
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright (c) Megvii, Inc. and its affiliates. All Rights Reserved
3 |
4 | import re
5 | import setuptools
6 | import glob
7 | from os import path
8 | import torch
9 | from torch.utils.cpp_extension import CppExtension
10 |
11 | torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
12 | assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3"
13 |
14 |
15 | def get_extensions():
16 | this_dir = path.dirname(path.abspath(__file__))
17 | extensions_dir = path.join(this_dir, "yolox", "layers", "csrc")
18 |
19 | main_source = path.join(extensions_dir, "vision.cpp")
20 | sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))
21 |
22 | sources = [main_source] + sources
23 | extension = CppExtension
24 |
25 | extra_compile_args = {"cxx": ["-O3"]}
26 | define_macros = []
27 |
28 | include_dirs = [extensions_dir]
29 |
30 | ext_modules = [
31 | extension(
32 | "yolox._C",
33 | sources,
34 | include_dirs=include_dirs,
35 | define_macros=define_macros,
36 | extra_compile_args=extra_compile_args,
37 | )
38 | ]
39 |
40 | return ext_modules
41 |
42 |
43 | with open("yolox/__init__.py", "r") as f:
44 | version = re.search(
45 | r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
46 | f.read(), re.MULTILINE
47 | ).group(1)
48 |
49 |
50 | with open("README.md", "r") as f:
51 | long_description = f.read()
52 |
53 |
54 | setuptools.setup(
55 | name="yolox",
56 | version=version,
57 | author="basedet team",
58 | python_requires=">=3.6",
59 | long_description=long_description,
60 | ext_modules=get_extensions(),
61 | classifiers=["Programming Language :: Python :: 3", "Operating System :: OS Independent"],
62 | cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
63 | packages=setuptools.find_packages(),
64 | )
65 |
--------------------------------------------------------------------------------
/demo/MegEngine/python/convert_weights.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | import argparse
4 | from collections import OrderedDict
5 |
6 | import megengine as mge
7 | import torch
8 |
9 |
10 | def make_parser():
11 | parser = argparse.ArgumentParser()
12 | parser.add_argument("-w", "--weights", type=str, help="path of weight file")
13 | parser.add_argument(
14 | "-o",
15 | "--output",
16 | default="weight_mge.pkl",
17 | type=str,
18 | help="path of weight file",
19 | )
20 | return parser
21 |
22 |
23 | def numpy_weights(weight_file):
24 | torch_weights = torch.load(weight_file, map_location="cpu")
25 | if "model" in torch_weights:
26 | torch_weights = torch_weights["model"]
27 | new_dict = OrderedDict()
28 | for k, v in torch_weights.items():
29 | new_dict[k] = v.cpu().numpy()
30 | return new_dict
31 |
32 |
33 | def map_weights(weight_file, output_file):
34 | torch_weights = numpy_weights(weight_file)
35 |
36 | new_dict = OrderedDict()
37 | for k, v in torch_weights.items():
38 | if "num_batches_tracked" in k:
39 | print("drop: {}".format(k))
40 | continue
41 | if k.endswith("bias"):
42 | print("bias key: {}".format(k))
43 | v = v.reshape(1, -1, 1, 1)
44 | new_dict[k] = v
45 | elif "dconv" in k and "conv.weight" in k:
46 | print("depthwise conv key: {}".format(k))
47 | cout, cin, k1, k2 = v.shape
48 | v = v.reshape(cout, 1, cin, k1, k2)
49 | new_dict[k] = v
50 | else:
51 | new_dict[k] = v
52 |
53 | mge.save(new_dict, output_file)
54 | print("save weights to {}".format(output_file))
55 |
56 |
57 | def main():
58 | parser = make_parser()
59 | args = parser.parse_args()
60 | map_weights(args.weights, args.output)
61 |
62 |
63 | if __name__ == "__main__":
64 | main()
65 |
--------------------------------------------------------------------------------
/yolox/exp/base_exp.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import ast
6 | import pprint
7 | from abc import ABCMeta, abstractmethod
8 | from typing import Dict
9 | from tabulate import tabulate
10 |
11 | import torch
12 | from torch.nn import Module
13 |
14 | from yolox.utils import LRScheduler
15 |
16 |
17 | class BaseExp(metaclass=ABCMeta):
18 | """Basic class for any experiment.
19 | """
20 |
21 | def __init__(self):
22 | self.seed = None
23 | self.output_dir = "./YOLOX_outputs"
24 | self.print_interval = 100
25 | self.eval_interval = 10
26 |
27 | @abstractmethod
28 | def get_model(self) -> Module:
29 | pass
30 |
31 | @abstractmethod
32 | def get_data_loader(
33 | self, batch_size: int, is_distributed: bool
34 | ) -> Dict[str, torch.utils.data.DataLoader]:
35 | pass
36 |
37 | @abstractmethod
38 | def get_optimizer(self, batch_size: int) -> torch.optim.Optimizer:
39 | pass
40 |
41 | @abstractmethod
42 | def get_lr_scheduler(
43 | self, lr: float, iters_per_epoch: int, **kwargs
44 | ) -> LRScheduler:
45 | pass
46 |
47 | @abstractmethod
48 | def get_evaluator(self):
49 | pass
50 |
51 | @abstractmethod
52 | def eval(self, model, evaluator, weights):
53 | pass
54 |
55 | def __repr__(self):
56 | table_header = ["keys", "values"]
57 | exp_table = [
58 | (str(k), pprint.pformat(v)) for k, v in vars(self).items() if not k.startswith("_")
59 | ]
60 | return tabulate(exp_table, headers=table_header, tablefmt="fancy_grid")
61 |
62 | def merge(self, cfg_list):
63 | assert len(cfg_list) % 2 == 0
64 | for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
65 | # only update value with same key
66 | if hasattr(self, k):
67 | src_value = getattr(self, k)
68 | src_type = type(src_value)
69 | if src_value is not None and src_type != type(v):
70 | try:
71 | v = src_type(v)
72 | except Exception:
73 | v = ast.literal_eval(v)
74 | setattr(self, k, v)
75 |
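`merge` consumes a flat `[key1, value1, key2, value2, ...]` list and casts each value back to the attribute's existing type. A sketch on a concrete `Exp` subclass (e.g. one returned by `get_exp`):

```python
exp.merge(["print_interval", "50", "eval_interval", "5"])
assert exp.print_interval == 50  # the string "50" was cast via int("50")
```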
--------------------------------------------------------------------------------
/demo/ncnn/android/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | set DIRNAME=%~dp0
12 | if "%DIRNAME%" == "" set DIRNAME=.
13 | set APP_BASE_NAME=%~n0
14 | set APP_HOME=%DIRNAME%
15 |
16 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
17 | set DEFAULT_JVM_OPTS=
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windows variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 |
53 | :win9xME_args
54 | @rem Slurp the command line arguments.
55 | set CMD_LINE_ARGS=
56 | set _SKIP=2
57 |
58 | :win9xME_args_slurp
59 | if "x%~1" == "x" goto execute
60 |
61 | set CMD_LINE_ARGS=%*
62 |
63 | :execute
64 | @rem Setup the command line
65 |
66 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
67 |
68 | @rem Execute Gradle
69 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
70 |
71 | :end
72 | @rem End local scope for the variables with windows NT shell
73 | if "%ERRORLEVEL%"=="0" goto mainEnd
74 |
75 | :fail
76 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
77 | rem the _cmd.exe /c_ return code!
78 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
79 | exit /b 1
80 |
81 | :mainEnd
82 | if "%OS%"=="Windows_NT" endlocal
83 |
84 | :omega
85 |
--------------------------------------------------------------------------------
/tools/trt.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import argparse
6 | import os
7 | import shutil
8 | from loguru import logger
9 |
10 | import tensorrt as trt
11 | import torch
12 | from torch2trt import torch2trt
13 |
14 | from yolox.exp import get_exp
15 |
16 |
17 | def make_parser():
18 |     parser = argparse.ArgumentParser("YOLOX TensorRT deploy")
19 | parser.add_argument("-expn", "--experiment-name", type=str, default=None)
20 | parser.add_argument("-n", "--name", type=str, default=None, help="model name")
21 |
22 | parser.add_argument(
23 | "-f",
24 | "--exp_file",
25 | default=None,
26 | type=str,
27 |         help="please input your experiment description file",
28 | )
29 | parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path")
30 | return parser
31 |
32 |
33 | @logger.catch
34 | def main():
35 | args = make_parser().parse_args()
36 | exp = get_exp(args.exp_file, args.name)
37 | if not args.experiment_name:
38 | args.experiment_name = exp.exp_name
39 |
40 | model = exp.get_model()
41 | file_name = os.path.join(exp.output_dir, args.experiment_name)
42 | os.makedirs(file_name, exist_ok=True)
43 | if args.ckpt is None:
44 | ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
45 | else:
46 | ckpt_file = args.ckpt
47 |
48 | ckpt = torch.load(ckpt_file, map_location="cpu")
49 | # load the model state dict
50 |
51 | model.load_state_dict(ckpt["model"])
52 | logger.info("loaded checkpoint done.")
53 | model.eval()
54 | model.cuda()
55 | model.head.decode_in_inference = False
56 | x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
57 | model_trt = torch2trt(
58 | model,
59 | [x],
60 | fp16_mode=True,
61 | log_level=trt.Logger.INFO,
62 | max_workspace_size=(1 << 32),
63 | )
64 | torch.save(model_trt.state_dict(), os.path.join(file_name, 'model_trt.pth'))
65 | logger.info("Converted TensorRT model done.")
66 | engine_file = os.path.join(file_name, 'model_trt.engine')
67 | engine_file_demo = os.path.join('demo', 'TensorRT', 'cpp', 'model_trt.engine')
68 | with open(engine_file, 'wb') as f:
69 | f.write(model_trt.engine.serialize())
70 |
71 | shutil.copyfile(engine_file, engine_file_demo)
72 |
73 | logger.info("Converted TensorRT model engine file is saved for C++ inference.")
74 |
75 |
76 | if __name__ == "__main__":
77 | main()
78 |
--------------------------------------------------------------------------------
/demo/MegEngine/python/models/yolo_fpn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- encoding: utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import megengine.functional as F
6 | import megengine.module as M
7 |
8 | from .darknet import Darknet
9 | from .network_blocks import BaseConv, UpSample
10 |
11 |
12 | class YOLOFPN(M.Module):
13 | """
14 | YOLOFPN module. Darknet 53 is the default backbone of this model.
15 | """
16 |
17 | def __init__(
18 | self, depth=53, in_features=["dark3", "dark4", "dark5"],
19 | ):
20 | super().__init__()
21 |
22 | self.backbone = Darknet(depth)
23 | self.in_features = in_features
24 |
25 | # out 1
26 | self.out1_cbl = self._make_cbl(512, 256, 1)
27 | self.out1 = self._make_embedding([256, 512], 512 + 256)
28 |
29 | # out 2
30 | self.out2_cbl = self._make_cbl(256, 128, 1)
31 | self.out2 = self._make_embedding([128, 256], 256 + 128)
32 |
33 | # upsample
34 | self.upsample = UpSample(scale_factor=2, mode="bilinear")
35 |
36 | def _make_cbl(self, _in, _out, ks):
37 | return BaseConv(_in, _out, ks, stride=1, act="lrelu")
38 |
39 | def _make_embedding(self, filters_list, in_filters):
40 | m = M.Sequential(
41 | *[
42 | self._make_cbl(in_filters, filters_list[0], 1),
43 | self._make_cbl(filters_list[0], filters_list[1], 3),
44 |
45 | self._make_cbl(filters_list[1], filters_list[0], 1),
46 |
47 | self._make_cbl(filters_list[0], filters_list[1], 3),
48 | self._make_cbl(filters_list[1], filters_list[0], 1),
49 | ]
50 | )
51 | return m
52 |
53 | def forward(self, inputs):
54 | """
55 | Args:
56 | inputs (Tensor): input image.
57 |
58 | Returns:
59 |             Tuple[Tensor]: FPN output features.
60 | """
61 | # backbone
62 | out_features = self.backbone(inputs)
63 | x2, x1, x0 = [out_features[f] for f in self.in_features]
64 |
65 | # yolo branch 1
66 | x1_in = self.out1_cbl(x0)
67 | x1_in = self.upsample(x1_in)
68 | x1_in = F.concat([x1_in, x1], 1)
69 | out_dark4 = self.out1(x1_in)
70 |
71 | # yolo branch 2
72 | x2_in = self.out2_cbl(out_dark4)
73 | x2_in = self.upsample(x2_in)
74 | x2_in = F.concat([x2_in, x2], 1)
75 | out_dark3 = self.out2(x2_in)
76 |
77 | outputs = (out_dark3, out_dark4, x0)
78 | return outputs
79 |
--------------------------------------------------------------------------------
/yolox/data/data_prefetcher.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import random
6 |
7 | import torch
8 | import torch.distributed as dist
9 |
10 | from yolox.utils import synchronize
11 |
12 |
13 | class DataPrefetcher:
14 | """
15 | DataPrefetcher is inspired by code of following file:
16 | https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py
17 | It could speedup your pytorch dataloader. For more information, please check
18 | https://github.com/NVIDIA/apex/issues/304#issuecomment-493562789.
19 | """
20 |
21 | def __init__(self, loader):
22 | self.loader = iter(loader)
23 | self.stream = torch.cuda.Stream()
24 | self.input_cuda = self._input_cuda_for_image
25 | self.record_stream = DataPrefetcher._record_stream_for_image
26 | self.preload()
27 |
28 | def preload(self):
29 | try:
30 | self.next_input, self.next_target, _, _ = next(self.loader)
31 | except StopIteration:
32 | self.next_input = None
33 | self.next_target = None
34 | return
35 |
36 | with torch.cuda.stream(self.stream):
37 | self.input_cuda()
38 | self.next_target = self.next_target.cuda(non_blocking=True)
39 |
40 | def next(self):
41 | torch.cuda.current_stream().wait_stream(self.stream)
42 | input = self.next_input
43 | target = self.next_target
44 | if input is not None:
45 | self.record_stream(input)
46 | if target is not None:
47 | target.record_stream(torch.cuda.current_stream())
48 | self.preload()
49 | return input, target
50 |
51 | def _input_cuda_for_image(self):
52 | self.next_input = self.next_input.cuda(non_blocking=True)
53 |
54 | @staticmethod
55 | def _record_stream_for_image(input):
56 | input.record_stream(torch.cuda.current_stream())
57 |
58 |
59 | def random_resize(data_loader, exp, epoch, rank, is_distributed):
60 | tensor = torch.LongTensor(1).cuda()
61 | if is_distributed:
62 | synchronize()
63 |
64 | if rank == 0:
65 | if epoch > exp.max_epoch - 10:
66 | size = exp.input_size
67 | else:
68 | size = random.randint(*exp.random_size)
69 | size = int(32 * size)
70 | tensor.fill_(size)
71 |
72 | if is_distributed:
73 | synchronize()
74 | dist.broadcast(tensor, 0)
75 |
76 | input_size = data_loader.change_input_dim(multiple=tensor.item(), random_range=None)
77 | return input_size
78 |
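A sketch of the intended prefetch loop; it assumes a CUDA device and a dataloader whose batches unpack as `(inputs, targets, _, _)`, matching `preload` above:

```python
prefetcher = DataPrefetcher(train_loader)  # train_loader: a torch DataLoader
inps, targets = prefetcher.next()
while inps is not None:
    # ... forward / backward on (inps, targets) ...
    inps, targets = prefetcher.next()  # returns (None, None) when exhausted
```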
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | #1. Careful: never put stray spaces inside CMakeLists.txt arguments; CMake treats a space as the start of the next argument. Simply wrap paths in "" quotes.
2 | #2. set() calls that define implicit variables must come before find_package().
3 | #3. Setup order: install CUDA and add its bin to PATH; cuDNN usually goes into CUDA's directories, with lib/x64 added to PATH; install TensorRT per the official guide
4 | #   and add its bin to PATH; for OpenCV, pick a recent release matching your CUDA/cuDNN, build it step by step, and add the built bin directory to PATH.
5 |
6 | cmake_minimum_required(VERSION 2.6) # required: minimum CMake version
7 |
8 | project(yolox_cao) # explicit variable: the project name, not very meaningful here
9 |
10 | add_definitions(-std=c++11) # required
11 |
12 | option(CUDA_USE_STATIC_CUDA_RUNTIME OFF) # optional: whether to link the CUDA runtime statically
13 | set(CMAKE_CXX_STANDARD 11) # set the C++ standard
14 | set(CMAKE_BUILD_TYPE Debug) # debug
15 |
16 | find_package(CUDA REQUIRED) # required
17 | #include_directories("${CUDA_INCLUDE_DIRS}")
18 |
19 | include_directories(${PROJECT_SOURCE_DIR}/include)
20 |
21 | # include and link dirs of cuda and tensorrt, you need adapt them if yours are different
22 | # cuda
23 | include_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1/include") # use "cuda/include"
24 | link_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1/lib/x64") # the matching CUDA link libraries
25 | # cudnn
26 | include_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1/bin") # same pattern as above
27 | link_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1/bin/lib/x64") # same pattern as above
28 | # tensorrt
29 | include_directories("D:/TensorRT-7.2.3.4/include") # same pattern as above
30 | link_directories("D:/TensorRT-7.2.3.4/lib") # same pattern as above
31 |
32 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Ofast -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED")
33 |
34 | #set(OpenCV_DIR "D:/opencv/build/x64/vc15/lib")
35 | set(OpenCV_DIR "D:/opencv/build/x64/MinGW/install") # point this at the MinGW build; note: OpenCV_DIR must be the install folder inside the built output, not the unbuilt OpenCV source
36 | set(OpenCV_LIBS opencv_core opencv_imgproc opencv_highgui opencv_imgcodecs) # important: once OpenCV is found, pull in all of its dependent libraries
37 |
38 | find_package(OpenCV) # the set() calls above only define variables; this performs the actual lookup
39 | include_directories(${OpenCV_INCLUDE_DIRS}) # add the include directories
40 |
41 |
42 |
43 | add_executable(yolox ${PROJECT_SOURCE_DIR}/yolox.cpp) ##### must edit: the first argument (yolox) usually matches the .cpp file name; the second argument is the .cpp file itself
44 | target_link_libraries(yolox nvinfer) ### link libraries: TensorRT's nvinfer, CUDA's cudart, and the OpenCV libraries defined above
45 | target_link_libraries(yolox cudart)
46 | target_link_libraries(yolox ${OpenCV_LIBS})
47 |
48 |
49 | add_definitions(-O2 -pthread)
50 |
51 |
--------------------------------------------------------------------------------
/demo/MegEngine/cpp/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | if [ -z $CXX ];then
5 |     echo "please export your c++ toolchain to CXX"
6 |     echo "for example:"
7 |     echo "build for host: export CXX=g++"
8 |     echo "cross build for aarch64-android (usually located in the NDK): export CXX=aarch64-linux-android21-clang++"
9 |     echo "cross build for aarch64-linux: export CXX=aarch64-linux-gnu-g++"
10 | exit -1
11 | fi
12 |
13 | if [ -z $MGE_INSTALL_PATH ];then
14 |     echo "please refer to ./README.md to init the MGE_INSTALL_PATH env"
15 | exit -1
16 | fi
17 |
18 | if [ -z $OPENCV_INSTALL_INCLUDE_PATH ];then
19 |     echo "please refer to ./README.md to init the OPENCV_INSTALL_INCLUDE_PATH env"
20 | exit -1
21 | fi
22 |
23 | if [ -z $OPENCV_INSTALL_LIB_PATH ];then
24 |     echo "please refer to ./README.md to init the OPENCV_INSTALL_LIB_PATH env"
25 | exit -1
26 | fi
27 |
28 | INCLUDE_FLAG="-I$MGE_INSTALL_PATH/include -I$OPENCV_INSTALL_INCLUDE_PATH"
29 | LINK_FLAG="-L$MGE_INSTALL_PATH/lib/ -lmegengine -L$OPENCV_INSTALL_LIB_PATH -lopencv_core -lopencv_highgui -lopencv_imgproc -lopencv_imgcodecs"
30 | BUILD_FLAG="-static-libstdc++ -O3 -pie -fPIE -g"
31 |
32 | if [[ $CXX =~ "android" ]]; then
33 | LINK_FLAG="${LINK_FLAG} -llog -lz"
34 | fi
35 |
36 | echo "CXX: $CXX"
37 | echo "MGE_INSTALL_PATH: $MGE_INSTALL_PATH"
38 | echo "INCLUDE_FLAG: $INCLUDE_FLAG"
39 | echo "LINK_FLAG: $LINK_FLAG"
40 | echo "BUILD_FLAG: $BUILD_FLAG"
41 |
42 | echo "[" > compile_commands.json
43 | echo "{" >> compile_commands.json
44 | echo "\"directory\": \"$PWD\"," >> compile_commands.json
45 | echo "\"command\": \"$CXX yolox.cpp -o yolox ${INCLUDE_FLAG} ${LINK_FLAG}\"," >> compile_commands.json
46 | echo "\"file\": \"$PWD/yolox.cpp\"," >> compile_commands.json
47 | echo "}," >> compile_commands.json
48 | echo "]" >> compile_commands.json
49 | $CXX yolox.cpp -o yolox ${INCLUDE_FLAG} ${LINK_FLAG} ${BUILD_FLAG}
50 |
51 | echo "build success, output file: yolox"
52 | if [[ $CXX =~ "android" ]]; then
53 | echo "try command to run:"
54 | echo "adb push/scp $MGE_INSTALL_PATH/lib/libmegengine.so android_phone"
55 | echo "adb push/scp $OPENCV_INSTALL_LIB_PATH/*.so android_phone"
56 | echo "adb push/scp ./yolox yolox_s.mge android_phone"
57 | echo "adb push/scp ../../../assets/dog.jpg android_phone"
58 | echo "adb/ssh to android_phone, then run: LD_LIBRARY_PATH=. ./yolox yolox_s.mge dog.jpg cpu/multithread "
59 | else
60 | echo "try command to run: LD_LIBRARY_PATH=$MGE_INSTALL_PATH/lib/:$OPENCV_INSTALL_LIB_PATH ./yolox yolox_s.mge ../../../assets/dog.jpg cuda/cpu/multithread "
61 | fi
62 |
--------------------------------------------------------------------------------
/yolox/models/yolo_fpn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding: utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import torch
6 | import torch.nn as nn
7 |
8 | from .darknet import Darknet
9 | from .network_blocks import BaseConv
10 |
11 |
12 | class YOLOFPN(nn.Module):
13 | """
14 | YOLOFPN module. Darknet 53 is the default backbone of this model.
15 | """
16 |
17 | def __init__(
18 | self, depth=53, in_features=["dark3", "dark4", "dark5"],
19 | ):
20 | super().__init__()
21 |
22 | self.backbone = Darknet(depth)
23 | self.in_features = in_features
24 |
25 | # out 1
26 | self.out1_cbl = self._make_cbl(512, 256, 1)
27 | self.out1 = self._make_embedding([256, 512], 512 + 256)
28 |
29 | # out 2
30 | self.out2_cbl = self._make_cbl(256, 128, 1)
31 | self.out2 = self._make_embedding([128, 256], 256 + 128)
32 |
33 | # upsample
34 | self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
35 |
36 | def _make_cbl(self, _in, _out, ks):
37 | return BaseConv(_in, _out, ks, stride=1, act="lrelu")
38 |
39 | def _make_embedding(self, filters_list, in_filters):
40 | m = nn.Sequential(
41 | *[
42 | self._make_cbl(in_filters, filters_list[0], 1),
43 | self._make_cbl(filters_list[0], filters_list[1], 3),
44 |
45 | self._make_cbl(filters_list[1], filters_list[0], 1),
46 |
47 | self._make_cbl(filters_list[0], filters_list[1], 3),
48 | self._make_cbl(filters_list[1], filters_list[0], 1),
49 | ]
50 | )
51 | return m
52 |
53 | def load_pretrained_model(self, filename="./weights/darknet53.mix.pth"):
54 | with open(filename, "rb") as f:
55 | state_dict = torch.load(f, map_location="cpu")
56 | print("loading pretrained weights...")
57 | self.backbone.load_state_dict(state_dict)
58 |
59 | def forward(self, inputs):
60 | """
61 | Args:
62 | inputs (Tensor): input image.
63 |
64 | Returns:
65 |             Tuple[Tensor]: FPN output features.
66 | """
67 | # backbone
68 | out_features = self.backbone(inputs)
69 | x2, x1, x0 = [out_features[f] for f in self.in_features]
70 |
71 | # yolo branch 1
72 | x1_in = self.out1_cbl(x0)
73 | x1_in = self.upsample(x1_in)
74 | x1_in = torch.cat([x1_in, x1], 1)
75 | out_dark4 = self.out1(x1_in)
76 |
77 | # yolo branch 2
78 | x2_in = self.out2_cbl(out_dark4)
79 | x2_in = self.upsample(x2_in)
80 | x2_in = torch.cat([x2_in, x2], 1)
81 | out_dark3 = self.out2(x2_in)
82 |
83 | outputs = (out_dark3, out_dark4, x0)
84 | return outputs
85 |
--------------------------------------------------------------------------------
/yolox/utils/ema.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 | import math
5 | from copy import deepcopy
6 |
7 | import torch
8 | import torch.nn as nn
9 |
10 |
11 | def is_parallel(model):
12 | """check if model is in parallel mode."""
13 | import apex
14 |
15 | parallel_type = (
16 | nn.parallel.DataParallel,
17 | nn.parallel.DistributedDataParallel,
18 | apex.parallel.distributed.DistributedDataParallel,
19 | )
20 | return isinstance(model, parallel_type)
21 |
22 |
23 | def copy_attr(a, b, include=(), exclude=()):
24 | # Copy attributes from b to a, options to only include [...] and to exclude [...]
25 | for k, v in b.__dict__.items():
26 | if (len(include) and k not in include) or k.startswith("_") or k in exclude:
27 | continue
28 | else:
29 | setattr(a, k, v)
30 |
31 |
32 | class ModelEMA:
33 | """
34 | Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
35 | Keep a moving average of everything in the model state_dict (parameters and buffers).
36 | This is intended to allow functionality like
37 | https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
38 | A smoothed version of the weights is necessary for some training schemes to perform well.
39 |     This class is sensitive to where it is initialized in the sequence of model init,
40 |     GPU assignment and distributed training wrappers.
41 | """
42 | def __init__(self, model, decay=0.9999, updates=0):
43 | """
44 | Args:
45 | model (nn.Module): model to apply EMA.
46 |             decay (float): EMA decay rate.
47 | updates (int): counter of EMA updates.
48 | """
49 | # Create EMA(FP32)
50 | self.ema = deepcopy(model.module if is_parallel(model) else model).eval()
51 | self.updates = updates
52 | # decay exponential ramp (to help early epochs)
53 | self.decay = lambda x: decay * (1 - math.exp(-x / 2000))
54 | for p in self.ema.parameters():
55 | p.requires_grad_(False)
56 |
57 | def update(self, model):
58 | # Update EMA parameters
59 | with torch.no_grad():
60 | self.updates += 1
61 | d = self.decay(self.updates)
62 |
63 | msd = (
64 | model.module.state_dict() if is_parallel(model) else model.state_dict()
65 | ) # model state_dict
66 | for k, v in self.ema.state_dict().items():
67 | if v.dtype.is_floating_point:
68 | v *= d
69 | v += (1.0 - d) * msd[k].detach()
70 |
71 | def update_attr(self, model, include=(), exclude=("process_group", "reducer")):
72 | # Update EMA attributes
73 | copy_attr(self.ema, model, include, exclude)
74 |
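Typical EMA bookkeeping inside a training loop, as a sketch (the loop body is elided; note that `is_parallel` above imports apex, so apex must be installed):

```python
ema_model = ModelEMA(model, decay=0.9998)
for inps, targets in train_loader:
    # ... forward, loss.backward(), optimizer.step() ...
    ema_model.update(model)

eval_model = ema_model.ema  # evaluate with the smoothed weights
```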
--------------------------------------------------------------------------------
/demo/MegEngine/python/process.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import cv2
6 | import megengine.functional as F
7 | import numpy as np
8 |
9 | __all__ = [
10 | "preprocess",
11 | "postprocess",
12 | ]
13 |
14 |
15 | def preprocess(image, input_size, mean, std, swap=(2, 0, 1)):
16 | if len(image.shape) == 3:
17 | padded_img = np.ones((input_size[0], input_size[1], 3)) * 114.0
18 | else:
19 | padded_img = np.ones(input_size) * 114.0
20 | img = np.array(image)
21 | r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
22 | resized_img = cv2.resize(
23 | img,
24 | (int(img.shape[1] * r), int(img.shape[0] * r)),
25 | interpolation=cv2.INTER_LINEAR,
26 | ).astype(np.float32)
27 | padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
28 | image = padded_img
29 |
30 | image = image.astype(np.float32)
31 | image = image[:, :, ::-1]
32 | image /= 255.0
33 | if mean is not None:
34 | image -= mean
35 | if std is not None:
36 | image /= std
37 | image = image.transpose(swap)
38 | image = np.ascontiguousarray(image, dtype=np.float32)
39 | return image, r
40 |
41 |
42 | def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):
43 | box_corner = F.zeros_like(prediction)
44 | box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
45 | box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
46 | box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
47 | box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
48 | prediction[:, :, :4] = box_corner[:, :, :4]
49 |
50 | output = [None for _ in range(len(prediction))]
51 | for i, image_pred in enumerate(prediction):
52 |
53 | # If none are remaining => process next image
54 | if not image_pred.shape[0]:
55 | continue
56 | # Get score and class with highest confidence
57 | class_conf = F.max(image_pred[:, 5 : 5 + num_classes], 1, keepdims=True)
58 | class_pred = F.argmax(image_pred[:, 5 : 5 + num_classes], 1, keepdims=True)
59 |
60 | class_conf_squeeze = F.squeeze(class_conf)
61 | conf_mask = image_pred[:, 4] * class_conf_squeeze >= conf_thre
62 | detections = F.concat((image_pred[:, :5], class_conf, class_pred), 1)
63 | detections = detections[conf_mask]
64 | if not detections.shape[0]:
65 | continue
66 |
67 | nms_out_index = F.vision.nms(
68 | detections[:, :4], detections[:, 4] * detections[:, 5], nms_thre,
69 | )
70 | detections = detections[nms_out_index]
71 | if output[i] is None:
72 | output[i] = detections
73 | else:
74 | output[i] = F.concat((output[i], detections))
75 |
76 | return output
77 |
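A short sketch wiring the two helpers around a model call; the mean/std values are the usual ImageNet stats, which is an assumption here, and the model call is left commented since the model is built elsewhere:

```python
import cv2

img = cv2.imread("../../../assets/dog.jpg")
mean = (0.485, 0.456, 0.406)  # assumed ImageNet stats
std = (0.229, 0.224, 0.225)
inp, ratio = preprocess(img, (640, 640), mean, std)  # inp: (3, 640, 640) float32
# outputs = model(mge.Tensor(inp[None]))  # batch of 1
# dets = postprocess(outputs, num_classes=80, conf_thre=0.25, nms_thre=0.45)
```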
--------------------------------------------------------------------------------
/yolox/utils/logger.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import inspect
6 | import os
7 | import sys
8 | from loguru import logger
9 |
10 |
11 | def get_caller_name(depth=0):
12 | """
13 | Args:
14 |         depth (int): Depth of the caller's context; use 0 for the caller's depth. Default value: 0.
15 |
16 | Returns:
17 | str: module name of the caller
18 | """
19 | # the following logic is a little bit faster than inspect.stack() logic
20 | frame = inspect.currentframe().f_back
21 | for _ in range(depth):
22 | frame = frame.f_back
23 |
24 | return frame.f_globals["__name__"]
25 |
26 |
27 | class StreamToLoguru:
28 | """
29 | stream object that redirects writes to a logger instance.
30 | """
31 | def __init__(self, level="INFO", caller_names=("apex", "pycocotools")):
32 | """
33 | Args:
34 | level(str): log level string of loguru. Default value: "INFO".
35 | caller_names(tuple): caller names of redirected module.
36 | Default value: (apex, pycocotools).
37 | """
38 | self.level = level
39 | self.linebuf = ""
40 | self.caller_names = caller_names
41 |
42 | def write(self, buf):
43 | full_name = get_caller_name(depth=1)
44 |         module_name = full_name.split(".", maxsplit=1)[0]  # top-level module name
45 | if module_name in self.caller_names:
46 | for line in buf.rstrip().splitlines():
47 | # use caller level log
48 | logger.opt(depth=2).log(self.level, line.rstrip())
49 | else:
50 | sys.__stdout__.write(buf)
51 |
52 | def flush(self):
53 | pass
54 |
55 |
56 | def redirect_sys_output(log_level="INFO"):
57 | redirect_logger = StreamToLoguru(log_level)
58 | sys.stderr = redirect_logger
59 | sys.stdout = redirect_logger
60 |
61 |
62 | def setup_logger(save_dir, distributed_rank=0, filename="log.txt", mode="a"):
63 | """setup logger for training and testing.
64 | Args:
65 | save_dir(str): location to save log file
66 | distributed_rank(int): device rank when multi-gpu environment
67 | filename (string): log save name.
68 |         mode(str): log file write mode, `a` (append) or `o` (override). Default value: `a`.
69 |
70 |     Return:
71 |         None. The global loguru logger is configured in place.
72 | """
73 | loguru_format = (
74 | "{time:YYYY-MM-DD HH:mm:ss} | "
75 | "{level: <8} | "
76 | "{name}:{line} - {message}"
77 | )
78 |
79 | logger.remove()
80 | save_file = os.path.join(save_dir, filename)
81 | if mode == "o" and os.path.exists(save_file):
82 | os.remove(save_file)
83 | # only keep logger in rank0 process
84 | if distributed_rank == 0:
85 | logger.add(
86 | sys.stderr,
87 | format=loguru_format,
88 | level="INFO",
89 | enqueue=True,
90 | )
91 | logger.add(save_file)
92 |
93 | # redirect stdout/stderr to loguru
94 | redirect_sys_output("INFO")
95 |
--------------------------------------------------------------------------------
/yolox/utils/demo_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import os
6 |
7 | import numpy as np
8 |
9 | __all__ = ["mkdir", "nms", "multiclass_nms", "demo_postprocess"]
10 |
11 |
12 | def mkdir(path):
13 | if not os.path.exists(path):
14 | os.makedirs(path)
15 |
16 |
17 | def nms(boxes, scores, nms_thr):
18 | """Single class NMS implemented in Numpy."""
19 | x1 = boxes[:, 0]
20 | y1 = boxes[:, 1]
21 | x2 = boxes[:, 2]
22 | y2 = boxes[:, 3]
23 |
24 | areas = (x2 - x1 + 1) * (y2 - y1 + 1)
25 | order = scores.argsort()[::-1]
26 |
27 | keep = []
28 | while order.size > 0:
29 | i = order[0]
30 | keep.append(i)
31 | xx1 = np.maximum(x1[i], x1[order[1:]])
32 | yy1 = np.maximum(y1[i], y1[order[1:]])
33 | xx2 = np.minimum(x2[i], x2[order[1:]])
34 | yy2 = np.minimum(y2[i], y2[order[1:]])
35 |
36 | w = np.maximum(0.0, xx2 - xx1 + 1)
37 | h = np.maximum(0.0, yy2 - yy1 + 1)
38 | inter = w * h
39 | ovr = inter / (areas[i] + areas[order[1:]] - inter)
40 |
41 | inds = np.where(ovr <= nms_thr)[0]
42 | order = order[inds + 1]
43 |
44 | return keep
45 |
46 |
47 | def multiclass_nms(boxes, scores, nms_thr, score_thr):
48 | """Multiclass NMS implemented in Numpy"""
49 | final_dets = []
50 | num_classes = scores.shape[1]
51 | for cls_ind in range(num_classes):
52 | cls_scores = scores[:, cls_ind]
53 | valid_score_mask = cls_scores > score_thr
54 | if valid_score_mask.sum() == 0:
55 | continue
56 | else:
57 | valid_scores = cls_scores[valid_score_mask]
58 | valid_boxes = boxes[valid_score_mask]
59 | keep = nms(valid_boxes, valid_scores, nms_thr)
60 | if len(keep) > 0:
61 | cls_inds = np.ones((len(keep), 1)) * cls_ind
62 | dets = np.concatenate([valid_boxes[keep], valid_scores[keep, None], cls_inds], 1)
63 | final_dets.append(dets)
64 | if len(final_dets) == 0:
65 | return None
66 | return np.concatenate(final_dets, 0)
67 |
68 |
69 | def demo_postprocess(outputs, img_size, p6=False):
70 |
71 | grids = []
72 | expanded_strides = []
73 |
74 | if not p6:
75 | strides = [8, 16, 32]
76 | else:
77 | strides = [8, 16, 32, 64]
78 |
79 | hsizes = [img_size[0]//stride for stride in strides]
80 | wsizes = [img_size[1]//stride for stride in strides]
81 |
82 | for hsize, wsize, stride in zip(hsizes, wsizes, strides):
83 |         xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))  # x varies over width, y over height
84 | grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
85 | grids.append(grid)
86 | shape = grid.shape[:2]
87 | expanded_strides.append(np.full((*shape, 1), stride))
88 |
89 | grids = np.concatenate(grids, 1)
90 | expanded_strides = np.concatenate(expanded_strides, 1)
91 | outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
92 | outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
93 |
94 | return outputs
95 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/3.20.2/CMakeCCompiler.cmake:
--------------------------------------------------------------------------------
1 | set(CMAKE_C_COMPILER "C:/Program Files/mingw64/bin/gcc.exe")
2 | set(CMAKE_C_COMPILER_ARG1 "")
3 | set(CMAKE_C_COMPILER_ID "GNU")
4 | set(CMAKE_C_COMPILER_VERSION "8.1.0")
5 | set(CMAKE_C_COMPILER_VERSION_INTERNAL "")
6 | set(CMAKE_C_COMPILER_WRAPPER "")
7 | set(CMAKE_C_STANDARD_COMPUTED_DEFAULT "11")
8 | set(CMAKE_C_COMPILE_FEATURES "c_std_90;c_function_prototypes;c_std_99;c_restrict;c_variadic_macros;c_std_11;c_static_assert")
9 | set(CMAKE_C90_COMPILE_FEATURES "c_std_90;c_function_prototypes")
10 | set(CMAKE_C99_COMPILE_FEATURES "c_std_99;c_restrict;c_variadic_macros")
11 | set(CMAKE_C11_COMPILE_FEATURES "c_std_11;c_static_assert")
12 |
13 | set(CMAKE_C_PLATFORM_ID "MinGW")
14 | set(CMAKE_C_SIMULATE_ID "")
15 | set(CMAKE_C_COMPILER_FRONTEND_VARIANT "")
16 | set(CMAKE_C_SIMULATE_VERSION "")
17 |
18 |
19 |
20 |
21 | set(CMAKE_AR "C:/Program Files/mingw64/bin/ar.exe")
22 | set(CMAKE_C_COMPILER_AR "C:/Program Files/mingw64/bin/gcc-ar.exe")
23 | set(CMAKE_RANLIB "C:/Program Files/mingw64/bin/ranlib.exe")
24 | set(CMAKE_C_COMPILER_RANLIB "C:/Program Files/mingw64/bin/gcc-ranlib.exe")
25 | set(CMAKE_LINKER "C:/Program Files/mingw64/bin/ld.exe")
26 | set(CMAKE_MT "")
27 | set(CMAKE_COMPILER_IS_GNUCC 1)
28 | set(CMAKE_C_COMPILER_LOADED 1)
29 | set(CMAKE_C_COMPILER_WORKS TRUE)
30 | set(CMAKE_C_ABI_COMPILED TRUE)
31 | set(CMAKE_COMPILER_IS_MINGW 1)
32 | set(CMAKE_COMPILER_IS_CYGWIN )
33 | if(CMAKE_COMPILER_IS_CYGWIN)
34 | set(CYGWIN 1)
35 | set(UNIX 1)
36 | endif()
37 |
38 | set(CMAKE_C_COMPILER_ENV_VAR "CC")
39 |
40 | if(CMAKE_COMPILER_IS_MINGW)
41 | set(MINGW 1)
42 | endif()
43 | set(CMAKE_C_COMPILER_ID_RUN 1)
44 | set(CMAKE_C_SOURCE_FILE_EXTENSIONS c;m)
45 | set(CMAKE_C_IGNORE_EXTENSIONS h;H;o;O;obj;OBJ;def;DEF;rc;RC)
46 | set(CMAKE_C_LINKER_PREFERENCE 10)
47 |
48 | # Save compiler ABI information.
49 | set(CMAKE_C_SIZEOF_DATA_PTR "8")
50 | set(CMAKE_C_COMPILER_ABI "")
51 | set(CMAKE_C_BYTE_ORDER "LITTLE_ENDIAN")
52 | set(CMAKE_C_LIBRARY_ARCHITECTURE "")
53 |
54 | if(CMAKE_C_SIZEOF_DATA_PTR)
55 | set(CMAKE_SIZEOF_VOID_P "${CMAKE_C_SIZEOF_DATA_PTR}")
56 | endif()
57 |
58 | if(CMAKE_C_COMPILER_ABI)
59 | set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_C_COMPILER_ABI}")
60 | endif()
61 |
62 | if(CMAKE_C_LIBRARY_ARCHITECTURE)
63 | set(CMAKE_LIBRARY_ARCHITECTURE "")
64 | endif()
65 |
66 | set(CMAKE_C_CL_SHOWINCLUDES_PREFIX "")
67 | if(CMAKE_C_CL_SHOWINCLUDES_PREFIX)
68 | set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_C_CL_SHOWINCLUDES_PREFIX}")
69 | endif()
70 |
71 |
72 |
73 |
74 |
75 | set(CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES "C:/Program Files/mingw64/lib/gcc/x86_64-w64-mingw32/8.1.0/include;C:/Program Files/mingw64/lib/gcc/x86_64-w64-mingw32/8.1.0/include-fixed;C:/Program Files/mingw64/x86_64-w64-mingw32/include")
76 | set(CMAKE_C_IMPLICIT_LINK_LIBRARIES "mingw32;gcc;moldname;mingwex;pthread;advapi32;shell32;user32;kernel32;iconv;mingw32;gcc;moldname;mingwex")
77 | set(CMAKE_C_IMPLICIT_LINK_DIRECTORIES "C:/Program Files/mingw64/lib/gcc/x86_64-w64-mingw32/8.1.0;C:/Program Files/mingw64/lib/gcc;C:/Program Files/mingw64/x86_64-w64-mingw32/lib;C:/Program Files/mingw64/lib")
78 | set(CMAKE_C_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "")
79 |
--------------------------------------------------------------------------------
/demo/ONNXRuntime/onnx_inference.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import argparse
6 | import os
7 |
8 | import cv2
9 | import numpy as np
10 |
11 | import onnxruntime
12 |
13 | from yolox.data.data_augment import preproc as preprocess
14 | from yolox.data.datasets import COCO_CLASSES
15 | from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis
16 |
17 |
18 | def make_parser():
19 | parser = argparse.ArgumentParser("onnxruntime inference sample")
20 | parser.add_argument(
21 | "-m",
22 | "--model",
23 | type=str,
24 | default="yolox.onnx",
25 | help="Input your onnx model.",
26 | )
27 | parser.add_argument(
28 | "-i",
29 | "--image_path",
30 | type=str,
31 | default='test_image.png',
32 | help="Path to your input image.",
33 | )
34 | parser.add_argument(
35 | "-o",
36 | "--output_dir",
37 | type=str,
38 | default='demo_output',
39 | help="Path to your output directory.",
40 | )
41 | parser.add_argument(
42 | "-s",
43 | "--score_thr",
44 | type=float,
45 | default=0.3,
46 |         help="Score threshold to filter the result.",
47 | )
48 | parser.add_argument(
49 | "--input_shape",
50 | type=str,
51 | default="640,640",
52 | help="Specify an input shape for inference.",
53 | )
54 | parser.add_argument(
55 | "--with_p6",
56 | action="store_true",
57 | help="Whether your model uses p6 in FPN/PAN.",
58 | )
59 | return parser
60 |
61 |
62 | if __name__ == '__main__':
63 | args = make_parser().parse_args()
64 |
65 | input_shape = tuple(map(int, args.input_shape.split(',')))
66 | origin_img = cv2.imread(args.image_path)
67 | mean = (0.485, 0.456, 0.406)
68 | std = (0.229, 0.224, 0.225)
69 | img, ratio = preprocess(origin_img, input_shape, mean, std)
70 |
71 | session = onnxruntime.InferenceSession(args.model)
72 |
73 | ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
74 | output = session.run(None, ort_inputs)
75 | predictions = demo_postprocess(output[0], input_shape, p6=args.with_p6)[0]
76 |
77 | boxes = predictions[:, :4]
78 | scores = predictions[:, 4:5] * predictions[:, 5:]
79 |
80 | boxes_xyxy = np.ones_like(boxes)
81 | boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2.
82 | boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2.
83 | boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.
84 | boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.
85 | boxes_xyxy /= ratio
86 | dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
87 | if dets is not None:
88 | final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
89 | origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
90 | conf=args.score_thr, class_names=COCO_CLASSES)
91 |
92 | mkdir(args.output_dir)
93 | output_path = os.path.join(args.output_dir, args.image_path.split("/")[-1])
94 | cv2.imwrite(output_path, origin_img)
95 |
--------------------------------------------------------------------------------
/yolox/utils/allreduce_norm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import pickle
6 | from collections import OrderedDict
7 |
8 | import torch
9 | from torch import distributed as dist
10 | from torch import nn
11 |
12 | from .dist import _get_global_gloo_group, get_world_size
13 |
14 | ASYNC_NORM = (
15 | nn.BatchNorm1d,
16 | nn.BatchNorm2d,
17 | nn.BatchNorm3d,
18 | nn.InstanceNorm1d,
19 | nn.InstanceNorm2d,
20 | nn.InstanceNorm3d,
21 | )
22 |
23 | __all__ = [
24 | "get_async_norm_states", "pyobj2tensor", "tensor2pyobj", "all_reduce", "all_reduce_norm"
25 | ]
26 |
27 |
28 | def get_async_norm_states(module):
29 | async_norm_states = OrderedDict()
30 | for name, child in module.named_modules():
31 | if isinstance(child, ASYNC_NORM):
32 | for k, v in child.state_dict().items():
33 | async_norm_states[".".join([name, k])] = v
34 | return async_norm_states
35 |
36 |
37 | def pyobj2tensor(pyobj, device="cuda"):
38 | """serialize picklable python object to tensor"""
39 | storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))
40 | return torch.ByteTensor(storage).to(device=device)
41 |
42 |
43 | def tensor2pyobj(tensor):
44 | """deserialize tensor to picklable python object"""
45 | return pickle.loads(tensor.cpu().numpy().tobytes())
46 |
47 |
48 | def _get_reduce_op(op_name):
49 | return {
50 | "sum": dist.ReduceOp.SUM,
51 | "mean": dist.ReduceOp.SUM,
52 | }[op_name.lower()]
53 |
54 |
55 | def all_reduce(py_dict, op="sum", group=None):
56 | """
57 | Apply all reduce function for python dict object.
58 | NOTE: make sure that every py_dict has the same keys and values are in the same shape.
59 |
60 | Args:
61 | py_dict (dict): dict to apply all reduce op.
62 | op (str): operator, could be "sum" or "mean".
63 | """
64 | world_size = get_world_size()
65 | if world_size == 1:
66 | return py_dict
67 | if group is None:
68 | group = _get_global_gloo_group()
69 | if dist.get_world_size(group) == 1:
70 | return py_dict
71 |
72 | # all reduce logic across different devices.
73 | py_key = list(py_dict.keys())
74 | py_key_tensor = pyobj2tensor(py_key)
75 | dist.broadcast(py_key_tensor, src=0)
76 | py_key = tensor2pyobj(py_key_tensor)
77 |
78 | tensor_shapes = [py_dict[k].shape for k in py_key]
79 | tensor_numels = [py_dict[k].numel() for k in py_key]
80 |
81 | flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])
82 | dist.all_reduce(flatten_tensor, op=_get_reduce_op(op))
83 | if op == "mean":
84 | flatten_tensor /= world_size
85 |
86 | split_tensors = [
87 | x.reshape(shape) for x, shape in zip(
88 | torch.split(flatten_tensor, tensor_numels), tensor_shapes
89 | )
90 | ]
91 | return OrderedDict({k: v for k, v in zip(py_key, split_tensors)})
92 |
93 |
94 | def all_reduce_norm(module):
95 | """
96 | All reduce norm statistics in different devices.
97 | """
98 | states = get_async_norm_states(module)
99 | states = all_reduce(states, op="mean")
100 | module.load_state_dict(states, strict=False)
101 |
--------------------------------------------------------------------------------
/exps/default/yolov3.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import os
6 | import torch
7 | import torch.nn as nn
8 |
9 | from yolox.exp import Exp as MyExp
10 |
11 |
12 | class Exp(MyExp):
13 | def __init__(self):
14 | super(Exp, self).__init__()
15 | self.depth = 1.0
16 | self.width = 1.0
17 | self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
18 |
19 | def get_model(self, sublinear=False):
20 | def init_yolo(M):
21 | for m in M.modules():
22 | if isinstance(m, nn.BatchNorm2d):
23 | m.eps = 1e-3
24 | m.momentum = 0.03
25 | if "model" not in self.__dict__:
26 | from yolox.models import YOLOX, YOLOFPN, YOLOXHead
27 | backbone = YOLOFPN()
28 | head = YOLOXHead(self.num_classes, self.width, in_channels=[128, 256, 512], act="lrelu")
29 | self.model = YOLOX(backbone, head)
30 | self.model.apply(init_yolo)
31 | self.model.head.initialize_biases(1e-2)
32 |
33 | return self.model
34 |
35 | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
36 |         from yolox.data import COCODataset
37 |         from yolox.data import MosaicDetection
38 |         from yolox.data import TrainTransform
39 |         from yolox.data import YoloBatchSampler, DataLoader, InfiniteSampler
40 | import torch.distributed as dist
41 |
42 | dataset = COCODataset(
43 | data_dir='data/COCO/',
44 | json_file=self.train_ann,
45 | img_size=self.input_size,
46 | preproc=TrainTransform(
47 | rgb_means=(0.485, 0.456, 0.406),
48 | std=(0.229, 0.224, 0.225),
49 | max_labels=50
50 | ),
51 | )
52 |
53 | dataset = MosaicDetection(
54 | dataset,
55 | mosaic=not no_aug,
56 | img_size=self.input_size,
57 | preproc=TrainTransform(
58 | rgb_means=(0.485, 0.456, 0.406),
59 | std=(0.229, 0.224, 0.225),
60 | max_labels=120
61 | ),
62 | degrees=self.degrees,
63 | translate=self.translate,
64 | scale=self.scale,
65 | shear=self.shear,
66 | perspective=self.perspective,
67 | )
68 |
69 | self.dataset = dataset
70 |
71 | if is_distributed:
72 | batch_size = batch_size // dist.get_world_size()
73 | sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0)
74 | else:
75 | sampler = torch.utils.data.RandomSampler(self.dataset)
76 |
77 | batch_sampler = YoloBatchSampler(
78 | sampler=sampler,
79 | batch_size=batch_size,
80 | drop_last=False,
81 | input_dimension=self.input_size,
82 | mosaic=not no_aug
83 | )
84 |
85 | dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
86 | dataloader_kwargs["batch_sampler"] = batch_sampler
87 | train_loader = DataLoader(self.dataset, **dataloader_kwargs)
88 |
89 | return train_loader
90 |
--------------------------------------------------------------------------------
/demo/ncnn/cpp/README.md:
--------------------------------------------------------------------------------
1 | # YOLOX-CPP-ncnn
2 |
3 | C++ demo of YOLOX object detection based on [ncnn](https://github.com/Tencent/ncnn).
4 |
5 | ## Tutorial
6 |
7 | ### Step1
8 | Clone [ncnn](https://github.com/Tencent/ncnn) first, then follow the [build tutorial of ncnn](https://github.com/Tencent/ncnn/wiki/how-to-build) to build it on your own device.
9 |
10 | ### Step2
11 | Use the provided tools to generate an onnx file.
12 | For example, if you want to generate the onnx file of yolox-s, run the following command:
13 | ```shell
14 | cd <YOLOX_HOME>
15 | python3 tools/export_onnx.py --output-name yolox.onnx -n yolox-s
16 | ```
17 | Then, a yolox.onnx file is generated.
18 |
19 | ### Step3
20 | Generate ncnn param and bin file.
21 | ```shell
22 | cd <NCNN_HOME>
23 | cd build/tools/onnx
24 | ./onnx2ncnn yolox.onnx yolox.param yolox.bin
25 | ```
26 |
27 | The Focus module is not supported by ncnn, so warnings like:
28 | ```shell
29 | Unsupported slice step !
30 | ```
31 | will be printed. Don't worry though: a C++ version of the Focus layer is already implemented in yolox.cpp.
32 |
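For intuition, the Focus layer is just a space-to-depth rearrangement built from stride-2 slices, which is exactly the slice step onnx2ncnn complains about. Below is a minimal NumPy sketch of the operation (illustrative only; it mirrors the slice order YOLOX uses):

```python
import numpy as np

def focus(x):
    """(C, H, W) -> (4C, H/2, W/2): stack the four stride-2 sub-images along channels."""
    return np.concatenate(
        [
            x[:, 0::2, 0::2],  # top-left pixels
            x[:, 1::2, 0::2],  # bottom-left pixels
            x[:, 0::2, 1::2],  # top-right pixels
            x[:, 1::2, 1::2],  # bottom-right pixels
        ],
        axis=0,
    )
```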
33 | ### Step4
34 | Open **yolox.param**, and modify it.
35 | Before (just an example):
36 | ```
37 | 295 328
38 | Input images 0 1 images
39 | Split splitncnn_input0 1 4 images images_splitncnn_0 images_splitncnn_1 images_splitncnn_2 images_splitncnn_3
40 | Crop Slice_4 1 1 images_splitncnn_3 647 -23309=1,0 -23310=1,2147483647 -23311=1,1
41 | Crop Slice_9 1 1 647 652 -23309=1,0 -23310=1,2147483647 -23311=1,2
42 | Crop Slice_14 1 1 images_splitncnn_2 657 -23309=1,0 -23310=1,2147483647 -23311=1,1
43 | Crop Slice_19 1 1 657 662 -23309=1,1 -23310=1,2147483647 -23311=1,2
44 | Crop Slice_24 1 1 images_splitncnn_1 667 -23309=1,1 -23310=1,2147483647 -23311=1,1
45 | Crop Slice_29 1 1 667 672 -23309=1,0 -23310=1,2147483647 -23311=1,2
46 | Crop Slice_34 1 1 images_splitncnn_0 677 -23309=1,1 -23310=1,2147483647 -23311=1,1
47 | Crop Slice_39 1 1 677 682 -23309=1,1 -23310=1,2147483647 -23311=1,2
48 | Concat Concat_40 4 1 652 672 662 682 683 0=0
49 | ...
50 | ```
51 | * Change the first number from 295 to 295 - 9 = 286 (we will remove 10 layers and add 1 layer, so the total layer count decreases by 9).
52 | * Then remove the 10 lines from Split through Concat, but remember the second-to-last number on the Concat line: 683.
53 | * Add a YoloV5Focus layer after the Input layer, reusing the remembered number 683 as its output blob (a scripted version of this edit is sketched after the example below):
54 | ```
55 | YoloV5Focus focus 1 1 images 683
56 | ```
57 | After (just an example):
58 | ```
59 | 286 328
60 | Input images 0 1 images
61 | YoloV5Focus focus 1 1 images 683
62 | ...
63 | ```
64 |
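If you would rather script the edit above, here is a rough sketch (a hypothetical helper, not shipped with this repo) that performs the same rewrite. It assumes the standard ncnn param layout (first line is the magic number, second line holds the layer and blob counts) and that the file contains exactly one Split...Concat block produced by Focus:

```python
# patch_focus_param.py -- hypothetical helper, not part of this repo.

def patch_param(src="yolox.param", dst="yolox.param"):
    lines = open(src).read().splitlines()
    # lines[0] is the ncnn magic number; lines[1] is "<layer_count> <blob_count>".
    layer_cnt, blob_cnt = map(int, lines[1].split())
    start = next(i for i, l in enumerate(lines) if l.startswith("Split"))
    end = next(i for i, l in enumerate(lines) if l.startswith("Concat"))
    out_blob = lines[end].split()[-2]  # the second-to-last number, e.g. 683
    removed = end - start + 1          # 10 layers removed, 1 added below
    lines[start:end + 1] = [f"YoloV5Focus      focus      1 1 images {out_blob}"]
    lines[1] = f"{layer_cnt - removed + 1} {blob_cnt}"
    open(dst, "w").write("\n".join(lines) + "\n")

if __name__ == "__main__":
    patch_param()
```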
65 | ### Step5
66 | Use ncnnoptimize to generate optimized param and bin files:
67 | ```shell
68 | # suppose you are still under the ncnn/build/tools/onnx dir.
69 | ../ncnnoptimize model.param model.bin yolox.param yolox.bin 65536
70 | ```
71 |
72 | ### Step6
73 | Copy or move the yolox.cpp file into ncnn/examples, modify CMakeLists.txt accordingly, then build the yolox target.
74 |
75 | ### Step7
76 | Run inference on an image with the yolox executable and enjoy the detection result:
77 | ```shell
78 | ./yolox demo.jpg
79 | ```
80 |
81 | ## Acknowledgement
82 |
83 | * [ncnn](https://github.com/Tencent/ncnn)
84 |
--------------------------------------------------------------------------------
/tools/export_onnx.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import argparse
6 | import os
7 | from loguru import logger
8 |
9 | import torch
10 | from torch import nn
11 |
12 | from yolox.exp import get_exp
13 | from yolox.models.network_blocks import SiLU
14 | from yolox.utils import replace_module
15 |
16 |
17 | def make_parser():
18 | parser = argparse.ArgumentParser("YOLOX onnx deploy")
19 | parser.add_argument("--output-name", type=str, default="yolox_m.onnx",
20 | help="output name of models")
21 | parser.add_argument("--input", default="input", type=str,
22 | help="input node name of onnx model")
23 | parser.add_argument("--output", default="output", type=str,
24 | help="output node name of onnx model")
25 | parser.add_argument("-o", "--opset", default=11, type=int,
26 | help="onnx opset version")
27 |     parser.add_argument("--no-onnxsim", action="store_true",
28 |                         help="disable onnxsim")
29 |     parser.add_argument("-f", "--exp_file", default='../exps/default/yolox_m.py', type=str,
30 |                         help="experiment description file",)
31 | parser.add_argument("-expn", "--experiment-name", type=str, default=None)
32 | parser.add_argument("-n", "--name", type=str, default='yolox_m',
33 | help="model name")
34 | parser.add_argument("-c", "--ckpt", default='E:/yolox_m.pth.tar', type=str,
35 | help="ckpt path")
36 | parser.add_argument("opts", help="Modify config options using the command-line",
37 | default=None, nargs=argparse.REMAINDER,)
38 |
39 | return parser
40 |
41 |
42 | @logger.catch
43 | def main():
44 | args = make_parser().parse_args()
45 | logger.info("args value: {}".format(args))
46 | exp = get_exp(args.exp_file, args.name)
47 | exp.merge(args.opts)
48 |
49 | if not args.experiment_name:
50 | args.experiment_name = exp.exp_name
51 |
52 | model = exp.get_model()
53 | if args.ckpt is None:
54 | file_name = os.path.join(exp.output_dir, args.experiment_name)
55 | ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
56 | else:
57 | ckpt_file = args.ckpt
58 |
59 | # load the model state dict
60 | ckpt = torch.load(ckpt_file, map_location="cpu")
61 |
62 | model.eval()
63 | if "model" in ckpt:
64 | ckpt = ckpt["model"]
65 | model.load_state_dict(ckpt)
66 | model = replace_module(model, nn.SiLU, SiLU)
67 | model.head.decode_in_inference = False
68 |
69 | logger.info("loading checkpoint done.")
70 | dummy_input = torch.randn(1, 3, exp.test_size[0], exp.test_size[1])
71 | torch.onnx._export(
72 | model,
73 | dummy_input,
74 | args.output_name,
75 | input_names=[args.input],
76 | output_names=[args.output],
77 | opset_version=args.opset,
78 | )
79 | logger.info("generated onnx model named {}".format(args.output_name))
80 |
81 | if not args.no_onnxsim:
82 | import onnx
83 | from onnxsim import simplify
84 |
85 |         # use onnx-simplifier to remove redundant operators from the model.
86 | onnx_model = onnx.load(args.output_name)
87 | model_simp, check = simplify(onnx_model)
88 | assert check, "Simplified ONNX model could not be validated"
89 | onnx.save(model_simp, args.output_name)
90 | logger.info("generated simplified onnx model named {}".format(args.output_name))
91 |
92 |
93 | if __name__ == "__main__":
94 | main()
95 |
--------------------------------------------------------------------------------
/yolox/utils/metric.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 | import functools
5 | import os
6 | import time
7 | from collections import defaultdict, deque
8 |
9 | import numpy as np
10 |
11 | import torch
12 |
13 | __all__ = [
14 | "AverageMeter",
15 | "MeterBuffer",
16 | "get_total_and_free_memory_in_Mb",
17 | "occupy_mem",
18 | "gpu_mem_usage",
19 | ]
20 |
21 |
22 | def get_total_and_free_memory_in_Mb(cuda_device):
23 | devices_info_str = os.popen(
24 | "nvidia-smi --query-gpu=memory.total,memory.used --format=csv,nounits,noheader"
25 | )
26 | devices_info = devices_info_str.read().strip().split("\n")
27 | total, used = devices_info[int(cuda_device)].split(",")
28 | return int(total), int(used)
29 |
30 |
31 | def occupy_mem(cuda_device, mem_ratio=0.9):
32 | """
33 |     Pre-allocate GPU memory for training to avoid memory fragmentation.
34 | """
35 | total, used = get_total_and_free_memory_in_Mb(cuda_device)
36 | max_mem = int(total * mem_ratio)
37 | block_mem = max_mem - used
38 | x = torch.cuda.FloatTensor(256, 1024, block_mem)
39 | del x
40 | time.sleep(5)
41 |
42 |
43 | def gpu_mem_usage():
44 | """
45 | Compute the GPU memory usage for the current device (MB).
46 | """
47 | mem_usage_bytes = torch.cuda.max_memory_allocated()
48 | return mem_usage_bytes / (1024 * 1024)
49 |
50 |
51 | class AverageMeter:
52 | """Track a series of values and provide access to smoothed values over a
53 | window or the global series average.
54 | """
55 |
56 | def __init__(self, window_size=50):
57 | self._deque = deque(maxlen=window_size)
58 | self._total = 0.0
59 | self._count = 0
60 |
61 | def update(self, value):
62 | self._deque.append(value)
63 | self._count += 1
64 | self._total += value
65 |
66 | @property
67 | def median(self):
68 | d = np.array(list(self._deque))
69 | return np.median(d)
70 |
71 | @property
72 | def avg(self):
73 | # if deque is empty, nan will be returned.
74 | d = np.array(list(self._deque))
75 | return d.mean()
76 |
77 | @property
78 | def global_avg(self):
79 | return self._total / max(self._count, 1e-5)
80 |
81 | @property
82 | def latest(self):
83 | return self._deque[-1] if len(self._deque) > 0 else None
84 |
85 | @property
86 | def total(self):
87 | return self._total
88 |
89 | def reset(self):
90 | self._deque.clear()
91 | self._total = 0.0
92 | self._count = 0
93 |
94 | def clear(self):
95 | self._deque.clear()
96 |
97 |
98 | class MeterBuffer(defaultdict):
99 | """Computes and stores the average and current value"""
100 |
101 | def __init__(self, window_size=20):
102 | factory = functools.partial(AverageMeter, window_size=window_size)
103 | super().__init__(factory)
104 |
105 | def reset(self):
106 | for v in self.values():
107 | v.reset()
108 |
109 | def get_filtered_meter(self, filter_key="time"):
110 | return {k: v for k, v in self.items() if filter_key in k}
111 |
112 | def update(self, values=None, **kwargs):
113 | if values is None:
114 | values = {}
115 | values.update(kwargs)
116 | for k, v in values.items():
117 | self[k].update(v)
118 |
119 | def clear_meters(self):
120 | for v in self.values():
121 | v.clear()
122 |
--------------------------------------------------------------------------------
/yolox/data/samplers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import itertools
6 | from typing import Optional
7 |
8 | import torch
9 | import torch.distributed as dist
10 | from torch.utils.data.sampler import BatchSampler as torchBatchSampler
11 | from torch.utils.data.sampler import Sampler
12 |
13 |
14 | class YoloBatchSampler(torchBatchSampler):
15 | """
16 |     This batch sampler will generate mini-batches of (dim, index, mosaic) tuples from another sampler.
17 | It works just like the :class:`torch.utils.data.sampler.BatchSampler`,
18 | but it will prepend a dimension, whilst ensuring it stays the same across one mini-batch.
19 | """
20 |
21 | def __init__(self, *args, input_dimension=None, mosaic=True, **kwargs):
22 | super().__init__(*args, **kwargs)
23 | self.input_dim = input_dimension
24 | self.new_input_dim = None
25 | self.mosaic = mosaic
26 |
27 | def __iter__(self):
28 | self.__set_input_dim()
29 | for batch in super().__iter__():
30 | yield [(self.input_dim, idx, self.mosaic) for idx in batch]
31 | self.__set_input_dim()
32 |
33 | def __set_input_dim(self):
34 |         """ This function randomly changes the input dimension of the dataset. """
35 | if self.new_input_dim is not None:
36 | self.input_dim = (self.new_input_dim[0], self.new_input_dim[1])
37 | self.new_input_dim = None
38 |
39 |
40 | class InfiniteSampler(Sampler):
41 | """
42 | In training, we only care about the "infinite stream" of training data.
43 | So this sampler produces an infinite stream of indices and
44 | all workers cooperate to correctly shuffle the indices and sample different indices.
45 |     The sampler in each worker effectively produces `indices[worker_id::num_workers]`
46 | where `indices` is an infinite stream of indices consisting of
47 | `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
48 | or `range(size) + range(size) + ...` (if shuffle is False)
49 | """
50 |
51 | def __init__(
52 | self,
53 | size: int,
54 | shuffle: bool = True,
55 | seed: Optional[int] = 0,
56 | rank=0,
57 | world_size=1,
58 | ):
59 | """
60 | Args:
61 | size (int): the total number of data of the underlying dataset to sample from
62 | shuffle (bool): whether to shuffle the indices or not
63 | seed (int): the initial seed of the shuffle. Must be the same
64 | across all workers. If None, will use a random seed shared
65 | among workers (require synchronization among all workers).
66 | """
67 | self._size = size
68 | assert size > 0
69 | self._shuffle = shuffle
70 | self._seed = int(seed)
71 |
72 | if dist.is_available() and dist.is_initialized():
73 | self._rank = dist.get_rank()
74 | self._world_size = dist.get_world_size()
75 | else:
76 | self._rank = rank
77 | self._world_size = world_size
78 |
79 | def __iter__(self):
80 | start = self._rank
81 | yield from itertools.islice(
82 | self._infinite_indices(), start, None, self._world_size
83 | )
84 |
85 | def _infinite_indices(self):
86 | g = torch.Generator()
87 | g.manual_seed(self._seed)
88 | while True:
89 | if self._shuffle:
90 | yield from torch.randperm(self._size, generator=g)
91 | else:
92 | yield from torch.arange(self._size)
93 |
94 | def __len__(self):
95 | return self._size // self._world_size
96 |
--------------------------------------------------------------------------------
/yolox/utils/model_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | from copy import deepcopy
6 |
7 | import torch
8 | import torch.nn as nn
9 | from thop import profile
10 |
11 | __all__ = [
12 | "fuse_conv_and_bn", "fuse_model", "get_model_info", "replace_module",
13 | ]
14 |
15 |
16 | def get_model_info(model, tsize):
17 |
18 | stride = 64
19 | img = torch.zeros((1, 3, stride, stride), device=next(model.parameters()).device)
20 | flops, params = profile(deepcopy(model), inputs=(img,), verbose=False)
21 | params /= 1e6
22 | flops /= 1e9
23 |     flops *= tsize[0] * tsize[1] / stride / stride * 2  # Gflops: rescale from the stride-sized probe input to the test size; x2 counts mul and add
24 | info = "Params: {:.2f}M, Gflops: {:.2f}".format(params, flops)
25 | return info
26 |
27 |
28 | def fuse_conv_and_bn(conv, bn):
29 | # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
30 | fusedconv = (
31 | nn.Conv2d(
32 | conv.in_channels,
33 | conv.out_channels,
34 | kernel_size=conv.kernel_size,
35 | stride=conv.stride,
36 | padding=conv.padding,
37 | groups=conv.groups,
38 | bias=True,
39 | )
40 | .requires_grad_(False)
41 | .to(conv.weight.device)
42 | )
43 |
44 | # prepare filters
45 | w_conv = conv.weight.clone().view(conv.out_channels, -1)
46 | w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
47 | fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
48 |
49 | # prepare spatial bias
50 | b_conv = (
51 | torch.zeros(conv.weight.size(0), device=conv.weight.device)
52 | if conv.bias is None
53 | else conv.bias
54 | )
55 | b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(
56 | torch.sqrt(bn.running_var + bn.eps)
57 | )
58 | fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
59 |
60 | return fusedconv
61 |
62 |
63 | def fuse_model(model):
64 | from yolox.models.network_blocks import BaseConv
65 |
66 | for m in model.modules():
67 | if type(m) is BaseConv and hasattr(m, "bn"):
68 | m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
69 | delattr(m, "bn") # remove batchnorm
70 | m.forward = m.fuseforward # update forward
71 | return model
72 |
73 |
74 | def replace_module(module, replaced_module_type, new_module_type, replace_func=None):
75 | """
76 | Replace given type in module to a new type. mostly used in deploy.
77 |
78 | Args:
79 | module (nn.Module): model to apply replace operation.
80 | replaced_module_type (Type): module type to be replaced.
81 |         new_module_type (Type): module type used as the replacement.
82 |         replace_func (function): python function to describe replace logic. Default value: None.
83 |
84 | Returns:
85 | model (nn.Module): module that already been replaced.
86 | """
87 | def default_replace_func(replaced_module_type, new_module_type):
88 | return new_module_type()
89 |
90 | if replace_func is None:
91 | replace_func = default_replace_func
92 |
93 | model = module
94 | if isinstance(module, replaced_module_type):
95 | model = replace_func(replaced_module_type, new_module_type)
96 |     else:  # recursively replace
97 |         for name, child in module.named_children():
98 |             new_child = replace_module(child, replaced_module_type, new_module_type, replace_func)
99 | if new_child is not child: # child is already replaced
100 | model.add_module(name, new_child)
101 |
102 | return model
103 |
--------------------------------------------------------------------------------
/yolox/layers/csrc/cocoeval/cocoeval.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | #pragma once
3 |
4 | #include <pybind11/numpy.h>
5 | #include <pybind11/pybind11.h>
6 | #include <pybind11/stl.h>
7 | #include <pybind11/stl_bind.h>
8 | #include <vector>
9 |
10 | namespace py = pybind11;
11 |
12 | namespace COCOeval {
13 |
14 | // Annotation data for a single object instance in an image
15 | struct InstanceAnnotation {
16 | InstanceAnnotation(
17 | uint64_t id,
18 | double score,
19 | double area,
20 | bool is_crowd,
21 | bool ignore)
22 | : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {}
23 | uint64_t id;
24 | double score = 0.;
25 | double area = 0.;
26 | bool is_crowd = false;
27 | bool ignore = false;
28 | };
29 |
30 | // Stores intermediate results for evaluating detection results for a single
31 | // image that has D detected instances and G ground truth instances. This stores
32 | // matches between detected and ground truth instances
33 | struct ImageEvaluation {
34 | // For each of the D detected instances, the id of the matched ground truth
35 | // instance, or 0 if unmatched
36 |   std::vector<uint64_t> detection_matches;
37 |
38 | // The detection score of each of the D detected instances
39 |   std::vector<double> detection_scores;
40 |
41 | // Marks whether or not each of G instances was ignored from evaluation (e.g.,
42 | // because it's outside area_range)
43 |   std::vector<bool> ground_truth_ignores;
44 |
45 | // Marks whether or not each of D instances was ignored from evaluation (e.g.,
46 | // because it's outside aRng)
47 |   std::vector<bool> detection_ignores;
48 | };
49 |
50 | template <class T>
51 | using ImageCategoryInstances = std::vector<std::vector<std::vector<T>>>;
52 |
53 | // C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For each
54 | // combination of image, category, area range settings, and IOU thresholds to
55 | // evaluate, it matches detected instances to ground truth instances and stores
56 | // the results into a vector of ImageEvaluation results, which will be
57 | // interpreted by the COCOeval::Accumulate() function to produce precision-recall
58 | // curves. The parameters of nested vectors have the following semantics:
59 | // image_category_ious[i][c][d][g] is the intersection over union of the d'th
60 | // detected instance and g'th ground truth instance of
61 | // category category_ids[c] in image image_ids[i]
62 | // image_category_ground_truth_instances[i][c] is a vector of ground truth
63 | // instances in image image_ids[i] of category category_ids[c]
64 | // image_category_detection_instances[i][c] is a vector of detected
65 | // instances in image image_ids[i] of category category_ids[c]
66 | std::vector<ImageEvaluation> EvaluateImages(
67 |     const std::vector<std::array<double, 2>>& area_ranges, // vector of 2-tuples
68 |     int max_detections,
69 |     const std::vector<double>& iou_thresholds,
70 |     const ImageCategoryInstances<std::vector<double>>& image_category_ious,
71 |     const ImageCategoryInstances<InstanceAnnotation>&
72 |         image_category_ground_truth_instances,
73 |     const ImageCategoryInstances<InstanceAnnotation>&
74 |         image_category_detection_instances);
75 |
76 | // C++ implementation of COCOeval.accumulate(), which generates precision
77 | // recall curves for each set of category, IOU threshold, detection area range,
78 | // and max number of detections parameters. It is assumed that the parameter
79 | // evaluations is the return value of the function COCOeval::EvaluateImages(),
80 | // which was called with the same parameter settings params
81 | py::dict Accumulate(
82 |     const py::object& params,
83 |     const std::vector<ImageEvaluation>& evaluations);
84 |
85 | } // namespace COCOeval
86 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/Makefile.cmake:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "MinGW Makefiles" Generator, CMake Version 3.20
3 |
4 | # The generator used is:
5 | set(CMAKE_DEPENDS_GENERATOR "MinGW Makefiles")
6 |
7 | # The top level Makefile was generated from the following files:
8 | set(CMAKE_MAKEFILE_DEPENDS
9 | "CMakeCache.txt"
10 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeCInformation.cmake"
11 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeCXXInformation.cmake"
12 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeCommonLanguageInclude.cmake"
13 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeExtraGeneratorDetermineCompilerMacrosAndIncludeDirs.cmake"
14 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeFindCodeBlocks.cmake"
15 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeGenericSystem.cmake"
16 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeInitializeConfigs.cmake"
17 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeLanguageInformation.cmake"
18 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeRCInformation.cmake"
19 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeSystemSpecificInformation.cmake"
20 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/CMakeSystemSpecificInitialize.cmake"
21 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Compiler/CMakeCommonCompilerMacros.cmake"
22 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Compiler/GNU-C.cmake"
23 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Compiler/GNU-CXX.cmake"
24 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Compiler/GNU.cmake"
25 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/FindCUDA.cmake"
26 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/FindCUDA/select_compute_arch.cmake"
27 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/FindPackageHandleStandardArgs.cmake"
28 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/FindPackageMessage.cmake"
29 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Platform/Windows-GNU-C-ABI.cmake"
30 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Platform/Windows-GNU-C.cmake"
31 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Platform/Windows-GNU-CXX-ABI.cmake"
32 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Platform/Windows-GNU-CXX.cmake"
33 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Platform/Windows-GNU.cmake"
34 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Platform/Windows-windres.cmake"
35 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Platform/Windows.cmake"
36 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/Platform/WindowsPaths.cmake"
37 | "D:/CLion 2021.2/bin/cmake/win/share/cmake-3.20/Modules/ProcessorCount.cmake"
38 | "D:/opencv/build/x64/MinGW/install/OpenCVConfig-version.cmake"
39 | "D:/opencv/build/x64/MinGW/install/OpenCVConfig.cmake"
40 | "D:/opencv/build/x64/MinGW/install/x64/mingw/lib/OpenCVConfig.cmake"
41 | "D:/opencv/build/x64/MinGW/install/x64/mingw/lib/OpenCVModules-release.cmake"
42 | "D:/opencv/build/x64/MinGW/install/x64/mingw/lib/OpenCVModules.cmake"
43 | "../CMakeLists.txt"
44 | "CMakeFiles/3.20.2/CMakeCCompiler.cmake"
45 | "CMakeFiles/3.20.2/CMakeCXXCompiler.cmake"
46 | "CMakeFiles/3.20.2/CMakeRCCompiler.cmake"
47 | "CMakeFiles/3.20.2/CMakeSystem.cmake"
48 | )
49 |
50 | # The corresponding makefile is:
51 | set(CMAKE_MAKEFILE_OUTPUTS
52 | "Makefile"
53 | "CMakeFiles/cmake.check_cache"
54 | )
55 |
56 | # Byproducts of CMake generate step:
57 | set(CMAKE_MAKEFILE_PRODUCTS
58 | "CMakeFiles/CMakeDirectoryInformation.cmake"
59 | )
60 |
61 | # Dependency information for all targets:
62 | set(CMAKE_DEPEND_INFO_FILES
63 | "CMakeFiles/yolox.dir/DependInfo.cmake"
64 | )
65 |
--------------------------------------------------------------------------------
/demo/ONNXRuntime/README.md:
--------------------------------------------------------------------------------
1 | ## YOLOX-ONNXRuntime in Python
2 |
3 | This doc introduces how to convert your PyTorch model to ONNX, and how to run an ONNXRuntime demo to verify your conversion.
4 |
5 | ### Download ONNX models.
6 | | Model | Parameters | GFLOPs | Test Size | mAP | Weights |
7 | |:------| :----: | :----: | :---: | :---: | :---: |
8 | | YOLOX-Nano | 0.91M | 1.08 | 416x416 | 25.3 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EfAGwvevU-lNhW5OqFAyHbwBJdI_7EaKu5yU04fgF5BU7w?e=gvq4hf)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_nano.onnx) |
9 | | YOLOX-Tiny | 5.06M | 6.45 | 416x416 |31.7 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EVigCszU1ilDn-MwLwHCF1ABsgTy06xFdVgZ04Yyo4lHVA?e=hVKiCw)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_tiny.onnx) |
10 | | YOLOX-S | 9.0M | 26.8 | 640x640 |39.6 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/Ec0L1d1x2UtIpbfiahgxhtgBZVjb1NCXbotO8SCOdMqpQQ?e=siyIsK)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_s.onnx) |
11 | | YOLOX-M | 25.3M | 73.8 | 640x640 |46.4 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ERUKlQe-nlxBoTKPy1ynbxsBmAZ_h-VBEV-nnfPdzUIkZQ?e=hyQQtl)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_m.onnx) |
12 | | YOLOX-L | 54.2M | 155.6 | 640x640 |50.0 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ET5w926jCA5GlVfg9ixB4KEBiW0HYl7SzaHNRaRG9dYO_A?e=ISmCYX)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_l.onnx) |
13 | | YOLOX-Darknet53| 63.72M | 185.3 | 640x640 |47.3 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ESArloSW-MlPlLuemLh9zKkBdovgweKbfu4zkvzKAp7pPQ?e=f81Ikw)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_darknet53.onnx) |
14 | | YOLOX-X | 99.1M | 281.9 | 640x640 |51.2 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ERjqoeMJlFdGuM3tQfXQmhABmGHlIHydWCwhlugeWLE9AA)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox.onnx) |
15 |
16 |
17 | ### Convert Your Model to ONNX
18 |
19 | First, you should move to <YOLOX_HOME> by:
20 | ```shell
21 | cd <YOLOX_HOME>
22 | ```
23 | Then, you can:
24 |
25 | 1. Convert a standard YOLOX model by -n:
26 | ```shell
27 | python3 tools/export_onnx.py --output-name yolox_s.onnx -n yolox-s -c yolox_s.pth.tar
28 | ```
29 | Notes:
30 | * -n: specify a model name. The model name must be one of [yolox-s,m,l,x, yolox-nano, yolox-tiny, yolov3]
31 | * -c: the model you have trained
32 | * -o: opset version, default 11. **However, if you will further convert your onnx model to [OpenVINO](../OpenVINO/), please specify the opset version to 10.**
33 | * --no-onnxsim: disable onnxsim
34 | * To customize an input shape for the onnx model, modify the following code in tools/export_onnx.py:
35 |
36 | ```python
37 | dummy_input = torch.randn(1, 3, exp.test_size[0], exp.test_size[1])
38 | ```
39 |
40 | 2. Convert a standard YOLOX model by -f. When using -f, the above command is equivalent to:
41 |
42 | ```shell
43 | python3 tools/export_onnx.py --output-name yolox_s.onnx -f exps/default/yolox_s.py -c yolox_s.pth.tar
44 | ```
45 |
46 | 3. To convert your customized model, please use -f:
47 |
48 | ```shell
49 | python3 tools/export_onnx.py --output-name your_yolox.onnx -f exps/your_dir/your_yolox.py -c your_yolox.pth.tar
50 | ```
51 |
52 | ### ONNXRuntime Demo
53 |
54 | Step1.
55 | ```shell
56 | cd <YOLOX_HOME>/demo/ONNXRuntime
57 | ```
58 |
59 | Step2.
60 | ```shell
61 | python3 onnx_inference.py -m <ONNX_MODEL_PATH> -i <IMAGE_PATH> -o <OUTPUT_DIR> -s 0.3 --input_shape 640,640
62 | ```
63 | Notes:
64 | * -m: your converted onnx model
65 | * -i: input image path
66 | * -s: score threshold for visualization.
67 | * --input_shape: should be consistent with the shape you used for onnx conversion.
68 |
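Before running the full demo, you can sanity-check the exported file with a few lines of Python. This is a minimal sketch; the file name `yolox_s.onnx` and the printed input name are assumptions based on the export defaults:

```python
import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession("yolox_s.onnx")
inp = session.get_inputs()[0]
print(inp.name, inp.shape)  # e.g. "input", [1, 3, 640, 640]

# Dummy forward pass to confirm the graph executes end to end.
dummy = np.random.rand(1, 3, 640, 640).astype(np.float32)
outputs = session.run(None, {inp.name: dummy})
print(outputs[0].shape)  # (1, num_anchors, 5 + num_classes)
```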
--------------------------------------------------------------------------------
/yolox/models/yolo_pafpn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding: utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import torch
6 | import torch.nn as nn
7 |
8 | from .darknet import CSPDarknet
9 | from .network_blocks import BaseConv, CSPLayer, DWConv
10 |
11 |
12 | class YOLOPAFPN(nn.Module):
13 | """
14 | YOLOv3 model. Darknet 53 is the default backbone of this model.
15 | """
16 |
17 | def __init__(
18 | self, depth=1.0, width=1.0, in_features=("dark3", "dark4", "dark5"),
19 | in_channels=[256, 512, 1024], depthwise=False, act="silu",
20 | ):
21 | super().__init__()
22 | self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
23 | self.in_features = in_features
24 | self.in_channels = in_channels
25 | Conv = DWConv if depthwise else BaseConv
26 |
27 | self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
28 | self.lateral_conv0 = BaseConv(
29 | int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act
30 | )
31 | self.C3_p4 = CSPLayer(
32 | int(2 * in_channels[1] * width),
33 | int(in_channels[1] * width),
34 | round(3 * depth),
35 | False,
36 | depthwise=depthwise,
37 | act=act,
38 | ) # cat
39 |
40 | self.reduce_conv1 = BaseConv(
41 | int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act
42 | )
43 | self.C3_p3 = CSPLayer(
44 | int(2 * in_channels[0] * width),
45 | int(in_channels[0] * width),
46 | round(3 * depth),
47 | False,
48 | depthwise=depthwise,
49 | act=act,
50 | )
51 |
52 | # bottom-up conv
53 | self.bu_conv2 = Conv(
54 | int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act
55 | )
56 | self.C3_n3 = CSPLayer(
57 | int(2 * in_channels[0] * width),
58 | int(in_channels[1] * width),
59 | round(3 * depth),
60 | False,
61 | depthwise=depthwise,
62 | act=act,
63 | )
64 |
65 | # bottom-up conv
66 | self.bu_conv1 = Conv(
67 | int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act
68 | )
69 | self.C3_n4 = CSPLayer(
70 | int(2 * in_channels[1] * width),
71 | int(in_channels[2] * width),
72 | round(3 * depth),
73 | False,
74 | depthwise=depthwise,
75 | act=act,
76 | )
77 |
78 | def forward(self, input):
79 | """
80 | Args:
81 |             input (Tensor): input images.
82 |
83 | Returns:
84 | Tuple[Tensor]: FPN feature.
85 | """
86 |
87 | # backbone
88 | out_features = self.backbone(input)
89 | features = [out_features[f] for f in self.in_features]
90 | [x2, x1, x0] = features
91 |
92 | fpn_out0 = self.lateral_conv0(x0) # 1024->512/32
93 | f_out0 = self.upsample(fpn_out0) # 512/16
94 | f_out0 = torch.cat([f_out0, x1], 1) # 512->1024/16
95 | f_out0 = self.C3_p4(f_out0) # 1024->512/16
96 |
97 | fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16
98 | f_out1 = self.upsample(fpn_out1) # 256/8
99 | f_out1 = torch.cat([f_out1, x2], 1) # 256->512/8
100 | pan_out2 = self.C3_p3(f_out1) # 512->256/8
101 |
102 | p_out1 = self.bu_conv2(pan_out2) # 256->256/16
103 | p_out1 = torch.cat([p_out1, fpn_out1], 1) # 256->512/16
104 | pan_out1 = self.C3_n3(p_out1) # 512->512/16
105 |
106 | p_out0 = self.bu_conv1(pan_out1) # 512->512/32
107 | p_out0 = torch.cat([p_out0, fpn_out0], 1) # 512->1024/32
108 | pan_out0 = self.C3_n4(p_out0) # 1024->1024/32
109 |
110 | outputs = (pan_out2, pan_out1, pan_out0)
111 | return outputs
112 |
--------------------------------------------------------------------------------
/yolox/core/launch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Code is based on
4 | # https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/launch.py
5 | # Copyright (c) Facebook, Inc. and its affiliates.
6 | # Copyright (c) Megvii, Inc. and its affiliates.
7 |
8 | from loguru import logger
9 |
10 | import torch
11 | import torch.distributed as dist
12 | import torch.multiprocessing as mp
13 |
14 | import yolox.utils.dist as comm
15 |
16 | __all__ = ["launch"]
17 |
18 |
19 | def _find_free_port():
20 | """
21 | Find an available port of current machine / node.
22 | """
23 | import socket
24 |
25 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
26 | # Binding to port 0 will cause the OS to find an available port for us
27 | sock.bind(("", 0))
28 | port = sock.getsockname()[1]
29 | sock.close()
30 | # NOTE: there is still a chance the port could be taken by other processes.
31 | return port
32 |
33 |
34 | def launch(
35 | main_func, num_gpus_per_machine, num_machines=1, machine_rank=0,
36 | backend="nccl", dist_url=None, args=()
37 | ):
38 | """
39 | Args:
40 | main_func: a function that will be called by `main_func(*args)`
41 | num_machines (int): the total number of machines
42 | machine_rank (int): the rank of this machine (one per machine)
43 | dist_url (str): url to connect to for distributed training, including protocol
44 | e.g. "tcp://127.0.0.1:8686".
45 | Can be set to auto to automatically select a free port on localhost
46 | args (tuple): arguments passed to main_func
47 | """
48 | world_size = num_machines * num_gpus_per_machine
49 | if world_size > 1:
50 | # https://github.com/pytorch/pytorch/pull/14391
51 | # TODO prctl in spawned processes
52 |
53 | if dist_url == "auto":
54 |             assert num_machines == 1, "dist_url=auto cannot be used with multi-machine jobs."
55 | port = _find_free_port()
56 | dist_url = f"tcp://127.0.0.1:{port}"
57 |
58 | mp.spawn(
59 | _distributed_worker,
60 | nprocs=num_gpus_per_machine,
61 | args=(
62 | main_func, world_size, num_gpus_per_machine,
63 | machine_rank, backend, dist_url, args
64 | ),
65 | daemon=False,
66 | )
67 | else:
68 | main_func(*args)
69 |
70 |
71 | def _distributed_worker(
72 | local_rank, main_func, world_size, num_gpus_per_machine,
73 | machine_rank, backend, dist_url, args
74 | ):
75 | assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
76 | global_rank = machine_rank * num_gpus_per_machine + local_rank
77 | logger.info("Rank {} initialization finished.".format(global_rank))
78 | try:
79 | dist.init_process_group(
80 | backend=backend,
81 | init_method=dist_url,
82 | world_size=world_size,
83 | rank=global_rank,
84 | )
85 | except Exception:
86 | logger.error("Process group URL: {}".format(dist_url))
87 | raise
88 | # synchronize is needed here to prevent a possible timeout after calling init_process_group
89 | # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
90 | comm.synchronize()
91 |
92 | assert num_gpus_per_machine <= torch.cuda.device_count()
93 | torch.cuda.set_device(local_rank)
94 |
95 | # Setup the local process group (which contains ranks within the same machine)
96 | assert comm._LOCAL_PROCESS_GROUP is None
97 | num_machines = world_size // num_gpus_per_machine
98 | for i in range(num_machines):
99 | ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
100 | pg = dist.new_group(ranks_on_i)
101 | if i == machine_rank:
102 | comm._LOCAL_PROCESS_GROUP = pg
103 |
104 | main_func(*args)
105 |
--------------------------------------------------------------------------------
/demo/MegEngine/python/models/yolo_pafpn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- encoding: utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import megengine.module as M
6 | import megengine.functional as F
7 |
8 | from .darknet import CSPDarknet
9 | from .network_blocks import BaseConv, CSPLayer, DWConv, UpSample
10 |
11 |
12 | class YOLOPAFPN(M.Module):
13 | """
14 | YOLOv3 model. Darknet 53 is the default backbone of this model.
15 | """
16 |
17 | def __init__(
18 | self, depth=1.0, width=1.0, in_features=("dark3", "dark4", "dark5"),
19 | in_channels=[256, 512, 1024], depthwise=False, act="silu",
20 | ):
21 | super().__init__()
22 | self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
23 | self.in_features = in_features
24 | self.in_channels = in_channels
25 | Conv = DWConv if depthwise else BaseConv
26 |
27 | self.upsample = UpSample(scale_factor=2, mode="bilinear")
28 | self.lateral_conv0 = BaseConv(
29 | int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act
30 | )
31 | self.C3_p4 = CSPLayer(
32 | int(2 * in_channels[1] * width),
33 | int(in_channels[1] * width),
34 | round(3 * depth),
35 | False,
36 | depthwise=depthwise,
37 | act=act,
38 | ) # cat
39 |
40 | self.reduce_conv1 = BaseConv(
41 | int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act
42 | )
43 | self.C3_p3 = CSPLayer(
44 | int(2 * in_channels[0] * width),
45 | int(in_channels[0] * width),
46 | round(3 * depth),
47 | False,
48 | depthwise=depthwise,
49 | act=act,
50 | )
51 |
52 | # bottom-up conv
53 | self.bu_conv2 = Conv(
54 | int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act
55 | )
56 | self.C3_n3 = CSPLayer(
57 | int(2 * in_channels[0] * width),
58 | int(in_channels[1] * width),
59 | round(3 * depth),
60 | False,
61 | depthwise=depthwise,
62 | act=act,
63 | )
64 |
65 | # bottom-up conv
66 | self.bu_conv1 = Conv(
67 | int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act
68 | )
69 | self.C3_n4 = CSPLayer(
70 | int(2 * in_channels[1] * width),
71 | int(in_channels[2] * width),
72 | round(3 * depth),
73 | False,
74 | depthwise=depthwise,
75 | act=act,
76 | )
77 |
78 | def forward(self, input):
79 | """
80 | Args:
81 |             input (Tensor): input images.
82 |
83 | Returns:
84 | Tuple[Tensor]: FPN feature.
85 | """
86 |
87 | # backbone
88 | out_features = self.backbone(input)
89 | features = [out_features[f] for f in self.in_features]
90 | [x2, x1, x0] = features
91 |
92 | fpn_out0 = self.lateral_conv0(x0) # 1024->512/32
93 | f_out0 = self.upsample(fpn_out0) # 512/16
94 | f_out0 = F.concat([f_out0, x1], 1) # 512->1024/16
95 | f_out0 = self.C3_p4(f_out0) # 1024->512/16
96 |
97 | fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16
98 | f_out1 = self.upsample(fpn_out1) # 256/8
99 | f_out1 = F.concat([f_out1, x2], 1) # 256->512/8
100 | pan_out2 = self.C3_p3(f_out1) # 512->256/8
101 |
102 | p_out1 = self.bu_conv2(pan_out2) # 256->256/16
103 | p_out1 = F.concat([p_out1, fpn_out1], 1) # 256->512/16
104 | pan_out1 = self.C3_n3(p_out1) # 512->512/16
105 |
106 | p_out0 = self.bu_conv1(pan_out1) # 512->512/32
107 | p_out0 = F.concat([p_out0, fpn_out0], 1) # 512->1024/32
108 | pan_out0 = self.C3_n4(p_out0) # 1024->1024/32
109 |
110 | outputs = (pan_out2, pan_out1, pan_out0)
111 | return outputs
112 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/Makefile2:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "MinGW Makefiles" Generator, CMake Version 3.20
3 |
4 | # Default target executed when no arguments are given to make.
5 | default_target: all
6 | .PHONY : default_target
7 |
8 | #=============================================================================
9 | # Special targets provided by cmake.
10 |
11 | # Disable implicit rules so canonical targets will work.
12 | .SUFFIXES:
13 |
14 | # Disable VCS-based implicit rules.
15 | % : %,v
16 |
17 | # Disable VCS-based implicit rules.
18 | % : RCS/%
19 |
20 | # Disable VCS-based implicit rules.
21 | % : RCS/%,v
22 |
23 | # Disable VCS-based implicit rules.
24 | % : SCCS/s.%
25 |
26 | # Disable VCS-based implicit rules.
27 | % : s.%
28 |
29 | .SUFFIXES: .hpux_make_needs_suffix_list
30 |
31 | # Command-line flag to silence nested $(MAKE).
32 | $(VERBOSE)MAKESILENT = -s
33 |
34 | #Suppress display of executed commands.
35 | $(VERBOSE).SILENT:
36 |
37 | # A target that is always out of date.
38 | cmake_force:
39 | .PHONY : cmake_force
40 |
41 | #=============================================================================
42 | # Set environment variables for the build.
43 |
44 | SHELL = cmd.exe
45 |
46 | # The CMake executable.
47 | CMAKE_COMMAND = "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe"
48 |
49 | # The command to remove a file.
50 | RM = "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe" -E rm -f
51 |
52 | # Escaping for special characters.
53 | EQUALS = =
54 |
55 | # The top-level source directory on which CMake was run.
56 | CMAKE_SOURCE_DIR = E:\YOLOX-main\demo\TensorRT\cpp
57 |
58 | # The top-level build directory on which CMake was run.
59 | CMAKE_BINARY_DIR = E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug
60 |
61 | #=============================================================================
62 | # Directory level rules for the build root directory
63 |
64 | # The main recursive "all" target.
65 | all: CMakeFiles/yolox.dir/all
66 | .PHONY : all
67 |
68 | # The main recursive "preinstall" target.
69 | preinstall:
70 | .PHONY : preinstall
71 |
72 | # The main recursive "clean" target.
73 | clean: CMakeFiles/yolox.dir/clean
74 | .PHONY : clean
75 |
76 | #=============================================================================
77 | # Target rules for target CMakeFiles/yolox.dir
78 |
79 | # All Build rule for target.
80 | CMakeFiles/yolox.dir/all:
81 | $(MAKE) $(MAKESILENT) -f CMakeFiles\yolox.dir\build.make CMakeFiles/yolox.dir/depend
82 | $(MAKE) $(MAKESILENT) -f CMakeFiles\yolox.dir\build.make CMakeFiles/yolox.dir/build
83 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --progress-dir=E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug\CMakeFiles --progress-num=1,2 "Built target yolox"
84 | .PHONY : CMakeFiles/yolox.dir/all
85 |
86 | # Build rule for subdir invocation for target.
87 | CMakeFiles/yolox.dir/rule: cmake_check_build_system
88 | $(CMAKE_COMMAND) -E cmake_progress_start E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug\CMakeFiles 2
89 | $(MAKE) $(MAKESILENT) -f CMakeFiles\Makefile2 CMakeFiles/yolox.dir/all
90 | $(CMAKE_COMMAND) -E cmake_progress_start E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug\CMakeFiles 0
91 | .PHONY : CMakeFiles/yolox.dir/rule
92 |
93 | # Convenience name for target.
94 | yolox: CMakeFiles/yolox.dir/rule
95 | .PHONY : yolox
96 |
97 | # clean rule for target.
98 | CMakeFiles/yolox.dir/clean:
99 | $(MAKE) $(MAKESILENT) -f CMakeFiles\yolox.dir\build.make CMakeFiles/yolox.dir/clean
100 | .PHONY : CMakeFiles/yolox.dir/clean
101 |
102 | #=============================================================================
103 | # Special targets to cleanup operation of make.
104 |
105 | # Special rule to run CMake to check the build system integrity.
106 | # No rule that depends on this can have commands that come from listfiles
107 | # because they might be regenerated.
108 | cmake_check_build_system:
109 | $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles\Makefile.cmake 0
110 | .PHONY : cmake_check_build_system
111 |
112 |
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import argparse
6 | import random
7 | import warnings
8 | from loguru import logger
9 | import os
10 | import torch
11 | import torch.backends.cudnn as cudnn
12 |
13 | from yolox.core import Trainer, launch
14 | from yolox.exp import get_exp
15 | from yolox.utils import configure_nccl
16 | os.environ['CUDA_VISIBLE_DEVICES']='1,2'
17 |
18 | def make_parser():
19 | parser = argparse.ArgumentParser("YOLOX train parser")
20 | parser.add_argument("-expn", "--experiment-name", type=str, default=None)
21 | parser.add_argument("-n", "--name", type=str, default=None, help="model name")
22 |
23 | # distributed
24 | parser.add_argument(
25 | "--dist-backend", default="nccl", type=str, help="distributed backend"
26 | )
27 | parser.add_argument(
28 | "--dist-url", default=None, type=str, help="url used to set up distributed training"
29 | )
30 | parser.add_argument("-b", "--batch-size", type=int, default=32, help="batch size")
31 | parser.add_argument(
32 |         "-d", "--devices", default=2, type=int, help="number of devices for training"
33 | )
34 | parser.add_argument(
35 | "--local_rank", default=0, type=int, help="local rank for dist training"
36 | )
37 | parser.add_argument(
38 | "-f",
39 | "--exp_file",
40 | default='/home/meprint/sunanlin_folder/YOLOX-main/yolox/exp/yolox_base.py',
41 | type=str,
42 |         help="please input your experiment description file",
43 | )
44 | parser.add_argument(
45 | "--resume", default=False, action="store_true", help="resume training"
46 | )
47 | parser.add_argument("-c", "--ckpt", default='yoloxl_.pth.tar', type=str, help="pre checkpoint file")
48 | parser.add_argument(
49 | "-e", "--start_epoch", default=None, type=int, help="resume training start epoch"
50 | )
51 | parser.add_argument(
52 |         "--num_machine", default=1, type=int, help="number of nodes for training"
53 | )
54 | parser.add_argument(
55 | "--machine_rank", default=0, type=int, help="node rank for multi-node training"
56 | )
57 | parser.add_argument(
58 | "--fp16",
59 | dest="fp16",
60 |         default=True,  # NOTE: combined with action="store_true", fp16 is effectively always enabled
61 | action="store_true",
62 |         help="Adopt mixed-precision training.",
63 | )
64 | parser.add_argument(
65 | "-o",
66 | "--occupy",
67 | dest="occupy",
68 | default=True,
69 | action="store_true",
70 | help="occupy GPU memory first for training.",
71 | )
72 | parser.add_argument(
73 | "opts",
74 | help="Modify config options using the command-line",
75 | default=None,
76 | nargs=argparse.REMAINDER,
77 | )
78 | return parser
79 |
80 |
81 | @logger.catch
82 | def main(exp, args):
83 | if not args.experiment_name:
84 | args.experiment_name = exp.exp_name
85 |
86 | if exp.seed is not None:
87 | random.seed(exp.seed)
88 | torch.manual_seed(exp.seed)
89 | cudnn.deterministic = True
90 | warnings.warn(
91 | "You have chosen to seed training. This will turn on the CUDNN deterministic setting, "
92 | "which can slow down your training considerably! You may see unexpected behavior "
93 | "when restarting from checkpoints."
94 | )
95 |
96 | # set environment variables for distributed training
97 | configure_nccl()
98 | cudnn.benchmark = True
99 |
100 | trainer = Trainer(exp, args)
101 | trainer.train()
102 |
103 |
104 | if __name__ == "__main__":
105 | args = make_parser().parse_args()
106 | exp = get_exp(args.exp_file, args.name)
107 | exp.merge(args.opts)
108 |
109 | num_gpu = torch.cuda.device_count() if args.devices is None else args.devices
110 | assert num_gpu <= torch.cuda.device_count()
111 |
112 | dist_url = "auto" if args.dist_url is None else args.dist_url
113 | launch(
114 | main, num_gpu, args.num_machine, backend=args.dist_backend,
115 | dist_url=dist_url, args=(exp, args)
116 | )
117 |
--------------------------------------------------------------------------------
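For a quick single-GPU run, the same entry points can be driven directly without going through `launch()`. A minimal sketch using the names defined in train.py above (it assumes the repo root is on `sys.path`; note the module itself pins `CUDA_VISIBLE_DEVICES` at import time):

```python
from train import main, make_parser
from yolox.exp import get_exp

# Build args exactly as the __main__ block does, but for one local GPU.
args = make_parser().parse_args(
    ["-n", "yolox-s", "-b", "8", "-d", "1", "-f", "exps/default/yolox_s.py"]
)
exp = get_exp(args.exp_file, args.name)
exp.merge(args.opts)
main(exp, args)  # launch() is only needed for multi-GPU / multi-node runs
```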
/yolox/utils/visualize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import cv2
6 | import numpy as np
7 |
8 | __all__ = ["vis"]
9 |
10 |
11 | def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
12 |
13 | for i in range(len(boxes)):
14 | box = boxes[i]
15 | cls_id = int(cls_ids[i])
16 | score = scores[i]
17 | if score < conf:
18 | continue
19 | x0 = int(box[0])
20 | y0 = int(box[1])
21 | x1 = int(box[2])
22 | y1 = int(box[3])
23 |
24 | color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
25 | text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100)
26 | txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)
27 | font = cv2.FONT_HERSHEY_SIMPLEX
28 |
29 | txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]
30 | cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
31 |
32 | txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
33 | cv2.rectangle(
34 | img,
35 | (x0, y0 + 1),
36 | (x0 + txt_size[0] + 1, y0 + int(1.5*txt_size[1])),
37 | txt_bk_color,
38 | -1
39 | )
40 | cv2.putText(img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1)
41 |
42 | return img
43 |
44 |
45 | _COLORS = np.array(
46 | [
47 | 0.000, 0.447, 0.741,
48 | 0.850, 0.325, 0.098,
49 | 0.929, 0.694, 0.125,
50 | 0.494, 0.184, 0.556,
51 | 0.466, 0.674, 0.188,
52 | 0.301, 0.745, 0.933,
53 | 0.635, 0.078, 0.184,
54 | 0.300, 0.300, 0.300,
55 | 0.600, 0.600, 0.600,
56 | 1.000, 0.000, 0.000,
57 | 1.000, 0.500, 0.000,
58 | 0.749, 0.749, 0.000,
59 | 0.000, 1.000, 0.000,
60 | 0.000, 0.000, 1.000,
61 | 0.667, 0.000, 1.000,
62 | 0.333, 0.333, 0.000,
63 | 0.333, 0.667, 0.000,
64 | 0.333, 1.000, 0.000,
65 | 0.667, 0.333, 0.000,
66 | 0.667, 0.667, 0.000,
67 | 0.667, 1.000, 0.000,
68 | 1.000, 0.333, 0.000,
69 | 1.000, 0.667, 0.000,
70 | 1.000, 1.000, 0.000,
71 | 0.000, 0.333, 0.500,
72 | 0.000, 0.667, 0.500,
73 | 0.000, 1.000, 0.500,
74 | 0.333, 0.000, 0.500,
75 | 0.333, 0.333, 0.500,
76 | 0.333, 0.667, 0.500,
77 | 0.333, 1.000, 0.500,
78 | 0.667, 0.000, 0.500,
79 | 0.667, 0.333, 0.500,
80 | 0.667, 0.667, 0.500,
81 | 0.667, 1.000, 0.500,
82 | 1.000, 0.000, 0.500,
83 | 1.000, 0.333, 0.500,
84 | 1.000, 0.667, 0.500,
85 | 1.000, 1.000, 0.500,
86 | 0.000, 0.333, 1.000,
87 | 0.000, 0.667, 1.000,
88 | 0.000, 1.000, 1.000,
89 | 0.333, 0.000, 1.000,
90 | 0.333, 0.333, 1.000,
91 | 0.333, 0.667, 1.000,
92 | 0.333, 1.000, 1.000,
93 | 0.667, 0.000, 1.000,
94 | 0.667, 0.333, 1.000,
95 | 0.667, 0.667, 1.000,
96 | 0.667, 1.000, 1.000,
97 | 1.000, 0.000, 1.000,
98 | 1.000, 0.333, 1.000,
99 | 1.000, 0.667, 1.000,
100 | 0.333, 0.000, 0.000,
101 | 0.500, 0.000, 0.000,
102 | 0.667, 0.000, 0.000,
103 | 0.833, 0.000, 0.000,
104 | 1.000, 0.000, 0.000,
105 | 0.000, 0.167, 0.000,
106 | 0.000, 0.333, 0.000,
107 | 0.000, 0.500, 0.000,
108 | 0.000, 0.667, 0.000,
109 | 0.000, 0.833, 0.000,
110 | 0.000, 1.000, 0.000,
111 | 0.000, 0.000, 0.167,
112 | 0.000, 0.000, 0.333,
113 | 0.000, 0.000, 0.500,
114 | 0.000, 0.000, 0.667,
115 | 0.000, 0.000, 0.833,
116 | 0.000, 0.000, 1.000,
117 | 0.000, 0.000, 0.000,
118 | 0.143, 0.143, 0.143,
119 | 0.286, 0.286, 0.286,
120 | 0.429, 0.429, 0.429,
121 | 0.571, 0.571, 0.571,
122 | 0.714, 0.714, 0.714,
123 | 0.857, 0.857, 0.857,
124 | 0.000, 0.447, 0.741,
125 | 0.314, 0.717, 0.741,
126 | 0.50, 0.5, 0
127 | ]
128 | ).astype(np.float32).reshape(-1, 3)
129 |
--------------------------------------------------------------------------------
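A minimal sketch of calling `vis()` above on a blank canvas with one hypothetical detection (all input values are made up for illustration):

```python
import cv2
import numpy as np

from yolox.utils.visualize import vis

img = np.zeros((640, 640, 3), dtype=np.uint8)   # blank BGR canvas
boxes = np.array([[50.0, 60.0, 300.0, 400.0]])  # one (x1, y1, x2, y2) box
scores = np.array([0.91])
cls_ids = np.array([0])

out = vis(img, boxes, scores, cls_ids, conf=0.5, class_names=["person"])
cv2.imwrite("vis_demo.png", out)                # box plus a "person:91.0%" label
```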
/demo/MegEngine/python/visualize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import cv2
6 | import numpy as np
7 |
8 | __all__ = ["vis"]
9 |
10 |
11 | def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
12 |
13 | for i in range(len(boxes)):
14 | box = boxes[i]
15 | cls_id = int(cls_ids[i])
16 | score = scores[i]
17 | if score < conf:
18 | continue
19 | x0 = int(box[0])
20 | y0 = int(box[1])
21 | x1 = int(box[2])
22 | y1 = int(box[3])
23 |
24 | color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
25 | text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100)
26 | txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)
27 | font = cv2.FONT_HERSHEY_SIMPLEX
28 |
29 | txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]
30 | cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
31 |
32 | txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
33 | cv2.rectangle(
34 | img,
35 | (x0, y0 + 1),
36 | (x0 + txt_size[0] + 1, y0 + int(1.5*txt_size[1])),
37 | txt_bk_color,
38 | -1
39 | )
40 | cv2.putText(img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1)
41 |
42 | return img
43 |
44 |
45 | _COLORS = np.array(
46 | [
47 | 0.000, 0.447, 0.741,
48 | 0.850, 0.325, 0.098,
49 | 0.929, 0.694, 0.125,
50 | 0.494, 0.184, 0.556,
51 | 0.466, 0.674, 0.188,
52 | 0.301, 0.745, 0.933,
53 | 0.635, 0.078, 0.184,
54 | 0.300, 0.300, 0.300,
55 | 0.600, 0.600, 0.600,
56 | 1.000, 0.000, 0.000,
57 | 1.000, 0.500, 0.000,
58 | 0.749, 0.749, 0.000,
59 | 0.000, 1.000, 0.000,
60 | 0.000, 0.000, 1.000,
61 | 0.667, 0.000, 1.000,
62 | 0.333, 0.333, 0.000,
63 | 0.333, 0.667, 0.000,
64 | 0.333, 1.000, 0.000,
65 | 0.667, 0.333, 0.000,
66 | 0.667, 0.667, 0.000,
67 | 0.667, 1.000, 0.000,
68 | 1.000, 0.333, 0.000,
69 | 1.000, 0.667, 0.000,
70 | 1.000, 1.000, 0.000,
71 | 0.000, 0.333, 0.500,
72 | 0.000, 0.667, 0.500,
73 | 0.000, 1.000, 0.500,
74 | 0.333, 0.000, 0.500,
75 | 0.333, 0.333, 0.500,
76 | 0.333, 0.667, 0.500,
77 | 0.333, 1.000, 0.500,
78 | 0.667, 0.000, 0.500,
79 | 0.667, 0.333, 0.500,
80 | 0.667, 0.667, 0.500,
81 | 0.667, 1.000, 0.500,
82 | 1.000, 0.000, 0.500,
83 | 1.000, 0.333, 0.500,
84 | 1.000, 0.667, 0.500,
85 | 1.000, 1.000, 0.500,
86 | 0.000, 0.333, 1.000,
87 | 0.000, 0.667, 1.000,
88 | 0.000, 1.000, 1.000,
89 | 0.333, 0.000, 1.000,
90 | 0.333, 0.333, 1.000,
91 | 0.333, 0.667, 1.000,
92 | 0.333, 1.000, 1.000,
93 | 0.667, 0.000, 1.000,
94 | 0.667, 0.333, 1.000,
95 | 0.667, 0.667, 1.000,
96 | 0.667, 1.000, 1.000,
97 | 1.000, 0.000, 1.000,
98 | 1.000, 0.333, 1.000,
99 | 1.000, 0.667, 1.000,
100 | 0.333, 0.000, 0.000,
101 | 0.500, 0.000, 0.000,
102 | 0.667, 0.000, 0.000,
103 | 0.833, 0.000, 0.000,
104 | 1.000, 0.000, 0.000,
105 | 0.000, 0.167, 0.000,
106 | 0.000, 0.333, 0.000,
107 | 0.000, 0.500, 0.000,
108 | 0.000, 0.667, 0.000,
109 | 0.000, 0.833, 0.000,
110 | 0.000, 1.000, 0.000,
111 | 0.000, 0.000, 0.167,
112 | 0.000, 0.000, 0.333,
113 | 0.000, 0.000, 0.500,
114 | 0.000, 0.000, 0.667,
115 | 0.000, 0.000, 0.833,
116 | 0.000, 0.000, 1.000,
117 | 0.000, 0.000, 0.000,
118 | 0.143, 0.143, 0.143,
119 | 0.286, 0.286, 0.286,
120 | 0.429, 0.429, 0.429,
121 | 0.571, 0.571, 0.571,
122 | 0.714, 0.714, 0.714,
123 | 0.857, 0.857, 0.857,
124 | 0.000, 0.447, 0.741,
125 | 0.314, 0.717, 0.741,
126 | 0.50, 0.5, 0
127 | ]
128 | ).astype(np.float32).reshape(-1, 3)
129 |
--------------------------------------------------------------------------------
/demo/OpenVINO/cpp/README.md:
--------------------------------------------------------------------------------
1 | # YOLOX-OpenVINO in C++
2 |
3 | This tutorial includes a C++ demo for OpenVINO, as well as some converted models.
4 |
5 | ### Download OpenVINO models.
6 | | Model | Parameters | GFLOPs | Test Size | mAP | Weights |
7 | |:------| :----: | :----: | :---: | :---: | :---: |
8 | | [YOLOX-Nano](../../../exps/nano.py) | 0.91M | 1.08 | 416x416 | 25.3 | [Download](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EeWY57o5wQZFtXYd1KJw6Z8B4vxZru649XxQHYIFgio3Qw?e=ZS81ce)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_nano_openvino.tar.gz) |
9 | | [YOLOX-Tiny](../../../exps/yolox_tiny.py) | 5.06M | 6.45 | 416x416 |31.7 | [Download](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ETfvOoCXdVZNinoSpKA_sEYBIQVqfjjF5_M6VvHRnLVcsA?e=STL1pi)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_tiny_openvino.tar.gz) |
10 | | [YOLOX-S](../../../exps/yolox_s.py) | 9.0M | 26.8 | 640x640 |39.6 | [Download](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EXUjf3PQnbBLrxNrXPueqaIBzVZOrYQOnJpLK1Fytj5ssA?e=GK0LOM)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_s_openvino.tar.gz) |
11 | | [YOLOX-M](../../../exps/yolox_m.py) | 25.3M | 73.8 | 640x640 |46.4 | [Download](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EcoT1BPpeRpLvE_4c441zn8BVNCQ2naxDH3rho7WqdlgLQ?e=95VaM9)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_m_openvino.tar.gz) |
12 | | [YOLOX-L](../../../exps/yolox_l.py) | 54.2M | 155.6 | 640x640 |50.0 | [Download](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EZvmn-YLRuVPh0GAP_w3xHMB2VGvrKqQXyK_Cv5yi_DXUg?e=YRh6Eq)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_l_openvino.tar.gz) |
13 | | [YOLOX-Darknet53](../../../exps/yolov3.py) | 63.72M | 185.3 | 640x640 |47.3 | [Download](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EQP8LSroikFHuwX0jFRetmcBOCDWSFmylHxolV7ezUPXGw?e=bEw5iq)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_darknet53_openvino.tar.gz) |
14 | | [YOLOX-X](../../../exps/yolox_x.py) | 99.1M | 281.9 | 640x640 |51.2 | [Download](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EZFPnLqiD-xIlt7rcZYDjQgB4YXE9wnq1qaSXQwJrsKbdg?e=83nwEz)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_x_openvino.tar.gz) |
15 |
16 | ## Install OpenVINO Toolkit
17 |
18 | Please visit the [OpenVINO homepage](https://docs.openvinotoolkit.org/latest/get_started_guides.html) for more details.
19 |
20 | ## Set up the Environment
21 |
22 | ### For Linux
23 |
24 | **Option 1. Set up the environment temporarily. You need to run this command every time you start a new shell window.**
25 |
26 | ```shell
27 | source /opt/intel/openvino_2021/bin/setupvars.sh
28 | ```
29 |
30 | **Option 2. Set up the environment permanently.**
31 |
32 | *Step 1.* For Linux:
33 | ```shell
34 | vim ~/.bashrc
35 | ```
36 |
37 | *Step 2.* Add the following line to the file:
38 |
39 | ```shell
40 | source /opt/intel/openvino_2021/bin/setupvars.sh
41 | ```
42 |
43 | *Step 3.* Save and close the file, then run:
44 |
45 | ```shell
46 | source ~/.bashrc
47 | ```
48 |
49 |
50 | ## Convert model
51 |
52 | 1. Export ONNX model
53 |
54 | Please refer to the [ONNX tutorial](../../ONNXRuntime). **Note that you should set --opset to 10, otherwise the next step will fail.**
55 |
56 | 2. Convert ONNX to OpenVINO
57 |
58 | ``` shell
59 | cd <INSTALL_DIR>/openvino_2021/deployment_tools/model_optimizer
60 | ```
61 |
62 | Install the requirements for the conversion tool:
63 |
64 | ```shell
65 | sudo ./install_prerequisites/install_prerequisites_onnx.sh
66 | ```
67 |
68 | Then convert the model:
69 | ```shell
70 | python3 mo.py --input_model <ONNX_MODEL> --input_shape <INPUT_SHAPE> [--data_type FP16]
71 | ```
72 | For example:
73 | ```shell
74 | python3 mo.py --input_model yolox.onnx --input_shape [1,3,640,640] --data_type FP16
75 | ```
76 |
77 | ## Build
78 |
79 | ### Linux
80 | ```shell
81 | source /opt/intel/openvino_2021/bin/setupvars.sh
82 | mkdir build
83 | cd build
84 | cmake ..
85 | make
86 | ```
87 |
88 | ## Demo
89 |
90 | ### C++
91 |
92 | ```shell
93 | ./yolox_openvino <XML_MODEL_PATH> <IMAGE_PATH> <DEVICE>
94 | ```
95 |
--------------------------------------------------------------------------------
/demo/OpenVINO/python/README.md:
--------------------------------------------------------------------------------
1 | # YOLOX-OpenVINO in Python
2 |
3 | This tutorial includes a Python demo for OpenVINO, as well as some converted models.
4 |
5 | ### Download OpenVINO models.
6 | | Model | Parameters | GFLOPs | Test Size | mAP | Weights |
7 | |:------| :----: | :----: | :---: | :---: | :---: |
8 | | [YOLOX-Nano](../../../exps/default/nano.py) | 0.91M | 1.08 | 416x416 | 25.3 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EeWY57o5wQZFtXYd1KJw6Z8B4vxZru649XxQHYIFgio3Qw?e=ZS81ce)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_nano_openvino.tar.gz) |
9 | | [YOLOX-Tiny](../../../exps/default/yolox_tiny.py) | 5.06M | 6.45 | 416x416 |31.7 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ETfvOoCXdVZNinoSpKA_sEYBIQVqfjjF5_M6VvHRnLVcsA?e=STL1pi)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_tiny_openvino.tar.gz) |
10 | | [YOLOX-S](../../../exps/default/yolox_s.py) | 9.0M | 26.8 | 640x640 |39.6 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EXUjf3PQnbBLrxNrXPueqaIBzVZOrYQOnJpLK1Fytj5ssA?e=GK0LOM)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_s_openvino.tar.gz) |
11 | | [YOLOX-M](../../../exps/default/yolox_m.py) | 25.3M | 73.8 | 640x640 |46.4 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EcoT1BPpeRpLvE_4c441zn8BVNCQ2naxDH3rho7WqdlgLQ?e=95VaM9)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_m_openvino.tar.gz) |
12 | | [YOLOX-L](../../../exps/default/yolox_l.py) | 54.2M | 155.6 | 640x640 |50.0 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EZvmn-YLRuVPh0GAP_w3xHMB2VGvrKqQXyK_Cv5yi_DXUg?e=YRh6Eq)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_l_openvino.tar.gz) |
13 | | [YOLOX-Darknet53](../../../exps/default/yolov3.py) | 63.72M | 185.3 | 640x640 |47.3 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EQP8LSroikFHuwX0jFRetmcBOCDWSFmylHxolV7ezUPXGw?e=bEw5iq)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_darknet53_openvino.tar.gz) |
14 | | [YOLOX-X](../../../exps/default/yolox_x.py) | 99.1M | 281.9 | 640x640 |51.2 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EZFPnLqiD-xIlt7rcZYDjQgB4YXE9wnq1qaSXQwJrsKbdg?e=83nwEz)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_x_openvino.tar.gz) |
15 |
16 | ## Install OpenVINO Toolkit
17 |
18 | Please visit the [OpenVINO homepage](https://docs.openvinotoolkit.org/latest/get_started_guides.html) for more details.
19 |
20 | ## Set up the Environment
21 |
22 | ### For Linux
23 |
24 | **Option 1. Set up the environment temporarily. You need to run this command every time you start a new shell window.**
25 |
26 | ```shell
27 | source /opt/intel/openvino_2021/bin/setupvars.sh
28 | ```
29 |
30 | **Option 2. Set up the environment permanently.**
31 |
32 | *Step 1.* For Linux:
33 | ```shell
34 | vim ~/.bashrc
35 | ```
36 |
37 | *Step 2.* Add the following line to the file:
38 |
39 | ```shell
40 | source /opt/intel/openvino_2021/bin/setupvars.sh
41 | ```
42 |
43 | *Step 3.* Save and close the file, then run:
44 |
45 | ```shell
46 | source ~/.bashrc
47 | ```
48 |
49 |
50 | ## Convert model
51 |
52 | 1. Export ONNX model
53 |
54 | Please refer to the [ONNX tutorial](../../ONNXRuntime). **Note that you should set --opset to 10, otherwise the next step will fail.**
55 |
56 | 2. Convert ONNX to OpenVINO
57 |
58 | ``` shell
59 | cd <INSTALL_DIR>/openvino_2021/deployment_tools/model_optimizer
60 | ```
61 |
62 | Install the requirements for the conversion tool:
63 |
64 | ```shell
65 | sudo ./install_prerequisites/install_prerequisites_onnx.sh
66 | ```
67 |
68 | Then convert the model:
69 | ```shell
70 | python3 mo.py --input_model <ONNX_MODEL> --input_shape <INPUT_SHAPE> [--data_type FP16]
71 | ```
72 | For example:
73 | ```shell
74 | python3 mo.py --input_model yolox.onnx --input_shape [1,3,640,640] --data_type FP16 --output_dir converted_output
75 | ```
76 |
77 | ## Demo
78 |
79 | ### Python
80 |
81 | ```shell
82 | python openvino_inference.py -m <XML_MODEL_PATH> -i <IMAGE_PATH>
83 | ```
84 | or
85 | ```shell
86 | python openvino_inference.py -m <XML_MODEL_PATH> -i <IMAGE_PATH> -o <OUTPUT_DIR> -s <SCORE_THR> -d <DEVICE>
87 | ```
88 |
89 |
--------------------------------------------------------------------------------
/exps/example/yolox_voc/yolox_voc_s.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | import os
3 | import random
4 | import torch
5 | import torch.nn as nn
6 | import torch.distributed as dist
7 |
8 | from yolox.exp import Exp as MyExp
9 | from yolox.data import get_yolox_datadir
10 |
11 | class Exp(MyExp):
12 | def __init__(self):
13 | super(Exp, self).__init__()
14 | self.num_classes = 20
15 | self.depth = 0.33
16 | self.width = 0.50
17 | self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
18 |
19 | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
20 | from yolox.data import (
21 | VOCDetection,
22 | TrainTransform,
23 | YoloBatchSampler,
24 | DataLoader,
25 | InfiniteSampler,
26 | MosaicDetection,
27 | )
28 |
29 | dataset = VOCDetection(
30 | data_dir=os.path.join(get_yolox_datadir(), "VOCdevkit"),
31 | image_sets=[('2007', 'trainval'), ('2012', 'trainval')],
32 | img_size=self.input_size,
33 | preproc=TrainTransform(
34 | rgb_means=(0.485, 0.456, 0.406),
35 | std=(0.229, 0.224, 0.225),
36 | max_labels=50,
37 | ),
38 | )
39 |
40 | dataset = MosaicDetection(
41 | dataset,
42 | mosaic=not no_aug,
43 | img_size=self.input_size,
44 | preproc=TrainTransform(
45 | rgb_means=(0.485, 0.456, 0.406),
46 | std=(0.229, 0.224, 0.225),
47 | max_labels=120,
48 | ),
49 | degrees=self.degrees,
50 | translate=self.translate,
51 | scale=self.scale,
52 | shear=self.shear,
53 | perspective=self.perspective,
54 | enable_mixup=self.enable_mixup,
55 | )
56 |
57 | self.dataset = dataset
58 |
59 | if is_distributed:
60 | batch_size = batch_size // dist.get_world_size()
61 |
62 | sampler = InfiniteSampler(
63 | len(self.dataset), seed=self.seed if self.seed else 0
64 | )
65 |
66 | batch_sampler = YoloBatchSampler(
67 | sampler=sampler,
68 | batch_size=batch_size,
69 | drop_last=False,
70 | input_dimension=self.input_size,
71 | mosaic=not no_aug,
72 | )
73 |
74 | dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
75 | dataloader_kwargs["batch_sampler"] = batch_sampler
76 | train_loader = DataLoader(self.dataset, **dataloader_kwargs)
77 |
78 | return train_loader
79 |
80 | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
81 | from yolox.data import VOCDetection, ValTransform
82 |
83 | valdataset = VOCDetection(
84 | data_dir=os.path.join(get_yolox_datadir(), "VOCdevkit"),
85 | image_sets=[('2007', 'test')],
86 | img_size=self.test_size,
87 | preproc=ValTransform(
88 | rgb_means=(0.485, 0.456, 0.406),
89 | std=(0.229, 0.224, 0.225),
90 | ),
91 | )
92 |
93 | if is_distributed:
94 | batch_size = batch_size // dist.get_world_size()
95 | sampler = torch.utils.data.distributed.DistributedSampler(
96 | valdataset, shuffle=False
97 | )
98 | else:
99 | sampler = torch.utils.data.SequentialSampler(valdataset)
100 |
101 | dataloader_kwargs = {
102 | "num_workers": self.data_num_workers,
103 | "pin_memory": True,
104 | "sampler": sampler,
105 | }
106 | dataloader_kwargs["batch_size"] = batch_size
107 | val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
108 |
109 | return val_loader
110 |
111 | def get_evaluator(self, batch_size, is_distributed, testdev=False):
112 | from yolox.evaluators import VOCEvaluator
113 |
114 | val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
115 | evaluator = VOCEvaluator(
116 | dataloader=val_loader,
117 | img_size=self.test_size,
118 | confthre=self.test_conf,
119 | nmsthre=self.nmsthre,
120 | num_classes=self.num_classes,
121 | )
122 | return evaluator
123 |
--------------------------------------------------------------------------------
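A minimal sketch of using the experiment above (it assumes VOC2007/VOC2012 are already unpacked as `VOCdevkit` under the YOLOX data directory, and that the file sits at its usual path in the checkout):

```python
from yolox.exp import get_exp

# Load the experiment file above by its path in the checkout.
exp = get_exp("exps/example/yolox_voc/yolox_voc_s.py", None)
print(exp.exp_name, exp.num_classes)  # yolox_voc_s 20

# Mosaic-augmented training loader (single process, no distributed sampler).
train_loader = exp.get_data_loader(batch_size=8, is_distributed=False)

# VOC2007-test evaluator built on top of get_eval_loader().
evaluator = exp.get_evaluator(batch_size=8, is_distributed=False)
```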
/yolox/data/datasets/datasets_wrapper.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 |
5 | import bisect
6 | from functools import wraps
7 |
8 | from torch.utils.data.dataset import ConcatDataset as torchConcatDataset
9 | from torch.utils.data.dataset import Dataset as torchDataset
10 |
11 |
12 | class ConcatDataset(torchConcatDataset):
13 | def __init__(self, datasets):
14 | super(ConcatDataset, self).__init__(datasets)
15 | if hasattr(self.datasets[0], "input_dim"):
16 | self._input_dim = self.datasets[0].input_dim
17 | self.input_dim = self.datasets[0].input_dim
18 |
19 | def pull_item(self, idx):
20 | if idx < 0:
21 | if -idx > len(self):
22 | raise ValueError(
23 | "absolute value of index should not exceed dataset length"
24 | )
25 | idx = len(self) + idx
26 | dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
27 | if dataset_idx == 0:
28 | sample_idx = idx
29 | else:
30 | sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
31 | return self.datasets[dataset_idx].pull_item(sample_idx)
32 |
33 |
34 | class MixConcatDataset(torchConcatDataset):
35 | def __init__(self, datasets):
36 | super(MixConcatDataset, self).__init__(datasets)
37 | if hasattr(self.datasets[0], "input_dim"):
38 | self._input_dim = self.datasets[0].input_dim
39 | self.input_dim = self.datasets[0].input_dim
40 |
41 | def __getitem__(self, index):
42 |
43 | if not isinstance(index, int):
44 | idx = index[1]
45 | if idx < 0:
46 | if -idx > len(self):
47 | raise ValueError(
48 | "absolute value of index should not exceed dataset length"
49 | )
50 | idx = len(self) + idx
51 | dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
52 | if dataset_idx == 0:
53 | sample_idx = idx
54 | else:
55 | sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
56 | if not isinstance(index, int):
57 | index = (index[0], sample_idx, index[2])
58 |
59 | return self.datasets[dataset_idx][index]
60 |
61 |
62 | class Dataset(torchDataset):
63 | """ This class is a subclass of the base :class:`torch.utils.data.Dataset`,
64 | that enables on the fly resizing of the ``input_dim``.
65 |
66 | Args:
67 | input_dimension (tuple): (width,height) tuple with default dimensions of the network
68 | """
69 |
70 | def __init__(self, input_dimension, mosaic=True):
71 | super().__init__()
72 | self.__input_dim = input_dimension[:2]
73 | self._mosaic = mosaic
74 |
75 | @property
76 | def input_dim(self):
77 | """
78 | Dimension that can be used by transforms to set the correct image size, etc.
79 | This allows transforms to have a single source of truth
80 | for the input dimension of the network.
81 |
82 |         Returns:
83 |             tuple: the current (width, height)
84 | """
85 | if hasattr(self, "_input_dim"):
86 | return self._input_dim
87 | return self.__input_dim
88 |
89 | @staticmethod
90 | def resize_getitem(getitem_fn):
91 | """
92 | Decorator method that needs to be used around the ``__getitem__`` method. |br|
93 | This decorator enables the on the fly resizing of
94 | the ``input_dim`` with our :class:`~lightnet.data.DataLoader` class.
95 |
96 | Example:
97 | >>> class CustomSet(ln.data.Dataset):
98 | ... def __len__(self):
99 | ... return 10
100 | ... @ln.data.Dataset.resize_getitem
101 | ... def __getitem__(self, index):
102 | ... # Should return (image, anno) but here we return input_dim
103 | ... return self.input_dim
104 | >>> data = CustomSet((200,200))
105 | >>> data[0]
106 | (200, 200)
107 |             >>> data[(480,320), 0, True]
108 | (480, 320)
109 | """
110 |
111 | @wraps(getitem_fn)
112 | def wrapper(self, index):
113 | if not isinstance(index, int):
114 | has_dim = True
115 | self._input_dim = index[0]
116 | self._mosaic = index[2]
117 | index = index[1]
118 | else:
119 | has_dim = False
120 |
121 | ret_val = getitem_fn(self, index)
122 |
123 | if has_dim:
124 | del self._input_dim
125 |
126 | return ret_val
127 |
128 | return wrapper
129 |
--------------------------------------------------------------------------------
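A minimal sketch of the `resize_getitem` contract above, using a toy dataset (hypothetical; a real `__getitem__` would return `(image, target)`). Indexing with an `(input_dim, index, mosaic)` tuple injects a per-batch input size, while a plain integer index falls back to the default:

```python
from yolox.data.datasets.datasets_wrapper import Dataset

class ToyDataset(Dataset):
    def __len__(self):
        return 4

    @Dataset.resize_getitem
    def __getitem__(self, index):
        # Return the dimension actually in effect for this call.
        return self.input_dim

data = ToyDataset((640, 640))
print(data[0])                    # (640, 640) -- the default input_dim
print(data[(480, 320), 0, True])  # (480, 320) -- injected by the decorator
```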
/yolox/utils/boxes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 |
5 | import numpy as np
6 |
7 | import torch
8 | import torchvision
9 |
10 | __all__ = [
11 | "filter_box", "postprocess", "bboxes_iou", "matrix_iou",
12 | "adjust_box_anns", "xyxy2xywh",
13 | ]
14 |
15 |
16 | def filter_box(output, scale_range):
17 | """
18 |     output: tensor of shape (N, 5 + num_classes)
19 | """
20 | min_scale, max_scale = scale_range
21 | w = output[:, 2] - output[:, 0]
22 | h = output[:, 3] - output[:, 1]
23 | keep = (w * h > min_scale * min_scale) & (w * h < max_scale * max_scale)
24 | return output[keep]
25 |
26 |
27 | def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):
28 |     # Convert predicted boxes from (cx, cy, w, h) to corner format (x1, y1, x2, y2).
29 |     box_corner = prediction.new(prediction.shape)  # new tensor with the same dtype/device and shape
30 |     box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2  # x1 = cx - w/2
31 |     box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2  # y1 = cy - h/2; indexed as [image, anchor, attribute]
32 |     box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2  # x2 = cx + w/2
33 |     box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2  # y2 = cy + h/2
34 |     prediction[:, :, :4] = box_corner[:, :, :4]
35 |
36 |     output = [None for _ in range(len(prediction))]  # one output slot per image in the batch
37 | for i, image_pred in enumerate(prediction):
38 |
39 | # If none are remaining => process next image
40 | if not image_pred.size(0):
41 | continue
42 | # Get score and class with highest confidence
43 | class_conf, class_pred = torch.max(image_pred[:, 5: 5 + num_classes], 1, keepdim=True)
44 |
45 |         conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= conf_thre).squeeze()  # keep rows where obj_conf * class_conf clears the threshold
46 | # _, conf_mask = torch.topk((image_pred[:, 4] * class_conf.squeeze()), 1000)
47 | # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
48 | detections = torch.cat((image_pred[:, :5], class_conf, class_pred.float()), 1)
49 |         detections = detections[conf_mask]  # kept rows: (x1, y1, x2, y2, obj_conf, class_conf, class_pred_index)
50 | if not detections.size(0):
51 | continue
52 |
53 | nms_out_index = torchvision.ops.batched_nms(
54 | detections[:, :4],
55 | detections[:, 4] * detections[:, 5],
56 | detections[:, 6],
57 | nms_thre,
58 | )
59 | detections = detections[nms_out_index]
60 | if output[i] is None:
61 | output[i] = detections
62 | else:
63 | output[i] = torch.cat((output[i], detections))
64 |
65 |     return output  # list with one detections tensor (or None) per image in the batch
66 |
67 |
68 | def bboxes_iou(bboxes_a, bboxes_b, xyxy=True):
69 | if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4:
70 | raise IndexError
71 |
72 | if xyxy:
73 | tl = torch.max(bboxes_a[:, None, :2], bboxes_b[:, :2])
74 | br = torch.min(bboxes_a[:, None, 2:], bboxes_b[:, 2:])
75 | area_a = torch.prod(bboxes_a[:, 2:] - bboxes_a[:, :2], 1)
76 | area_b = torch.prod(bboxes_b[:, 2:] - bboxes_b[:, :2], 1)
77 | else:
78 | tl = torch.max(
79 | (bboxes_a[:, None, :2] - bboxes_a[:, None, 2:] / 2),
80 | (bboxes_b[:, :2] - bboxes_b[:, 2:] / 2),
81 | )
82 | br = torch.min(
83 | (bboxes_a[:, None, :2] + bboxes_a[:, None, 2:] / 2),
84 | (bboxes_b[:, :2] + bboxes_b[:, 2:] / 2),
85 | )
86 |
87 | area_a = torch.prod(bboxes_a[:, 2:], 1)
88 | area_b = torch.prod(bboxes_b[:, 2:], 1)
89 | en = (tl < br).type(tl.type()).prod(dim=2)
90 | area_i = torch.prod(br - tl, 2) * en # * ((tl < br).all())
91 | return area_i / (area_a[:, None] + area_b - area_i)
92 |
93 |
94 | def matrix_iou(a, b):
95 | """
96 |     Return IoU of a and b; NumPy version for data augmentation.
97 | """
98 | lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
99 | rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
100 |
101 | area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
102 | area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
103 | area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
104 | return area_i / (area_a[:, np.newaxis] + area_b - area_i + 1e-12)
105 |
106 |
107 | def adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max):
108 | bbox[:, 0::2] = np.clip(bbox[:, 0::2] * scale_ratio + padw, 0, w_max)
109 | bbox[:, 1::2] = np.clip(bbox[:, 1::2] * scale_ratio + padh, 0, h_max)
110 | return bbox
111 |
112 |
113 | def xyxy2xywh(bboxes):
114 | bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]
115 | bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]
116 | return bboxes
117 |
--------------------------------------------------------------------------------
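A minimal sketch of `postprocess()` above on random network output (shapes only; all values are dummies). `prediction` is `(batch, n_anchors, 5 + num_classes)` with rows `(cx, cy, w, h, obj_conf, cls_0, ..., cls_{C-1})`:

```python
import torch

from yolox.utils.boxes import postprocess

num_classes = 80
pred = torch.rand(1, 8400, 5 + num_classes)  # e.g. a 640x640 input yields 8400 anchors
pred[..., 0:2] *= 640                        # box centers in pixels
pred[..., 2:4] *= 64                         # box widths/heights in pixels

out = postprocess(pred, num_classes, conf_thre=0.3, nms_thre=0.45)
if out[0] is not None:
    # rows: (x1, y1, x2, y2, obj_conf, class_conf, class_pred_index)
    print(out[0].shape)
```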
/demo/ncnn/android/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Attempt to set APP_HOME
10 | # Resolve links: $0 may be a link
11 | PRG="$0"
12 | # Need this for relative symlinks.
13 | while [ -h "$PRG" ] ; do
14 | ls=`ls -ld "$PRG"`
15 | link=`expr "$ls" : '.*-> \(.*\)$'`
16 | if expr "$link" : '/.*' > /dev/null; then
17 | PRG="$link"
18 | else
19 | PRG=`dirname "$PRG"`"/$link"
20 | fi
21 | done
22 | SAVED="`pwd`"
23 | cd "`dirname \"$PRG\"`/" >/dev/null
24 | APP_HOME="`pwd -P`"
25 | cd "$SAVED" >/dev/null
26 |
27 | APP_NAME="Gradle"
28 | APP_BASE_NAME=`basename "$0"`
29 |
30 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
31 | DEFAULT_JVM_OPTS=""
32 |
33 | # Use the maximum available, or set MAX_FD != -1 to use that value.
34 | MAX_FD="maximum"
35 |
36 | warn () {
37 | echo "$*"
38 | }
39 |
40 | die () {
41 | echo
42 | echo "$*"
43 | echo
44 | exit 1
45 | }
46 |
47 | # OS specific support (must be 'true' or 'false').
48 | cygwin=false
49 | msys=false
50 | darwin=false
51 | nonstop=false
52 | case "`uname`" in
53 | CYGWIN* )
54 | cygwin=true
55 | ;;
56 | Darwin* )
57 | darwin=true
58 | ;;
59 | MINGW* )
60 | msys=true
61 | ;;
62 | NONSTOP* )
63 | nonstop=true
64 | ;;
65 | esac
66 |
67 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
68 |
69 | # Determine the Java command to use to start the JVM.
70 | if [ -n "$JAVA_HOME" ] ; then
71 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
72 | # IBM's JDK on AIX uses strange locations for the executables
73 | JAVACMD="$JAVA_HOME/jre/sh/java"
74 | else
75 | JAVACMD="$JAVA_HOME/bin/java"
76 | fi
77 | if [ ! -x "$JAVACMD" ] ; then
78 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
79 |
80 | Please set the JAVA_HOME variable in your environment to match the
81 | location of your Java installation."
82 | fi
83 | else
84 | JAVACMD="java"
85 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
86 |
87 | Please set the JAVA_HOME variable in your environment to match the
88 | location of your Java installation."
89 | fi
90 |
91 | # Increase the maximum file descriptors if we can.
92 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
93 | MAX_FD_LIMIT=`ulimit -H -n`
94 | if [ $? -eq 0 ] ; then
95 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
96 | MAX_FD="$MAX_FD_LIMIT"
97 | fi
98 | ulimit -n $MAX_FD
99 | if [ $? -ne 0 ] ; then
100 | warn "Could not set maximum file descriptor limit: $MAX_FD"
101 | fi
102 | else
103 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
104 | fi
105 | fi
106 |
107 | # For Darwin, add options to specify how the application appears in the dock
108 | if $darwin; then
109 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
110 | fi
111 |
112 | # For Cygwin, switch paths to Windows format before running java
113 | if $cygwin ; then
114 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
115 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
116 | JAVACMD=`cygpath --unix "$JAVACMD"`
117 |
118 | # We build the pattern for arguments to be converted via cygpath
119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
120 | SEP=""
121 | for dir in $ROOTDIRSRAW ; do
122 | ROOTDIRS="$ROOTDIRS$SEP$dir"
123 | SEP="|"
124 | done
125 | OURCYGPATTERN="(^($ROOTDIRS))"
126 | # Add a user-defined pattern to the cygpath arguments
127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
129 | fi
130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
131 | i=0
132 | for arg in "$@" ; do
133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
135 |
136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
138 | else
139 | eval `echo args$i`="\"$arg\""
140 | fi
141 | i=$((i+1))
142 | done
143 | case $i in
144 | (0) set -- ;;
145 | (1) set -- "$args0" ;;
146 | (2) set -- "$args0" "$args1" ;;
147 | (3) set -- "$args0" "$args1" "$args2" ;;
148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
154 | esac
155 | fi
156 |
157 | # Escape application args
158 | save () {
159 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
160 | echo " "
161 | }
162 | APP_ARGS=$(save "$@")
163 |
164 | # Collect all arguments for the java command, following the shell quoting and substitution rules
165 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
166 |
167 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
168 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
169 | cd "$(dirname "$0")"
170 | fi
171 |
172 | exec "$JAVACMD" "$@"
173 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/Makefile:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "MinGW Makefiles" Generator, CMake Version 3.20
3 |
4 | # Default target executed when no arguments are given to make.
5 | default_target: all
6 | .PHONY : default_target
7 |
8 | # Allow only one "make -f Makefile2" at a time, but pass parallelism.
9 | .NOTPARALLEL:
10 |
11 | #=============================================================================
12 | # Special targets provided by cmake.
13 |
14 | # Disable implicit rules so canonical targets will work.
15 | .SUFFIXES:
16 |
17 | # Disable VCS-based implicit rules.
18 | % : %,v
19 |
20 | # Disable VCS-based implicit rules.
21 | % : RCS/%
22 |
23 | # Disable VCS-based implicit rules.
24 | % : RCS/%,v
25 |
26 | # Disable VCS-based implicit rules.
27 | % : SCCS/s.%
28 |
29 | # Disable VCS-based implicit rules.
30 | % : s.%
31 |
32 | .SUFFIXES: .hpux_make_needs_suffix_list
33 |
34 | # Command-line flag to silence nested $(MAKE).
35 | $(VERBOSE)MAKESILENT = -s
36 |
37 | #Suppress display of executed commands.
38 | $(VERBOSE).SILENT:
39 |
40 | # A target that is always out of date.
41 | cmake_force:
42 | .PHONY : cmake_force
43 |
44 | #=============================================================================
45 | # Set environment variables for the build.
46 |
47 | SHELL = cmd.exe
48 |
49 | # The CMake executable.
50 | CMAKE_COMMAND = "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe"
51 |
52 | # The command to remove a file.
53 | RM = "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe" -E rm -f
54 |
55 | # Escaping for special characters.
56 | EQUALS = =
57 |
58 | # The top-level source directory on which CMake was run.
59 | CMAKE_SOURCE_DIR = E:\YOLOX-main\demo\TensorRT\cpp
60 |
61 | # The top-level build directory on which CMake was run.
62 | CMAKE_BINARY_DIR = E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug
63 |
64 | #=============================================================================
65 | # Targets provided globally by CMake.
66 |
67 | # Special rule for the target edit_cache
68 | edit_cache:
69 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "No interactive CMake dialog available..."
70 | "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe" -E echo "No interactive CMake dialog available."
71 | .PHONY : edit_cache
72 |
73 | # Special rule for the target edit_cache
74 | edit_cache/fast: edit_cache
75 | .PHONY : edit_cache/fast
76 |
77 | # Special rule for the target rebuild_cache
78 | rebuild_cache:
79 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake to regenerate build system..."
80 | "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe" --regenerate-during-build -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)
81 | .PHONY : rebuild_cache
82 |
83 | # Special rule for the target rebuild_cache
84 | rebuild_cache/fast: rebuild_cache
85 | .PHONY : rebuild_cache/fast
86 |
87 | # The main all target
88 | all: cmake_check_build_system
89 | $(CMAKE_COMMAND) -E cmake_progress_start E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug\CMakeFiles E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug\\CMakeFiles\progress.marks
90 | $(MAKE) $(MAKESILENT) -f CMakeFiles\Makefile2 all
91 | $(CMAKE_COMMAND) -E cmake_progress_start E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug\CMakeFiles 0
92 | .PHONY : all
93 |
94 | # The main clean target
95 | clean:
96 | $(MAKE) $(MAKESILENT) -f CMakeFiles\Makefile2 clean
97 | .PHONY : clean
98 |
99 | # The main clean target
100 | clean/fast: clean
101 | .PHONY : clean/fast
102 |
103 | # Prepare targets for installation.
104 | preinstall: all
105 | $(MAKE) $(MAKESILENT) -f CMakeFiles\Makefile2 preinstall
106 | .PHONY : preinstall
107 |
108 | # Prepare targets for installation.
109 | preinstall/fast:
110 | $(MAKE) $(MAKESILENT) -f CMakeFiles\Makefile2 preinstall
111 | .PHONY : preinstall/fast
112 |
113 | # clear depends
114 | depend:
115 | $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles\Makefile.cmake 1
116 | .PHONY : depend
117 |
118 | #=============================================================================
119 | # Target rules for targets named yolox
120 |
121 | # Build rule for target.
122 | yolox: cmake_check_build_system
123 | $(MAKE) $(MAKESILENT) -f CMakeFiles\Makefile2 yolox
124 | .PHONY : yolox
125 |
126 | # fast build rule for target.
127 | yolox/fast:
128 | $(MAKE) $(MAKESILENT) -f CMakeFiles\yolox.dir\build.make CMakeFiles/yolox.dir/build
129 | .PHONY : yolox/fast
130 |
131 | yolox.obj: yolox.cpp.obj
132 | .PHONY : yolox.obj
133 |
134 | # target to build an object file
135 | yolox.cpp.obj:
136 | $(MAKE) $(MAKESILENT) -f CMakeFiles\yolox.dir\build.make CMakeFiles/yolox.dir/yolox.cpp.obj
137 | .PHONY : yolox.cpp.obj
138 |
139 | yolox.i: yolox.cpp.i
140 | .PHONY : yolox.i
141 |
142 | # target to preprocess a source file
143 | yolox.cpp.i:
144 | $(MAKE) $(MAKESILENT) -f CMakeFiles\yolox.dir\build.make CMakeFiles/yolox.dir/yolox.cpp.i
145 | .PHONY : yolox.cpp.i
146 |
147 | yolox.s: yolox.cpp.s
148 | .PHONY : yolox.s
149 |
150 | # target to generate assembly for a file
151 | yolox.cpp.s:
152 | $(MAKE) $(MAKESILENT) -f CMakeFiles\yolox.dir\build.make CMakeFiles/yolox.dir/yolox.cpp.s
153 | .PHONY : yolox.cpp.s
154 |
155 | # Help Target
156 | help:
157 | @echo The following are some of the valid targets for this Makefile:
158 | @echo ... all (the default if no target is provided)
159 | @echo ... clean
160 | @echo ... depend
161 | @echo ... edit_cache
162 | @echo ... rebuild_cache
163 | @echo ... yolox
164 | @echo ... yolox.obj
165 | @echo ... yolox.i
166 | @echo ... yolox.s
167 | .PHONY : help
168 |
169 |
170 |
171 | #=============================================================================
172 | # Special targets to cleanup operation of make.
173 |
174 | # Special rule to run CMake to check the build system integrity.
175 | # No rule that depends on this can have commands that come from listfiles
176 | # because they might be regenerated.
177 | cmake_check_build_system:
178 | $(CMAKE_COMMAND) -S$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles\Makefile.cmake 0
179 | .PHONY : cmake_check_build_system
180 |
181 |
--------------------------------------------------------------------------------
/demo/TensorRT/cpp/cmake-build-debug/CMakeFiles/yolox.dir/build.make:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "MinGW Makefiles" Generator, CMake Version 3.20
3 |
4 | # Delete rule output on recipe failure.
5 | .DELETE_ON_ERROR:
6 |
7 | #=============================================================================
8 | # Special targets provided by cmake.
9 |
10 | # Disable implicit rules so canonical targets will work.
11 | .SUFFIXES:
12 |
13 | # Disable VCS-based implicit rules.
14 | % : %,v
15 |
16 | # Disable VCS-based implicit rules.
17 | % : RCS/%
18 |
19 | # Disable VCS-based implicit rules.
20 | % : RCS/%,v
21 |
22 | # Disable VCS-based implicit rules.
23 | % : SCCS/s.%
24 |
25 | # Disable VCS-based implicit rules.
26 | % : s.%
27 |
28 | .SUFFIXES: .hpux_make_needs_suffix_list
29 |
30 | # Command-line flag to silence nested $(MAKE).
31 | $(VERBOSE)MAKESILENT = -s
32 |
33 | #Suppress display of executed commands.
34 | $(VERBOSE).SILENT:
35 |
36 | # A target that is always out of date.
37 | cmake_force:
38 | .PHONY : cmake_force
39 |
40 | #=============================================================================
41 | # Set environment variables for the build.
42 |
43 | SHELL = cmd.exe
44 |
45 | # The CMake executable.
46 | CMAKE_COMMAND = "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe"
47 |
48 | # The command to remove a file.
49 | RM = "D:\CLion 2021.2\bin\cmake\win\bin\cmake.exe" -E rm -f
50 |
51 | # Escaping for special characters.
52 | EQUALS = =
53 |
54 | # The top-level source directory on which CMake was run.
55 | CMAKE_SOURCE_DIR = E:\YOLOX-main\demo\TensorRT\cpp
56 |
57 | # The top-level build directory on which CMake was run.
58 | CMAKE_BINARY_DIR = E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug
59 |
60 | # Include any dependencies generated for this target.
61 | include CMakeFiles/yolox.dir/depend.make
62 | # Include the progress variables for this target.
63 | include CMakeFiles/yolox.dir/progress.make
64 |
65 | # Include the compile flags for this target's objects.
66 | include CMakeFiles/yolox.dir/flags.make
67 |
68 | CMakeFiles/yolox.dir/yolox.cpp.obj: CMakeFiles/yolox.dir/flags.make
69 | CMakeFiles/yolox.dir/yolox.cpp.obj: CMakeFiles/yolox.dir/includes_CXX.rsp
70 | CMakeFiles/yolox.dir/yolox.cpp.obj: ../yolox.cpp
71 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug\CMakeFiles --progress-num=$(CMAKE_PROGRESS_1) "Building CXX object CMakeFiles/yolox.dir/yolox.cpp.obj"
72 | C:\PROGRA~1\mingw64\bin\G__~1.EXE $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles\yolox.dir\yolox.cpp.obj -c E:\YOLOX-main\demo\TensorRT\cpp\yolox.cpp
73 |
74 | CMakeFiles/yolox.dir/yolox.cpp.i: cmake_force
75 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/yolox.dir/yolox.cpp.i"
76 | C:\PROGRA~1\mingw64\bin\G__~1.EXE $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E E:\YOLOX-main\demo\TensorRT\cpp\yolox.cpp > CMakeFiles\yolox.dir\yolox.cpp.i
77 |
78 | CMakeFiles/yolox.dir/yolox.cpp.s: cmake_force
79 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/yolox.dir/yolox.cpp.s"
80 | C:\PROGRA~1\mingw64\bin\G__~1.EXE $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S E:\YOLOX-main\demo\TensorRT\cpp\yolox.cpp -o CMakeFiles\yolox.dir\yolox.cpp.s
81 |
82 | # Object files for target yolox
83 | yolox_OBJECTS = \
84 | "CMakeFiles/yolox.dir/yolox.cpp.obj"
85 |
86 | # External object files for target yolox
87 | yolox_EXTERNAL_OBJECTS =
88 |
89 | yolox.exe: CMakeFiles/yolox.dir/yolox.cpp.obj
90 | yolox.exe: CMakeFiles/yolox.dir/build.make
91 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_dnn348.dll.a
92 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_highgui348.dll.a
93 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_ml348.dll.a
94 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_objdetect348.dll.a
95 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_shape348.dll.a
96 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_stitching348.dll.a
97 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_superres348.dll.a
98 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_videostab348.dll.a
99 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_calib3d348.dll.a
100 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_features2d348.dll.a
101 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_flann348.dll.a
102 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_photo348.dll.a
103 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_video348.dll.a
104 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_videoio348.dll.a
105 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_imgcodecs348.dll.a
106 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_imgproc348.dll.a
107 | yolox.exe: D:/opencv/build/x64/MinGW/install/x64/mingw/lib/libopencv_core348.dll.a
108 | yolox.exe: CMakeFiles/yolox.dir/linklibs.rsp
109 | yolox.exe: CMakeFiles/yolox.dir/objects1.rsp
110 | yolox.exe: CMakeFiles/yolox.dir/link.txt
111 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --bold --progress-dir=E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug\CMakeFiles --progress-num=$(CMAKE_PROGRESS_2) "Linking CXX executable yolox.exe"
112 | $(CMAKE_COMMAND) -E cmake_link_script CMakeFiles\yolox.dir\link.txt --verbose=$(VERBOSE)
113 |
114 | # Rule to build all files generated by this target.
115 | CMakeFiles/yolox.dir/build: yolox.exe
116 | .PHONY : CMakeFiles/yolox.dir/build
117 |
118 | CMakeFiles/yolox.dir/clean:
119 | $(CMAKE_COMMAND) -P CMakeFiles\yolox.dir\cmake_clean.cmake
120 | .PHONY : CMakeFiles/yolox.dir/clean
121 |
122 | CMakeFiles/yolox.dir/depend:
123 | $(CMAKE_COMMAND) -E cmake_depends "MinGW Makefiles" E:\YOLOX-main\demo\TensorRT\cpp E:\YOLOX-main\demo\TensorRT\cpp E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug E:\YOLOX-main\demo\TensorRT\cpp\cmake-build-debug\CMakeFiles\yolox.dir\DependInfo.cmake --color=$(COLOR)
124 | .PHONY : CMakeFiles/yolox.dir/depend
125 |
126 |
--------------------------------------------------------------------------------