├── Detector
├── CITATION.cff
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── best.pt
├── custom.yaml
├── docker
│ ├── Dockerfile
│ ├── Dockerfile-arm64
│ ├── Dockerfile-cpu
│ ├── Dockerfile-jetson
│ └── Dockerfile-python
├── docs
│ ├── CNAME
│ ├── README.md
│ ├── SECURITY.md
│ ├── assets
│ │ └── favicon.ico
│ ├── build_reference.py
│ ├── datasets
│ │ ├── classify
│ │ │ ├── caltech101.md
│ │ │ ├── caltech256.md
│ │ │ ├── cifar10.md
│ │ │ ├── cifar100.md
│ │ │ ├── fashion-mnist.md
│ │ │ ├── imagenet.md
│ │ │ ├── imagenet10.md
│ │ │ ├── imagenette.md
│ │ │ ├── imagewoof.md
│ │ │ ├── index.md
│ │ │ └── mnist.md
│ │ ├── detect
│ │ │ ├── argoverse.md
│ │ │ ├── coco.md
│ │ │ ├── coco8.md
│ │ │ ├── globalwheat2020.md
│ │ │ ├── index.md
│ │ │ ├── objects365.md
│ │ │ ├── open-images-v7.md
│ │ │ ├── sku-110k.md
│ │ │ ├── visdrone.md
│ │ │ ├── voc.md
│ │ │ └── xview.md
│ │ ├── index.md
│ │ ├── pose
│ │ │ ├── coco.md
│ │ │ ├── coco8-pose.md
│ │ │ └── index.md
│ │ ├── segment
│ │ │ ├── coco.md
│ │ │ ├── coco8-seg.md
│ │ │ └── index.md
│ │ └── track
│ │ │ └── index.md
│ ├── guides
│ │ ├── index.md
│ │ └── kfold-cross-validation.md
│ ├── help
│ │ ├── CI.md
│ │ ├── CLA.md
│ │ ├── FAQ.md
│ │ ├── code_of_conduct.md
│ │ ├── contributing.md
│ │ ├── environmental-health-safety.md
│ │ ├── index.md
│ │ └── minimum_reproducible_example.md
│ ├── hub
│ │ ├── app
│ │ │ ├── android.md
│ │ │ ├── index.md
│ │ │ └── ios.md
│ │ ├── datasets.md
│ │ ├── index.md
│ │ ├── inference_api.md
│ │ ├── integrations.md
│ │ ├── models.md
│ │ ├── projects.md
│ │ └── quickstart.md
│ ├── index.md
│ ├── integrations
│ │ ├── index.md
│ │ ├── openvino.md
│ │ └── ray-tune.md
│ ├── models
│ │ ├── fast-sam.md
│ │ ├── index.md
│ │ ├── mobile-sam.md
│ │ ├── rtdetr.md
│ │ ├── sam.md
│ │ ├── yolo-nas.md
│ │ ├── yolov3.md
│ │ ├── yolov4.md
│ │ ├── yolov5.md
│ │ ├── yolov6.md
│ │ ├── yolov7.md
│ │ └── yolov8.md
│ ├── modes
│ │ ├── benchmark.md
│ │ ├── export.md
│ │ ├── index.md
│ │ ├── predict.md
│ │ ├── track.md
│ │ ├── train.md
│ │ └── val.md
│ ├── overrides
│ │ └── partials
│ │ │ ├── comments.html
│ │ │ └── source-file.html
│ ├── quickstart.md
│ ├── reference
│ │ ├── cfg
│ │ │ └── __init__.md
│ │ ├── data
│ │ │ ├── annotator.md
│ │ │ ├── augment.md
│ │ │ ├── base.md
│ │ │ ├── build.md
│ │ │ ├── converter.md
│ │ │ ├── dataset.md
│ │ │ ├── loaders.md
│ │ │ └── utils.md
│ │ ├── engine
│ │ │ ├── exporter.md
│ │ │ ├── model.md
│ │ │ ├── predictor.md
│ │ │ ├── results.md
│ │ │ ├── trainer.md
│ │ │ └── validator.md
│ │ ├── hub
│ │ │ ├── __init__.md
│ │ │ ├── auth.md
│ │ │ ├── session.md
│ │ │ └── utils.md
│ │ ├── models
│ │ │ ├── fastsam
│ │ │ │ ├── model.md
│ │ │ │ ├── predict.md
│ │ │ │ ├── prompt.md
│ │ │ │ ├── utils.md
│ │ │ │ └── val.md
│ │ │ ├── nas
│ │ │ │ ├── model.md
│ │ │ │ ├── predict.md
│ │ │ │ └── val.md
│ │ │ ├── rtdetr
│ │ │ │ ├── model.md
│ │ │ │ ├── predict.md
│ │ │ │ ├── train.md
│ │ │ │ └── val.md
│ │ │ ├── sam
│ │ │ │ ├── amg.md
│ │ │ │ ├── build.md
│ │ │ │ ├── model.md
│ │ │ │ ├── modules
│ │ │ │ │ ├── decoders.md
│ │ │ │ │ ├── encoders.md
│ │ │ │ │ ├── sam.md
│ │ │ │ │ ├── tiny_encoder.md
│ │ │ │ │ └── transformer.md
│ │ │ │ └── predict.md
│ │ │ ├── utils
│ │ │ │ ├── loss.md
│ │ │ │ └── ops.md
│ │ │ └── yolo
│ │ │ │ ├── classify
│ │ │ │ ├── predict.md
│ │ │ │ ├── train.md
│ │ │ │ └── val.md
│ │ │ │ ├── detect
│ │ │ │ ├── predict.md
│ │ │ │ ├── train.md
│ │ │ │ └── val.md
│ │ │ │ ├── model.md
│ │ │ │ ├── pose
│ │ │ │ ├── predict.md
│ │ │ │ ├── train.md
│ │ │ │ └── val.md
│ │ │ │ └── segment
│ │ │ │ ├── predict.md
│ │ │ │ ├── train.md
│ │ │ │ └── val.md
│ │ ├── nn
│ │ │ ├── autobackend.md
│ │ │ ├── modules
│ │ │ │ ├── block.md
│ │ │ │ ├── conv.md
│ │ │ │ ├── head.md
│ │ │ │ ├── transformer.md
│ │ │ │ └── utils.md
│ │ │ └── tasks.md
│ │ ├── trackers
│ │ │ ├── basetrack.md
│ │ │ ├── bot_sort.md
│ │ │ ├── byte_tracker.md
│ │ │ ├── track.md
│ │ │ └── utils
│ │ │ │ ├── gmc.md
│ │ │ │ ├── kalman_filter.md
│ │ │ │ └── matching.md
│ │ └── utils
│ │ │ ├── __init__.md
│ │ │ ├── autobatch.md
│ │ │ ├── benchmarks.md
│ │ │ ├── callbacks
│ │ │ ├── base.md
│ │ │ ├── clearml.md
│ │ │ ├── comet.md
│ │ │ ├── dvc.md
│ │ │ ├── hub.md
│ │ │ ├── mlflow.md
│ │ │ ├── neptune.md
│ │ │ ├── raytune.md
│ │ │ ├── tensorboard.md
│ │ │ └── wb.md
│ │ │ ├── checks.md
│ │ │ ├── dist.md
│ │ │ ├── downloads.md
│ │ │ ├── errors.md
│ │ │ ├── files.md
│ │ │ ├── instance.md
│ │ │ ├── loss.md
│ │ │ ├── metrics.md
│ │ │ ├── ops.md
│ │ │ ├── patches.md
│ │ │ ├── plotting.md
│ │ │ ├── tal.md
│ │ │ ├── torch_utils.md
│ │ │ └── tuner.md
│ ├── robots.txt
│ ├── stylesheets
│ │ └── style.css
│ ├── tasks
│ │ ├── classify.md
│ │ ├── detect.md
│ │ ├── index.md
│ │ ├── pose.md
│ │ └── segment.md
│ ├── usage
│ │ ├── callbacks.md
│ │ ├── cfg.md
│ │ ├── cli.md
│ │ ├── engine.md
│ │ └── python.md
│ └── yolov5
│ │ ├── environments
│ │ ├── aws_quickstart_tutorial.md
│ │ ├── docker_image_quickstart_tutorial.md
│ │ └── google_cloud_quickstart_tutorial.md
│ │ ├── index.md
│ │ ├── quickstart_tutorial.md
│ │ └── tutorials
│ │ ├── architecture_description.md
│ │ ├── clearml_logging_integration.md
│ │ ├── comet_logging_integration.md
│ │ ├── hyperparameter_evolution.md
│ │ ├── model_ensembling.md
│ │ ├── model_export.md
│ │ ├── model_pruning_and_sparsity.md
│ │ ├── multi_gpu_training.md
│ │ ├── neural_magic_pruning_quantization.md
│ │ ├── pytorch_hub_model_loading.md
│ │ ├── roboflow_datasets_integration.md
│ │ ├── running_on_jetson_nano.md
│ │ ├── test_time_augmentation.md
│ │ ├── tips_for_best_training_results.md
│ │ ├── train_custom_data.md
│ │ └── transfer_learning_with_frozen_layers.md
├── examples
│ ├── README.md
│ ├── YOLOv8-CPP-Inference
│ │ ├── CMakeLists.txt
│ │ ├── README.md
│ │ ├── inference.cpp
│ │ ├── inference.h
│ │ └── main.cpp
│ ├── YOLOv8-ONNXRuntime-CPP
│ │ ├── README.md
│ │ ├── inference.cpp
│ │ ├── inference.h
│ │ └── main.cpp
│ ├── YOLOv8-ONNXRuntime
│ │ ├── README.md
│ │ └── main.py
│ ├── YOLOv8-OpenCV-ONNX-Python
│ │ ├── README.md
│ │ └── main.py
│ ├── hub.ipynb
│ └── tutorial.ipynb
├── mkdocs.yml
├── requirements.txt
├── setup.cfg
├── setup.py
├── tests
│ ├── conftest.py
│ ├── test_cli.py
│ ├── test_engine.py
│ └── test_python.py
├── ultralytics
│ ├── __init__.py
│ ├── assets
│ │ ├── bus.jpg
│ │ └── zidane.jpg
│ ├── cfg
│ │ ├── __init__.py
│ │ ├── datasets
│ │ │ ├── Argoverse.yaml
│ │ │ ├── GlobalWheat2020.yaml
│ │ │ ├── ImageNet.yaml
│ │ │ ├── Objects365.yaml
│ │ │ ├── SKU-110K.yaml
│ │ │ ├── VOC.yaml
│ │ │ ├── VisDrone.yaml
│ │ │ ├── coco-pose.yaml
│ │ │ ├── coco.yaml
│ │ │ ├── coco128-seg.yaml
│ │ │ ├── coco128.yaml
│ │ │ ├── coco8-pose.yaml
│ │ │ ├── coco8-seg.yaml
│ │ │ ├── coco8.yaml
│ │ │ ├── open-images-v7.yaml
│ │ │ └── xView.yaml
│ │ ├── default.yaml
│ │ ├── models
│ │ │ ├── README.md
│ │ │ ├── rt-detr
│ │ │ │ ├── rtdetr-l.yaml
│ │ │ │ └── rtdetr-x.yaml
│ │ │ ├── v3
│ │ │ │ ├── yolov3-spp.yaml
│ │ │ │ ├── yolov3-tiny.yaml
│ │ │ │ └── yolov3.yaml
│ │ │ ├── v5
│ │ │ │ ├── yolov5-p6.yaml
│ │ │ │ └── yolov5.yaml
│ │ │ ├── v6
│ │ │ │ └── yolov6.yaml
│ │ │ └── v8
│ │ │ │ ├── yolov8-cls.yaml
│ │ │ │ ├── yolov8-p2.yaml
│ │ │ │ ├── yolov8-p6.yaml
│ │ │ │ ├── yolov8-pose-p6.yaml
│ │ │ │ ├── yolov8-pose.yaml
│ │ │ │ ├── yolov8-rtdetr.yaml
│ │ │ │ ├── yolov8-seg.yaml
│ │ │ │ └── yolov8.yaml
│ │ └── trackers
│ │ │ ├── botsort.yaml
│ │ │ └── bytetrack.yaml
│ ├── data
│ │ ├── __init__.py
│ │ ├── annotator.py
│ │ ├── augment.py
│ │ ├── base.py
│ │ ├── build.py
│ │ ├── converter.py
│ │ ├── dataloaders
│ │ │ └── __init__.py
│ │ ├── dataset.py
│ │ ├── loaders.py
│ │ ├── scripts
│ │ │ ├── download_weights.sh
│ │ │ ├── get_coco.sh
│ │ │ ├── get_coco128.sh
│ │ │ └── get_imagenet.sh
│ │ └── utils.py
│ ├── engine
│ │ ├── __init__.py
│ │ ├── exporter.py
│ │ ├── model.py
│ │ ├── predictor.py
│ │ ├── results.py
│ │ ├── trainer.py
│ │ └── validator.py
│ ├── hub
│ │ ├── __init__.py
│ │ ├── auth.py
│ │ ├── session.py
│ │ └── utils.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── fastsam
│ │ │ ├── __init__.py
│ │ │ ├── model.py
│ │ │ ├── predict.py
│ │ │ ├── prompt.py
│ │ │ ├── utils.py
│ │ │ └── val.py
│ │ ├── nas
│ │ │ ├── __init__.py
│ │ │ ├── model.py
│ │ │ ├── predict.py
│ │ │ └── val.py
│ │ ├── rtdetr
│ │ │ ├── __init__.py
│ │ │ ├── model.py
│ │ │ ├── predict.py
│ │ │ ├── train.py
│ │ │ └── val.py
│ │ ├── sam
│ │ │ ├── __init__.py
│ │ │ ├── amg.py
│ │ │ ├── build.py
│ │ │ ├── model.py
│ │ │ ├── modules
│ │ │ │ ├── __init__.py
│ │ │ │ ├── decoders.py
│ │ │ │ ├── encoders.py
│ │ │ │ ├── sam.py
│ │ │ │ ├── tiny_encoder.py
│ │ │ │ └── transformer.py
│ │ │ └── predict.py
│ │ ├── utils
│ │ │ ├── __init__.py
│ │ │ ├── loss.py
│ │ │ └── ops.py
│ │ └── yolo
│ │ │ ├── __init__.py
│ │ │ ├── classify
│ │ │ ├── __init__.py
│ │ │ ├── predict.py
│ │ │ ├── train.py
│ │ │ └── val.py
│ │ │ ├── detect
│ │ │ ├── __init__.py
│ │ │ ├── predict.py
│ │ │ ├── train.py
│ │ │ └── val.py
│ │ │ ├── model.py
│ │ │ ├── pose
│ │ │ ├── __init__.py
│ │ │ ├── predict.py
│ │ │ ├── train.py
│ │ │ └── val.py
│ │ │ └── segment
│ │ │ ├── __init__.py
│ │ │ ├── predict.py
│ │ │ ├── train.py
│ │ │ └── val.py
│ ├── nn
│ │ ├── __init__.py
│ │ ├── autobackend.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── block.py
│ │ │ ├── conv.py
│ │ │ ├── head.py
│ │ │ ├── transformer.py
│ │ │ └── utils.py
│ │ └── tasks.py
│ ├── trackers
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── basetrack.py
│ │ ├── bot_sort.py
│ │ ├── byte_tracker.py
│ │ ├── track.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── gmc.py
│ │ │ ├── kalman_filter.py
│ │ │ └── matching.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── autobatch.py
│ │ ├── benchmarks.py
│ │ ├── callbacks
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── clearml.py
│ │ │ ├── comet.py
│ │ │ ├── dvc.py
│ │ │ ├── hub.py
│ │ │ ├── mlflow.py
│ │ │ ├── neptune.py
│ │ │ ├── raytune.py
│ │ │ ├── tensorboard.py
│ │ │ └── wb.py
│ │ ├── checks.py
│ │ ├── dist.py
│ │ ├── downloads.py
│ │ ├── errors.py
│ │ ├── files.py
│ │ ├── instance.py
│ │ ├── loss.py
│ │ ├── metrics.py
│ │ ├── ops.py
│ │ ├── patches.py
│ │ ├── plotting.py
│ │ ├── tal.py
│ │ ├── torch_utils.py
│ │ └── tuner.py
│ └── yolo
│ │ ├── __init__.py
│ │ ├── cfg
│ │ └── __init__.py
│ │ ├── data
│ │ └── __init__.py
│ │ ├── engine
│ │ └── __init__.py
│ │ ├── utils
│ │ └── __init__.py
│ │ └── v8
│ │ └── __init__.py
└── yolov8n.pt
├── README.md
├── cldm
├── cldm.py
├── ddim_hacked.py
├── hack.py
├── logger.py
└── model.py
├── create_mask_overlapped_images.py
├── dataset_test_load.py
├── dataset_train_load.py
├── figures
├── .gitkeep
├── controlnet_diag1.svg
├── intro.png
└── intro2_controlnet.svg
├── ldm
├── data
│ ├── __init__.py
│ └── util.py
├── models
│ ├── autoencoder.py
│ └── diffusion
│ │ ├── __init__.py
│ │ ├── ddim.py
│ │ ├── ddpm.py
│ │ ├── dpm_solver
│ │ ├── __init__.py
│ │ ├── dpm_solver.py
│ │ └── sampler.py
│ │ ├── plms.py
│ │ └── sampling_util.py
├── modules
│ ├── attention.py
│ ├── diffusionmodules
│ │ ├── __init__.py
│ │ ├── model.py
│ │ ├── openaimodel.py
│ │ ├── upscaling.py
│ │ └── util.py
│ ├── distributions
│ │ ├── __init__.py
│ │ └── distributions.py
│ ├── ema.py
│ ├── encoders
│ │ ├── __init__.py
│ │ └── modules.py
│ └── image_degradation
│ │ ├── __init__.py
│ │ ├── bsrgan.py
│ │ ├── bsrgan_light.py
│ │ └── utils_image.py
└── util.py
├── mask_to_bb.py
├── models
└── cldm_v15.yaml
├── requirements.txt
├── sample_test.json
├── sample_train.json
├── tool_add_control.py
└── train.py
/Detector/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | preferred-citation:
3 | type: software
4 | message: If you use this software, please cite it as below.
5 | authors:
6 | - family-names: Jocher
7 | given-names: Glenn
8 | orcid: "https://orcid.org/0000-0001-5950-6979"
9 | - family-names: Chaurasia
10 | given-names: Ayush
11 | orcid: "https://orcid.org/0000-0002-7603-6750"
12 | - family-names: Qiu
13 | given-names: Jing
14 | orcid: "https://orcid.org/0000-0003-3783-7069"
15 | title: "YOLO by Ultralytics"
16 | version: 8.0.0
17 | # doi: 10.5281/zenodo.3908559 # TODO
18 | date-released: 2023-01-10
19 | license: AGPL-3.0
20 | url: "https://github.com/ultralytics/ultralytics"
21 |
--------------------------------------------------------------------------------
/Detector/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.md
2 | include requirements.txt
3 | include LICENSE
4 | include setup.py
5 | include ultralytics/assets/bus.jpg
6 | include ultralytics/assets/zidane.jpg
7 | recursive-include ultralytics *.yaml
8 | recursive-exclude __pycache__ *
9 |
--------------------------------------------------------------------------------
/Detector/best.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/Detector/best.pt
--------------------------------------------------------------------------------
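`best.pt` holds the trained polyp-detector weights. A minimal inference sketch, assuming the checkpoint loads with the `YOLO` class from the vendored `ultralytics` package (the input image path is illustrative):

```python
# Hedged sketch: assumes best.pt is a YOLOv8-style detection checkpoint
# trained on the single 'polyp' class defined in Detector/custom.yaml.
from ultralytics import YOLO

model = YOLO("Detector/best.pt")                              # load trained detector weights
results = model.predict("colonoscopy_frame.jpg", conf=0.25)   # illustrative input image
for r in results:
    print(r.boxes.xyxy, r.boxes.conf)                         # predicted polyp boxes and confidences
```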
/Detector/custom.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | #     └── coco128 ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: /Detector/train_data/ # dataset root dir
12 | train: /Detector/train_data/train # train images (relative to 'path')
13 | val: /Detector/train_data/val # val images (relative to 'path')
14 | test: /Detector/train_data/test # test images (optional)
15 |
16 |
17 | # Classes
18 | nc: 1 # number of classes
19 | names: ['polyp'] # class names
20 |
--------------------------------------------------------------------------------
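A minimal training sketch using `custom.yaml`, assuming the `/Detector/train_data/{train,val,test}` directories it references exist and that training goes through the vendored `ultralytics` API (epochs and image size are illustrative):

```python
# Hedged sketch: fine-tune a pretrained YOLOv8n model on the single-class
# polyp dataset described by Detector/custom.yaml.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")                                       # start from pretrained nano weights
model.train(data="Detector/custom.yaml", epochs=100, imgsz=640)  # illustrative hyperparameters
```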
/Detector/docker/Dockerfile-arm64:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # Builds ultralytics/ultralytics:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
3 | # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi
4 |
5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
6 | FROM arm64v8/ubuntu:22.10
7 |
8 | # Downloads to user config dir
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10 |
11 | # Install linux packages
12 | # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
13 | RUN apt update \
14 | && apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
15 | # RUN alias python=python3
16 |
17 | # Create working directory
18 | WORKDIR /usr/src/ultralytics
19 |
20 | # Copy contents
21 | # COPY . /usr/src/app (issues as not a .git directory)
22 | RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics
23 | ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt /usr/src/ultralytics/
24 |
25 | # Install pip packages
26 | RUN python3 -m pip install --upgrade pip wheel
27 | RUN pip install --no-cache -e . thop
28 |
29 |
30 | # Usage Examples -------------------------------------------------------------------------------------------------------
31 |
32 | # Build and Push
33 | # t=ultralytics/ultralytics:latest-arm64 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-arm64 -t $t . && sudo docker push $t
34 |
35 | # Run
36 | # t=ultralytics/ultralytics:latest-arm64 && sudo docker run -it --ipc=host $t
37 |
38 | # Pull and Run with local volume mounted
39 | # t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
40 |
--------------------------------------------------------------------------------
/Detector/docs/CNAME:
--------------------------------------------------------------------------------
1 | docs.ultralytics.com
2 |
--------------------------------------------------------------------------------
/Detector/docs/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Detector/docs/SECURITY.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover how Ultralytics ensures the safety of user data and systems. Check out the measures we have implemented, including Snyk and GitHub CodeQL Scanning.
3 | keywords: Ultralytics, Security Policy, data security, open-source projects, Snyk scanning, CodeQL scanning, vulnerability detection, threat prevention
4 | ---
5 |
6 | # Security Policy
7 |
8 | At [Ultralytics](https://ultralytics.com), the security of our users' data and systems is of utmost importance. To ensure the safety and security of our [open-source projects](https://github.com/ultralytics), we have implemented several measures to detect and prevent security vulnerabilities.
9 |
10 | ## Snyk Scanning
11 |
12 | We use [Snyk](https://snyk.io/advisor/python/ultralytics) to regularly scan all Ultralytics repositories for vulnerabilities and security issues. Our goal is to identify and remediate any potential threats as soon as possible, to minimize any risks to our users.
13 |
14 | [](https://snyk.io/advisor/python/ultralytics)
15 |
16 | ## GitHub CodeQL Scanning
17 |
18 | In addition to our Snyk scans, we also use GitHub's [CodeQL](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-with-codeql) scans to proactively identify and address security vulnerabilities across all Ultralytics repositories.
19 |
20 | [](https://github.com/ultralytics/ultralytics/actions/workflows/codeql.yaml)
21 |
22 | ## Reporting Security Issues
23 |
24 | If you suspect or discover a security vulnerability in any of our repositories, please let us know immediately. You can reach out to us directly via our [contact form](https://ultralytics.com/contact) or via [security@ultralytics.com](mailto:security@ultralytics.com). Our security team will investigate and respond as soon as possible.
25 |
26 | We appreciate your help in keeping all Ultralytics open-source projects secure and safe for everyone.
27 |
--------------------------------------------------------------------------------
/Detector/docs/assets/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/Detector/docs/assets/favicon.ico
--------------------------------------------------------------------------------
/Detector/docs/datasets/track/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | comments: true
3 | description: Understand multi-object tracking datasets, upcoming features and how to use them with YOLO in Python and CLI. Dive in now!
4 | keywords: Ultralytics, YOLO, multi-object tracking, datasets, detection, segmentation, pose models, Python, CLI
5 | ---
6 |
7 | # Multi-object Tracking Datasets Overview
8 |
9 | ## Dataset Format (Coming Soon)
10 |
11 | Multi-object tracking doesn't need standalone training; it works directly with pre-trained detection, segmentation or pose models.
12 | Support for training trackers alone is coming soon.
13 |
14 | ## Usage
15 |
16 | !!! example ""
17 |
18 | === "Python"
19 |
20 | ```python
21 | from ultralytics import YOLO
22 |
23 | model = YOLO('yolov8n.pt')
24 | results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5, show=True)
25 | ```
26 | === "CLI"
27 |
28 | ```bash
29 | yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" conf=0.3 iou=0.5 show
30 | ```
31 |
--------------------------------------------------------------------------------
/Detector/docs/guides/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | comments: true
3 | description: In-depth exploration of Ultralytics' YOLO. Learn about the YOLO object detection model, how to train it on custom data, multi-GPU training, exporting, predicting, deploying, and more.
4 | keywords: Ultralytics, YOLO, Deep Learning, Object detection, PyTorch, Tutorial, Multi-GPU training, Custom data training
5 | ---
6 |
7 | # Comprehensive Tutorials to Ultralytics YOLO
8 |
9 | Welcome to the Ultralytics YOLO 🚀 Guides! Our comprehensive tutorials cover various aspects of the YOLO object detection model, ranging from training and prediction to deployment. Built on PyTorch, YOLO stands out for its exceptional speed and accuracy in real-time object detection tasks.
10 |
11 | Whether you're a beginner or an expert in deep learning, our tutorials offer valuable insights into the implementation and optimization of YOLO for your computer vision projects. Let's dive in!
12 |
13 | ## Guides
14 |
15 | Here's a compilation of in-depth guides to help you master different aspects of Ultralytics YOLO.
16 |
17 | * [K-Fold Cross Validation](kfold-cross-validation.md) 🚀 NEW: Learn how to improve model generalization using the K-Fold cross-validation technique.
18 |
19 | Note: More guides about training, exporting, predicting, and deploying with Ultralytics YOLO are coming soon. Stay tuned!
20 |
--------------------------------------------------------------------------------
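For the K-Fold guide referenced above, a minimal split sketch, assuming scikit-learn is available and using an illustrative image directory:

```python
# Hedged sketch: partition an image list into 5 folds for cross-validation.
from pathlib import Path
from sklearn.model_selection import KFold

images = sorted(Path("datasets/my_data/images").glob("*.jpg"))   # illustrative dataset path
kf = KFold(n_splits=5, shuffle=True, random_state=0)
for fold, (train_idx, val_idx) in enumerate(kf.split(images)):
    print(f"fold {fold}: {len(train_idx)} train / {len(val_idx)} val images")
```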
/Detector/docs/hub/integrations.md:
--------------------------------------------------------------------------------
1 | ---
2 | comments: true
3 | ---
4 |
5 | # 🚧 Page Under Construction ⚒
6 |
7 | This page is currently under construction! 👷 Please check back later for updates. 😃🔜
8 |
--------------------------------------------------------------------------------
/Detector/docs/hub/quickstart.md:
--------------------------------------------------------------------------------
1 | ---
2 | comments: true
3 | ---
4 |
5 | # 🚧 Page Under Construction ⚒
6 |
7 | This page is currently under construction! 👷 Please check back later for updates. 😃🔜
8 |
--------------------------------------------------------------------------------
/Detector/docs/overrides/partials/comments.html:
--------------------------------------------------------------------------------
1 | {% if page.meta.comments %}
2 |
3 |
4 |
5 |
20 |
21 |
22 |
50 | {% endif %}
51 |
--------------------------------------------------------------------------------
/Detector/docs/overrides/partials/source-file.html:
--------------------------------------------------------------------------------
1 | {% import "partials/language.html" as lang with context %}
2 |
3 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | {% if page.meta.git_revision_date_localized %}
12 | 📅 {{ lang.t("source.file.date.updated") }}:
13 | {{ page.meta.git_revision_date_localized }}
14 | {% if page.meta.git_creation_date_localized %}
15 |
16 | 🎂 {{ lang.t("source.file.date.created") }}:
17 | {{ page.meta.git_creation_date_localized }}
18 | {% endif %}
19 |
20 |
21 | {% elif page.meta.revision_date %}
22 | 📅 {{ lang.t("source.file.date.updated") }}:
23 | {{ page.meta.revision_date }}
24 | {% endif %}
25 |
26 |
27 |
--------------------------------------------------------------------------------
/Detector/docs/reference/cfg/__init__.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics cfg functions like cfg2dict, handle_deprecation, merge_equals_args & more to handle YOLO settings and configurations efficiently.
3 | keywords: Ultralytics, YOLO, Configuration, cfg2dict, handle_deprecation, merge_equals_args, handle_yolo_settings, copy_default_cfg, Image Detection
4 | ---
5 |
6 | # Reference for `ultralytics/cfg/__init__.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/__init__.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/__init__.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.cfg.cfg2dict
14 |
15 |
16 | ---
17 | ## ::: ultralytics.cfg.get_cfg
18 |
19 |
20 | ---
21 | ## ::: ultralytics.cfg._handle_deprecation
22 |
23 |
24 | ---
25 | ## ::: ultralytics.cfg.check_dict_alignment
26 |
27 |
28 | ---
29 | ## ::: ultralytics.cfg.merge_equals_args
30 |
31 |
32 | ---
33 | ## ::: ultralytics.cfg.handle_yolo_hub
34 |
35 |
36 | ---
37 | ## ::: ultralytics.cfg.handle_yolo_settings
38 |
39 |
40 | ---
41 | ## ::: ultralytics.cfg.parse_key_value_pair
42 |
43 |
44 | ---
45 | ## ::: ultralytics.cfg.smart_value
46 |
47 |
48 | ---
49 | ## ::: ultralytics.cfg.entrypoint
50 |
51 |
52 | ---
53 | ## ::: ultralytics.cfg.copy_default_cfg
54 |
55 |
--------------------------------------------------------------------------------
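A small usage sketch for the configuration helpers listed above, assuming `get_cfg` accepts an `overrides` mapping as suggested by the reference entries:

```python
# Hedged sketch: merge a few overrides into the default YOLO configuration.
from ultralytics.cfg import get_cfg

cfg = get_cfg(overrides={"imgsz": 320, "conf": 0.5})  # defaults come from cfg/default.yaml
print(cfg.imgsz, cfg.conf)                            # attribute-style access to settings
```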
/Detector/docs/reference/data/annotator.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Enhance your machine learning model with Ultralytics’ auto_annotate function. Simplify data annotation for improved model training.
3 | keywords: Ultralytics, Auto-Annotate, Machine Learning, AI, Annotation, Data Processing, Model Training
4 | ---
5 |
6 | # Reference for `ultralytics/data/annotator.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/annotator.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/annotator.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.data.annotator.auto_annotate
14 |
15 |
--------------------------------------------------------------------------------
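A hedged usage sketch for `auto_annotate`, with an illustrative image folder and the detector/SAM weights named in the function's documentation:

```python
# Hedged sketch: auto-label a folder of images using a detector plus SAM.
from ultralytics.data.annotator import auto_annotate

auto_annotate(data="path/to/images", det_model="yolov8x.pt", sam_model="sam_b.pt")
```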
/Detector/docs/reference/data/base.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore BaseDataset in Ultralytics docs. Learn how this implementation simplifies dataset creation and manipulation.
3 | keywords: Ultralytics, docs, BaseDataset, data manipulation, dataset creation
4 | ---
5 |
6 | # Reference for `ultralytics/data/base.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/base.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/base.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.data.base.BaseDataset
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/data/build.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the Ultralytics YOLO data build procedures, including the InfiniteDataLoader, seed_worker, build_dataloader, and load_inference_source.
3 | keywords: Ultralytics, YOLO, Data build, DataLoader, InfiniteDataLoader, seed_worker, build_dataloader, load_inference_source
4 | ---
5 |
6 | # Reference for `ultralytics/data/build.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/build.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/build.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.data.build.InfiniteDataLoader
14 |
15 |
16 | ---
17 | ## ::: ultralytics.data.build._RepeatSampler
18 |
19 |
20 | ---
21 | ## ::: ultralytics.data.build.seed_worker
22 |
23 |
24 | ---
25 | ## ::: ultralytics.data.build.build_yolo_dataset
26 |
27 |
28 | ---
29 | ## ::: ultralytics.data.build.build_dataloader
30 |
31 |
32 | ---
33 | ## ::: ultralytics.data.build.check_source
34 |
35 |
36 | ---
37 | ## ::: ultralytics.data.build.load_inference_source
38 |
39 |
--------------------------------------------------------------------------------
/Detector/docs/reference/data/converter.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics data converter functions like coco91_to_coco80_class, merge_multi_segment, rle2polygon for efficient data handling.
3 | keywords: Ultralytics, Data Converter, coco91_to_coco80_class, merge_multi_segment, rle2polygon
4 | ---
5 |
6 | # Reference for `ultralytics/data/converter.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/converter.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/converter.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.data.converter.coco91_to_coco80_class
14 |
15 |
16 | ---
17 | ## ::: ultralytics.data.converter.convert_coco
18 |
19 |
20 | ---
21 | ## ::: ultralytics.data.converter.rle2polygon
22 |
23 |
24 | ---
25 | ## ::: ultralytics.data.converter.min_index
26 |
27 |
28 | ---
29 | ## ::: ultralytics.data.converter.merge_multi_segment
30 |
31 |
32 | ---
33 | ## ::: ultralytics.data.converter.delete_dsstore
34 |
35 |
--------------------------------------------------------------------------------
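A hedged usage sketch for `convert_coco`, with an illustrative annotations directory:

```python
# Hedged sketch: convert COCO JSON annotations into YOLO-format label files.
from ultralytics.data.converter import convert_coco

convert_coco(labels_dir="path/to/coco/annotations/", use_segments=False, cls91to80=True)
```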
/Detector/docs/reference/data/dataset.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the YOLODataset and SemanticDataset classes in YOLO data. Learn how to efficiently handle and manipulate your data with Ultralytics.
3 | keywords: Ultralytics, YOLO, YOLODataset, SemanticDataset, data handling, data manipulation
4 | ---
5 |
6 | # Reference for `ultralytics/data/dataset.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/dataset.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/dataset.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.data.dataset.YOLODataset
14 |
15 |
16 | ---
17 | ## ::: ultralytics.data.dataset.ClassificationDataset
18 |
19 |
20 | ---
21 | ## ::: ultralytics.data.dataset.SemanticDataset
22 |
23 |
--------------------------------------------------------------------------------
/Detector/docs/reference/data/loaders.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Find detailed guides on Ultralytics YOLO data loaders, including LoadStreams, LoadImages and LoadTensor. Learn how to get the best YouTube URLs.
3 | keywords: Ultralytics, data loaders, LoadStreams, LoadImages, LoadTensor, YOLO, YouTube URLs
4 | ---
5 |
6 | # Reference for `ultralytics/data/loaders.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/loaders.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/loaders.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.data.loaders.SourceTypes
14 |
15 |
16 | ---
17 | ## ::: ultralytics.data.loaders.LoadStreams
18 |
19 |
20 | ---
21 | ## ::: ultralytics.data.loaders.LoadScreenshots
22 |
23 |
24 | ---
25 | ## ::: ultralytics.data.loaders.LoadImages
26 |
27 |
28 | ---
29 | ## ::: ultralytics.data.loaders.LoadPilAndNumpy
30 |
31 |
32 | ---
33 | ## ::: ultralytics.data.loaders.LoadTensor
34 |
35 |
36 | ---
37 | ## ::: ultralytics.data.loaders.autocast_list
38 |
39 |
40 | ---
41 | ## ::: ultralytics.data.loaders.get_best_youtube_url
42 |
43 |
--------------------------------------------------------------------------------
/Detector/docs/reference/data/utils.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Uncover a detailed guide to Ultralytics data utilities. Learn functions from img2label_paths to autosplit, all boosting your YOLO model’s efficiency.
3 | keywords: Ultralytics, data utils, YOLO, img2label_paths, exif_size, polygon2mask, polygons2masks_overlap, check_cls_dataset, delete_dsstore, autosplit
4 | ---
5 |
6 | # Reference for `ultralytics/data/utils.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/utils.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.data.utils.HUBDatasetStats
14 |
15 |
16 | ---
17 | ## ::: ultralytics.data.utils.img2label_paths
18 |
19 |
20 | ---
21 | ## ::: ultralytics.data.utils.get_hash
22 |
23 |
24 | ---
25 | ## ::: ultralytics.data.utils.exif_size
26 |
27 |
28 | ---
29 | ## ::: ultralytics.data.utils.verify_image_label
30 |
31 |
32 | ---
33 | ## ::: ultralytics.data.utils.polygon2mask
34 |
35 |
36 | ---
37 | ## ::: ultralytics.data.utils.polygons2masks
38 |
39 |
40 | ---
41 | ## ::: ultralytics.data.utils.polygons2masks_overlap
42 |
43 |
44 | ---
45 | ## ::: ultralytics.data.utils.check_det_dataset
46 |
47 |
48 | ---
49 | ## ::: ultralytics.data.utils.check_cls_dataset
50 |
51 |
52 | ---
53 | ## ::: ultralytics.data.utils.compress_one_image
54 |
55 |
56 | ---
57 | ## ::: ultralytics.data.utils.delete_dsstore
58 |
59 |
60 | ---
61 | ## ::: ultralytics.data.utils.zip_directory
62 |
63 |
64 | ---
65 | ## ::: ultralytics.data.utils.autosplit
66 |
67 |
--------------------------------------------------------------------------------
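A hedged usage sketch for `autosplit` from the utilities above, with an illustrative dataset path and split weights:

```python
# Hedged sketch: write autosplit train/val/test .txt files next to the images.
from ultralytics.data.utils import autosplit

autosplit(path="datasets/coco8/images", weights=(0.9, 0.1, 0.0), annotated_only=False)
```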
/Detector/docs/reference/engine/exporter.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the exporter functionality of Ultralytics. Learn about exporting formats, iOSDetectModel, and try exporting with examples.
3 | keywords: Ultralytics, Exporter, iOSDetectModel, Export Formats, Try export
4 | ---
5 |
6 | # Reference for `ultralytics/engine/exporter.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/exporter.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/exporter.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.engine.exporter.Exporter
14 |
15 |
16 | ---
17 | ## ::: ultralytics.engine.exporter.iOSDetectModel
18 |
19 |
20 | ---
21 | ## ::: ultralytics.engine.exporter.export_formats
22 |
23 |
24 | ---
25 | ## ::: ultralytics.engine.exporter.gd_outputs
26 |
27 |
28 | ---
29 | ## ::: ultralytics.engine.exporter.try_export
30 |
31 |
32 | ---
33 | ## ::: ultralytics.engine.exporter.export
34 |
35 |
--------------------------------------------------------------------------------
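The Exporter above backs the high-level `model.export()` call; a hedged sketch with ONNX chosen purely as an example format:

```python
# Hedged sketch: export pretrained weights to ONNX via the Exporter machinery.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
model.export(format="onnx")  # writes yolov8n.onnx next to the weights
```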
/Detector/docs/reference/engine/model.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the detailed guide on using the Ultralytics YOLO Engine Model. Learn better ways to implement, train and evaluate YOLO models.
3 | keywords: Ultralytics, YOLO, engine model, documentation, guide, implementation, training, evaluation
4 | ---
5 |
6 | # Reference for `ultralytics/engine/model.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/model.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/model.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.engine.model.Model
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/engine/predictor.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about Ultralytics BasePredictor, an essential component of our engine that serves as the foundation for all prediction operations.
3 | keywords: Ultralytics, BasePredictor, YOLO, prediction, engine
4 | ---
5 |
6 | # Reference for `ultralytics/engine/predictor.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/predictor.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/predictor.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.engine.predictor.BasePredictor
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/engine/results.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Master Ultralytics engine results including base tensors, boxes, and keypoints with our thorough documentation.
3 | keywords: Ultralytics, engine, results, base tensor, boxes, keypoints
4 | ---
5 |
6 | # Reference for `ultralytics/engine/results.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/results.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/results.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.engine.results.BaseTensor
14 |
15 |
16 | ---
17 | ## ::: ultralytics.engine.results.Results
18 |
19 |
20 | ---
21 | ## ::: ultralytics.engine.results.Boxes
22 |
23 |
24 | ---
25 | ## ::: ultralytics.engine.results.Masks
26 |
27 |
28 | ---
29 | ## ::: ultralytics.engine.results.Keypoints
30 |
31 |
32 | ---
33 | ## ::: ultralytics.engine.results.Probs
34 |
35 |
--------------------------------------------------------------------------------
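A hedged sketch of how the `Results`/`Boxes` objects documented above are typically consumed after prediction (the sample image ships with the package):

```python
# Hedged sketch: inspect boxes, classes and confidences on a sample image.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
for r in model.predict("ultralytics/assets/bus.jpg"):
    print(r.boxes.xyxy)   # bounding boxes in xyxy format
    print(r.boxes.cls)    # predicted class indices
    print(r.boxes.conf)   # confidence scores
```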
/Detector/docs/reference/engine/trainer.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about the BaseTrainer class in the Ultralytics library. From training control, customization to advanced usage.
3 | keywords: Ultralytics, BaseTrainer, Machine Learning, Training Control, Python library
4 | ---
5 |
6 | # Reference for `ultralytics/engine/trainer.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/trainer.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/trainer.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.engine.trainer.BaseTrainer
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/engine/validator.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about the Ultralytics BaseValidator module. Understand its principles, uses, and how it interacts with other components.
3 | keywords: Ultralytics, BaseValidator, Ultralytics engine, module, components
4 | ---
5 |
6 | # Reference for `ultralytics/engine/validator.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/validator.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/validator.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.engine.validator.BaseValidator
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/hub/__init__.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics hub functions for model resetting, checking datasets, model exporting and more. Easy-to-follow instructions provided.
3 | keywords: Ultralytics, hub functions, model export, dataset check, reset model, YOLO Docs
4 | ---
5 |
6 | # Reference for `ultralytics/hub/__init__.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/__init__.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/__init__.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.hub.login
14 |
15 |
16 | ---
17 | ## ::: ultralytics.hub.logout
18 |
19 |
20 | ---
21 | ## ::: ultralytics.hub.start
22 |
23 |
24 | ---
25 | ## ::: ultralytics.hub.reset_model
26 |
27 |
28 | ---
29 | ## ::: ultralytics.hub.export_fmts_hub
30 |
31 |
32 | ---
33 | ## ::: ultralytics.hub.export_model
34 |
35 |
36 | ---
37 | ## ::: ultralytics.hub.get_export
38 |
39 |
40 | ---
41 | ## ::: ultralytics.hub.check_dataset
42 |
43 |
--------------------------------------------------------------------------------
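A hedged sketch of the `login`/`logout` helpers listed above (the API key is a placeholder):

```python
# Hedged sketch: authenticate against Ultralytics HUB, then clear credentials.
from ultralytics import hub

hub.login("YOUR_API_KEY")  # placeholder key
hub.logout()
```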
/Detector/docs/reference/hub/auth.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Dive into the Ultralytics Auth API documentation & learn how to manage authentication in your AI & ML projects easily and effectively.
3 | keywords: Ultralytics, Auth, API documentation, User Authentication, AI, Machine Learning
4 | ---
5 |
6 | # Reference for `ultralytics/hub/auth.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/auth.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/auth.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.hub.auth.Auth
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/hub/session.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore details about the HUBTrainingSession in Ultralytics framework. Learn to utilize this functionality for effective model training.
3 | keywords: Ultralytics, HUBTrainingSession, Documentation, Model Training, AI, Machine Learning, YOLO
4 | ---
5 |
6 | # Reference for `ultralytics/hub/session.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/session.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/session.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.hub.session.HUBTrainingSession
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/hub/utils.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics docs for various Events, including "request_with_credentials" and "requests_with_progress". Also, understand the use of the "smart_request".
3 | keywords: Ultralytics, Events, request_with_credentials, smart_request, Ultralytics hub utils, requests_with_progress
4 | ---
5 |
6 | # Reference for `ultralytics/hub/utils.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/utils.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.hub.utils.Events
14 |
15 |
16 | ---
17 | ## ::: ultralytics.hub.utils.request_with_credentials
18 |
19 |
20 | ---
21 | ## ::: ultralytics.hub.utils.requests_with_progress
22 |
23 |
24 | ---
25 | ## ::: ultralytics.hub.utils.smart_request
26 |
27 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/fastsam/model.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn all about Ultralytics FastSAM model. Dive into our comprehensive guide for seamless integration and efficient model training.
3 | keywords: Ultralytics, FastSAM model, Model documentation, Efficient model training
4 | ---
5 |
6 | # Reference for `ultralytics/models/fastsam/model.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/model.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/model.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.fastsam.model.FastSAM
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/fastsam/predict.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Get detailed insights about Ultralytics FastSAMPredictor. Learn to predict and optimize your AI models with our properly documented guidelines.
3 | keywords: Ultralytics, FastSAMPredictor, predictive modeling, AI optimization, machine learning, deep learning, Ultralytics documentation
4 | ---
5 |
6 | # Reference for `ultralytics/models/fastsam/predict.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/predict.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/predict.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.fastsam.predict.FastSAMPredictor
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/fastsam/prompt.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn to effectively utilize FastSAMPrompt model from Ultralytics. Detailed guide to help you get the most out of your machine learning models.
3 | keywords: Ultralytics, FastSAMPrompt, machine learning, model, guide, documentation
4 | ---
5 |
6 | # Reference for `ultralytics/models/fastsam/prompt.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/prompt.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/prompt.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.fastsam.prompt.FastSAMPrompt
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/fastsam/utils.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn how to adjust bounding boxes to image borders in Ultralytics models using the bbox_iou utility. Enhance your object detection performance.
3 | keywords: Ultralytics, bounding boxes, Bboxes, image borders, object detection, bbox_iou, model utilities
4 | ---
5 |
6 | # Reference for `ultralytics/models/fastsam/utils.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/utils.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.fastsam.utils.adjust_bboxes_to_image_border
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.fastsam.utils.bbox_iou
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/fastsam/val.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about FastSAMValidator in Ultralytics models. Comprehensive guide to enhancing AI capabilities with Ultralytics.
3 | keywords: Ultralytics, FastSAMValidator, model, synthetic, AI, machine learning, validation
4 | ---
5 |
6 | # Reference for `ultralytics/models/fastsam/val.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/val.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/val.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.fastsam.val.FastSAMValidator
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/nas/model.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn how our NAS model operates in Ultralytics. Comprehensive guide with detailed examples. Master the nuances of Ultralytics NAS model.
3 | keywords: Ultralytics, NAS model, NAS guide, machine learning, model documentation
4 | ---
5 |
6 | # Reference for `ultralytics/models/nas/model.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/nas/model.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/nas/model.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.nas.model.NAS
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/nas/predict.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics NASPredictor. Understand high-level architecture of the model for effective implementation and efficient predictions.
3 | keywords: NASPredictor, Ultralytics, Ultralytics model, model architecture, efficient predictions
4 | ---
5 |
6 | # Reference for `ultralytics/models/nas/predict.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/nas/predict.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/nas/predict.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.nas.predict.NASPredictor
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/nas/val.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the utilities and functions of the Ultralytics NASValidator. Find out how it benefits allocation and optimization in AI models.
3 | keywords: Ultralytics, NASValidator, models.nas.val.NASValidator, AI models, allocation, optimization
4 | ---
5 |
6 | # Reference for `ultralytics/models/nas/val.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/nas/val.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/nas/val.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.nas.val.NASValidator
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/rtdetr/model.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the specifics of using the RTDETR model in Ultralytics. Detailed documentation layered with explanations and examples.
3 | keywords: Ultralytics, RTDETR model, Ultralytics models, object detection, Ultralytics documentation
4 | ---
5 |
6 | # Reference for `ultralytics/models/rtdetr/model.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/rtdetr/model.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/rtdetr/model.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.rtdetr.model.RTDETR
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/rtdetr/predict.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn how to use the RTDETRPredictor model of the Ultralytics package. Detailed documentation, usage instructions, and advice.
3 | keywords: Ultralytics, RTDETRPredictor, model documentation, guide, real-time object detection
4 | ---
5 |
6 | # Reference for `ultralytics/models/rtdetr/predict.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/rtdetr/predict.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/rtdetr/predict.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.rtdetr.predict.RTDETRPredictor
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/rtdetr/train.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Get insights into RTDETRTrainer, a crucial component of Ultralytics for effective model training. Explore detailed documentation at Ultralytics.
3 | keywords: Ultralytics, RTDETRTrainer, model training, Ultralytics models, PyTorch models, neural networks, machine learning, deep learning
4 | ---
5 |
6 | # Reference for `ultralytics/models/rtdetr/train.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/rtdetr/train.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/rtdetr/train.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.rtdetr.train.RTDETRTrainer
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.rtdetr.train.train
18 |
19 |
--------------------------------------------------------------------------------
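A hedged sketch of RT-DETR training through the high-level `RTDETR` class, with illustrative weights and the tiny coco8 dataset:

```python
# Hedged sketch: short RT-DETR fine-tuning run.
from ultralytics import RTDETR

model = RTDETR("rtdetr-l.pt")                         # illustrative pretrained weights
model.train(data="coco8.yaml", epochs=3, imgsz=640)   # illustrative hyperparameters
```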
/Detector/docs/reference/models/rtdetr/val.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore RTDETRDataset in Ultralytics Models. Learn about the RTDETRValidator function, understand its usage in real-time object detection.
3 | keywords: Ultralytics, RTDETRDataset, RTDETRValidator, real-time object detection, models documentation
4 | ---
5 |
6 | # Reference for `ultralytics/models/rtdetr/val.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/rtdetr/val.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/rtdetr/val.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.rtdetr.val.RTDETRDataset
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.rtdetr.val.RTDETRValidator
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/sam/amg.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics methods for mask data processing, transformation and encoding. Deepen your understanding of RLE encoding, image cropping and more.
3 | keywords: Ultralytics, Mask Data, Transformation, Encoding, RLE encoding, Image cropping, Pytorch, SAM, AMG, Ultralytics model
4 | ---
5 |
6 | # Reference for `ultralytics/models/sam/amg.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/amg.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/amg.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.sam.amg.MaskData
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.sam.amg.is_box_near_crop_edge
18 |
19 |
20 | ---
21 | ## ::: ultralytics.models.sam.amg.box_xyxy_to_xywh
22 |
23 |
24 | ---
25 | ## ::: ultralytics.models.sam.amg.batch_iterator
26 |
27 |
28 | ---
29 | ## ::: ultralytics.models.sam.amg.mask_to_rle_pytorch
30 |
31 |
32 | ---
33 | ## ::: ultralytics.models.sam.amg.rle_to_mask
34 |
35 |
36 | ---
37 | ## ::: ultralytics.models.sam.amg.area_from_rle
38 |
39 |
40 | ---
41 | ## ::: ultralytics.models.sam.amg.calculate_stability_score
42 |
43 |
44 | ---
45 | ## ::: ultralytics.models.sam.amg.build_point_grid
46 |
47 |
48 | ---
49 | ## ::: ultralytics.models.sam.amg.build_all_layer_point_grids
50 |
51 |
52 | ---
53 | ## ::: ultralytics.models.sam.amg.generate_crop_boxes
54 |
55 |
56 | ---
57 | ## ::: ultralytics.models.sam.amg.uncrop_boxes_xyxy
58 |
59 |
60 | ---
61 | ## ::: ultralytics.models.sam.amg.uncrop_points
62 |
63 |
64 | ---
65 | ## ::: ultralytics.models.sam.amg.uncrop_masks
66 |
67 |
68 | ---
69 | ## ::: ultralytics.models.sam.amg.remove_small_regions
70 |
71 |
72 | ---
73 | ## ::: ultralytics.models.sam.amg.coco_encode_rle
74 |
75 |
76 | ---
77 | ## ::: ultralytics.models.sam.amg.batched_mask_to_box
78 |
79 |
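80 | A minimal sketch of two of the prompt-grid helpers above; the grid size and batch size are arbitrary choices for illustration:
81 | 
82 | ```python
83 | from ultralytics.models.sam.amg import batch_iterator, build_point_grid
84 | 
85 | # Build a 32x32 grid of prompt points normalized to [0, 1] x [0, 1]
86 | points = build_point_grid(32)
87 | print(points.shape)  # (1024, 2)
88 | 
89 | # Iterate over the points in batches of 64
90 | for (batch,) in batch_iterator(64, points):
91 |     print(batch.shape)  # (64, 2) for every full batch
92 |     break
93 | ```
94 | 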
--------------------------------------------------------------------------------
/Detector/docs/reference/models/sam/build.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Master building SAM ViT models with Ultralytics. Discover the steps to leverage the power of SAM and its Vision Transformer (ViT) backbones.
3 | keywords: Ultralytics, SAM, build sam, vision transformer, vits, build_sam_vit_l, build_sam_vit_b, build_sam
4 | ---
5 |
6 | # Reference for `ultralytics/models/sam/build.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/build.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/build.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.sam.build.build_sam_vit_h
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.sam.build.build_sam_vit_l
18 |
19 |
20 | ---
21 | ## ::: ultralytics.models.sam.build.build_sam_vit_b
22 |
23 |
24 | ---
25 | ## ::: ultralytics.models.sam.build.build_mobile_sam
26 |
27 |
28 | ---
29 | ## ::: ultralytics.models.sam.build._build_sam
30 |
31 |
32 | ---
33 | ## ::: ultralytics.models.sam.build.build_sam
34 |
35 |
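36 | A minimal sketch of `build_sam`, assuming the `sam_b.pt` checkpoint is available locally or can be downloaded:
37 | 
38 | ```python
39 | from ultralytics.models.sam.build import build_sam
40 | 
41 | # Build a SAM model from a named checkpoint (file name follows the SAM weight naming convention)
42 | sam_model = build_sam(ckpt="sam_b.pt")
43 | print(type(sam_model).__name__)
44 | ```
45 | 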
--------------------------------------------------------------------------------
/Detector/docs/reference/models/sam/model.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Dive into the SAM model details in the Ultralytics YOLO documentation. Understand, implement, and optimize your model use.
3 | keywords: Ultralytics, YOLO, SAM Model, Documentations, Machine Learning, AI, Convolutional neural network
4 | ---
5 |
6 | # Reference for `ultralytics/models/sam/model.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/model.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/model.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.sam.model.SAM
14 |
15 |
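16 | A minimal prompt-based usage sketch, assuming `SAM` is exported from the top-level `ultralytics` package; the weights file, image path, and prompt coordinates are placeholders:
17 | 
18 | ```python
19 | from ultralytics import SAM
20 | 
21 | # Load a Segment Anything model
22 | model = SAM("sam_b.pt")
23 | 
24 | # Segment with a box prompt (pixel xyxy coordinates)
25 | results = model("bus.jpg", bboxes=[100, 100, 400, 500])
26 | 
27 | # Segment with a point prompt and a positive label
28 | results = model("bus.jpg", points=[300, 300], labels=[1])
29 | ```
30 | 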
--------------------------------------------------------------------------------
/Detector/docs/reference/models/sam/modules/decoders.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore MaskDecoder, part of the Ultralytics SAM modules. Gain insights into how to use it effectively together with the MLP prediction head.
3 | keywords: Ultralytics, MaskDecoder, SAM modules, decoders, MLP, YOLO, machine learning, image recognition
4 | ---
5 |
6 | # Reference for `ultralytics/models/sam/modules/decoders.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/decoders.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/decoders.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.sam.modules.decoders.MaskDecoder
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.sam.modules.decoders.MLP
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/sam/modules/encoders.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover detailed information on ImageEncoderViT, PositionEmbeddingRandom, Attention, window_partition, get_rel_pos and more in Ultralytics models encoders documentation.
3 | keywords: Ultralytics, Encoders, Modules, Documentation, ImageEncoderViT, PositionEmbeddingRandom, Attention, window_partition, get_rel_pos
4 | ---
5 |
6 | # Reference for `ultralytics/models/sam/modules/encoders.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/encoders.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/encoders.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.sam.modules.encoders.ImageEncoderViT
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.sam.modules.encoders.PromptEncoder
18 |
19 |
20 | ---
21 | ## ::: ultralytics.models.sam.modules.encoders.PositionEmbeddingRandom
22 |
23 |
24 | ---
25 | ## ::: ultralytics.models.sam.modules.encoders.Block
26 |
27 |
28 | ---
29 | ## ::: ultralytics.models.sam.modules.encoders.Attention
30 |
31 |
32 | ---
33 | ## ::: ultralytics.models.sam.modules.encoders.PatchEmbed
34 |
35 |
36 | ---
37 | ## ::: ultralytics.models.sam.modules.encoders.window_partition
38 |
39 |
40 | ---
41 | ## ::: ultralytics.models.sam.modules.encoders.window_unpartition
42 |
43 |
44 | ---
45 | ## ::: ultralytics.models.sam.modules.encoders.get_rel_pos
46 |
47 |
48 | ---
49 | ## ::: ultralytics.models.sam.modules.encoders.add_decomposed_rel_pos
50 |
51 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/sam/modules/sam.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the Sam module of Ultralytics. Discover detailed methods, classes, and information for efficient deep-learning model training.
3 | keywords: Ultralytics, Sam module, deep learning, model training, Ultralytics documentation
4 | ---
5 |
6 | # Reference for `ultralytics/models/sam/modules/sam.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/sam.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/sam.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.sam.modules.sam.Sam
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/sam/modules/tiny_encoder.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Get in-depth insights about Ultralytics Tiny Encoder Modules such as Conv2d_BN, MBConv, ConvLayer, Attention, BasicLayer, and TinyViT. Improve your understanding of machine learning model components.
3 | keywords: Ultralytics, Tiny Encoder, Conv2d_BN, MBConv, ConvLayer, Attention, BasicLayer, TinyViT, Machine learning modules, Ultralytics models
4 | ---
5 |
6 | # Reference for `ultralytics/models/sam/modules/tiny_encoder.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/tiny_encoder.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/tiny_encoder.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.sam.modules.tiny_encoder.Conv2d_BN
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.sam.modules.tiny_encoder.PatchEmbed
18 |
19 |
20 | ---
21 | ## ::: ultralytics.models.sam.modules.tiny_encoder.MBConv
22 |
23 |
24 | ---
25 | ## ::: ultralytics.models.sam.modules.tiny_encoder.PatchMerging
26 |
27 |
28 | ---
29 | ## ::: ultralytics.models.sam.modules.tiny_encoder.ConvLayer
30 |
31 |
32 | ---
33 | ## ::: ultralytics.models.sam.modules.tiny_encoder.Mlp
34 |
35 |
36 | ---
37 | ## ::: ultralytics.models.sam.modules.tiny_encoder.Attention
38 |
39 |
40 | ---
41 | ## ::: ultralytics.models.sam.modules.tiny_encoder.TinyViTBlock
42 |
43 |
44 | ---
45 | ## ::: ultralytics.models.sam.modules.tiny_encoder.BasicLayer
46 |
47 |
48 | ---
49 | ## ::: ultralytics.models.sam.modules.tiny_encoder.LayerNorm2d
50 |
51 |
52 | ---
53 | ## ::: ultralytics.models.sam.modules.tiny_encoder.TinyViT
54 |
55 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/sam/modules/transformer.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about TwoWayTransformer and Attention modules in Ultralytics. Leverage these tools to enhance your AI models.
3 | keywords: Ultralytics, TwoWayTransformer, Attention, AI models, transformers
4 | ---
5 |
6 | # Reference for `ultralytics/models/sam/modules/transformer.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/transformer.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/modules/transformer.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.sam.modules.transformer.TwoWayTransformer
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.sam.modules.transformer.TwoWayAttentionBlock
18 |
19 |
20 | ---
21 | ## ::: ultralytics.models.sam.modules.transformer.Attention
22 |
23 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/sam/predict.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Master the ultralytics.models.sam.predict.Predictor class with our comprehensive guide. Discover techniques to enhance your model predictions.
3 | keywords: Ultralytics, predictor, models, sam.predict.Predictor, AI, machine learning, predictive models
4 | ---
5 |
6 | # Reference for `ultralytics/models/sam/predict.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/predict.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/sam/predict.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.sam.predict.Predictor
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/utils/loss.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn to use the DETRLoss function provided by Ultralytics YOLO. Understand how to utilize loss in RTDETR detection models to improve accuracy.
3 | keywords: Ultralytics, YOLO, Documentation, DETRLoss, Detection Loss, Loss function, DETR, RTDETR Detection Models
4 | ---
5 |
6 | # Reference for `ultralytics/models/utils/loss.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/utils/loss.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/utils/loss.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.utils.loss.DETRLoss
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.utils.loss.RTDETRDetectionLoss
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/utils/ops.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover details of the HungarianMatcher class and inverse_sigmoid function in Ultralytics YOLO, advanced tools supporting detection models.
3 | keywords: Ultralytics, YOLO, HungarianMatcher, inverse_sigmoid, detection models, model utilities, ops
4 | ---
5 |
6 | # Reference for `ultralytics/models/utils/ops.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/utils/ops.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/utils/ops.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.utils.ops.HungarianMatcher
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.utils.ops.get_cdn_group
18 |
19 |
20 | ---
21 | ## ::: ultralytics.models.utils.ops.inverse_sigmoid
22 |
23 |
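24 | `HungarianMatcher` and `get_cdn_group` are used internally by the RT-DETR loss and need model-specific inputs, but `inverse_sigmoid` can be sketched in isolation:
25 | 
26 | ```python
27 | import torch
28 | 
29 | from ultralytics.models.utils.ops import inverse_sigmoid
30 | 
31 | x = torch.tensor([0.25, 0.50, 0.75])
32 | logits = inverse_sigmoid(x)   # logit function, the inverse of sigmoid
33 | print(torch.sigmoid(logits))  # ~[0.25, 0.50, 0.75], recovering the input
34 | ```
35 | 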
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/classify/predict.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the Ultralytics ClassificationPredictor guide for model prediction and visualization. Build powerful AI models with YOLO.
3 | keywords: Ultralytics, classification predictor, predict, YOLO, AI models, model visualization
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/classify/predict.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/classify/predict.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/classify/predict.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.classify.predict.ClassificationPredictor
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.classify.predict.predict
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/classify/train.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Delve into the Classification Trainer in the Ultralytics YOLO docs and optimize your model's training process with practical insights.
3 | keywords: Ultralytics, YOLO, Classification Trainer, deep learning, training process, AI models, documentation
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/classify/train.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/classify/train.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/classify/train.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.classify.train.ClassificationTrainer
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.classify.train.train
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/classify/val.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore YOLO ClassificationValidator, a key element of Ultralytics YOLO models. Learn how it validates and fine-tunes model outputs.
3 | keywords: Ultralytics, YOLO, ClassificationValidator, model validation, model fine-tuning, deep learning, computer vision
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/classify/val.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/classify/val.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/classify/val.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.classify.val.ClassificationValidator
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.classify.val.val
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/detect/predict.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the guide to using the DetectionPredictor in Ultralytics YOLO. Learn how to predict, detect and analyze objects accurately.
3 | keywords: Ultralytics, YOLO, DetectionPredictor, detect, predict, object detection, analysis
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/detect/predict.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/detect/predict.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/detect/predict.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.detect.predict.DetectionPredictor
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.detect.predict.predict
18 |
19 |
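20 | A minimal usage sketch, assuming `ASSETS` (the bundled sample images) is exported from `ultralytics.utils`; overrides mirror the CLI prediction arguments:
21 | 
22 | ```python
23 | from ultralytics.models.yolo.detect import DetectionPredictor
24 | from ultralytics.utils import ASSETS
25 | 
26 | # Overrides mirror the CLI prediction arguments
27 | args = dict(model="yolov8n.pt", source=ASSETS)
28 | predictor = DetectionPredictor(overrides=args)
29 | predictor.predict_cli()
30 | ```
31 | 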
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/detect/train.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Maximize your model's potential with Ultralytics YOLO Detection Trainer. Learn advanced techniques, tips, and tricks for training.
3 | keywords: Ultralytics YOLO, YOLO, Detection Trainer, Model Training, Machine Learning, Deep Learning, Computer Vision
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/detect/train.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/detect/train.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/detect/train.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.detect.train.DetectionTrainer
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.detect.train.train
18 |
19 |
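20 | A minimal training sketch; `coco8.yaml` is the small sample dataset shipped with Ultralytics, and the epoch count is an arbitrary choice:
21 | 
22 | ```python
23 | from ultralytics.models.yolo.detect import DetectionTrainer
24 | 
25 | # Overrides mirror the CLI training arguments
26 | args = dict(model="yolov8n.pt", data="coco8.yaml", epochs=3)
27 | trainer = DetectionTrainer(overrides=args)
28 | trainer.train()
29 | ```
30 | 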
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/detect/val.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover how to validate your YOLO models with the Ultralytics Detection Validator. Enhance precision and recall rates today.
3 | keywords: Ultralytics, YOLO, Detection Validator, model validation, precision, recall
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/detect/val.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/detect/val.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/detect/val.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.detect.val.DetectionValidator
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.detect.val.val
18 |
19 |
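20 | A minimal validation sketch, assuming a pretrained checkpoint and the small `coco8.yaml` sample dataset:
21 | 
22 | ```python
23 | from ultralytics.models.yolo.detect import DetectionValidator
24 | 
25 | # args mirror the CLI validation arguments
26 | args = dict(model="yolov8n.pt", data="coco8.yaml")
27 | validator = DetectionValidator(args=args)
28 | validator()
29 | ```
30 | 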
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/model.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover the Ultralytics YOLO model class. Learn advanced techniques, tips, and tricks for training.
3 | keywords: Ultralytics YOLO, YOLO, YOLO model, Model Training, Machine Learning, Deep Learning, Computer Vision
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/model.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/model.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/model.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.model.YOLO
14 |
15 |
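16 | A minimal usage sketch of the `YOLO` class; the weights file name and image path are placeholders:
17 | 
18 | ```python
19 | from ultralytics import YOLO
20 | 
21 | # Load a pretrained detection model and run inference
22 | model = YOLO("yolov8n.pt")
23 | results = model("bus.jpg")
24 | 
25 | for result in results:
26 |     print(result.boxes.xyxy)  # boxes in xyxy pixel coordinates
27 |     print(result.boxes.conf)  # confidence scores
28 |     print(result.boxes.cls)   # class indices
29 | ```
30 | 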
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/pose/predict.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover how to use PosePredictor in the Ultralytics YOLO model. Includes detailed guides, code examples, and explanations.
3 | keywords: Ultralytics, YOLO, PosePredictor, machine learning, AI, predictive models
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/pose/predict.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/pose/predict.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/pose/predict.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.pose.predict.PosePredictor
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.pose.predict.predict
18 |
19 |
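20 | A minimal sketch using the high-level `YOLO` API with a pose checkpoint, which routes prediction through `PosePredictor`; file names are placeholders:
21 | 
22 | ```python
23 | from ultralytics import YOLO
24 | 
25 | # Pose estimation with a pretrained pose checkpoint
26 | model = YOLO("yolov8n-pose.pt")
27 | results = model("bus.jpg")
28 | 
29 | for result in results:
30 |     print(result.keypoints.xy)  # per-person keypoint coordinates
31 | ```
32 | 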
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/pose/train.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics PoseTrainer for YOLO models. Get a step-by-step guide on how to train on custom pose data for more accurate AI modeling.
3 | keywords: Ultralytics, YOLO, PoseTrainer, pose training, AI modeling, custom data training
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/pose/train.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/pose/train.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/pose/train.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.pose.train.PoseTrainer
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.pose.train.train
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/pose/val.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the PoseValidator and learn how Ultralytics YOLO validates pose estimation results. Improve your understanding of YOLO.
3 | keywords: PoseValidator, Ultralytics, YOLO, Object detection, Pose validation
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/pose/val.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/pose/val.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/pose/val.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.pose.val.PoseValidator
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.pose.val.val
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/segment/predict.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover how to utilize the YOLO Segmentation Predictor in Ultralytics. Enhance your object detection and segmentation skills.
3 | keywords: YOLO, Ultralytics, object detection, segmentation predictor
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/segment/predict.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/predict.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/predict.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.segment.predict.SegmentationPredictor
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.segment.predict.predict
18 |
19 |
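20 | A minimal sketch using the high-level `YOLO` API with a segmentation checkpoint, which routes prediction through `SegmentationPredictor`; file names are placeholders:
21 | 
22 | ```python
23 | from ultralytics import YOLO
24 | 
25 | model = YOLO("yolov8n-seg.pt")
26 | results = model("bus.jpg")
27 | 
28 | for result in results:
29 |     if result.masks is not None:
30 |         print(result.masks.data.shape)  # (num_instances, height, width) binary masks
31 | ```
32 | 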
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/segment/train.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Maximize your YOLO model's performance with our SegmentationTrainer. Explore comprehensive guides and tutorials on ultralytics.com.
3 | keywords: Ultralytics, YOLO, SegmentationTrainer, image segmentation, object detection, model training, YOLO model
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/segment/train.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/train.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/train.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.segment.train.SegmentationTrainer
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.segment.train.train
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/models/yolo/segment/val.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Get practical insights about our SegmentationValidator in Ultralytics YOLO models. Discover functionality details, methods, inputs, and outputs.
3 | keywords: Ultralytics, YOLO, SegmentationValidator, model segmentation, image classification, object detection
4 | ---
5 |
6 | # Reference for `ultralytics/models/yolo/segment/val.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/val.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/val.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.models.yolo.segment.val.SegmentationValidator
14 |
15 |
16 | ---
17 | ## ::: ultralytics.models.yolo.segment.val.val
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/nn/autobackend.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about the Ultralytics AutoBackend class and the nn.autobackend.check_class_names utility. Optimize your YOLO models seamlessly.
3 | keywords: Ultralytics, AutoBackend, check_class_names, YOLO, YOLO models, optimization
4 | ---
5 |
6 | # Reference for `ultralytics/nn/autobackend.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/autobackend.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/autobackend.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.nn.autobackend.AutoBackend
14 |
15 |
16 | ---
17 | ## ::: ultralytics.nn.autobackend.check_class_names
18 |
19 |
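20 | A minimal sketch of `check_class_names`, which normalizes class names into a `{int: str}` mapping and validates the indices:
21 | 
22 | ```python
23 | from ultralytics.nn.autobackend import check_class_names
24 | 
25 | names = check_class_names(["person", "bicycle", "car"])
26 | print(names)  # {0: 'person', 1: 'bicycle', 2: 'car'}
27 | ```
28 | 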
--------------------------------------------------------------------------------
/Detector/docs/reference/nn/modules/block.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics YOLO neural network modules, Proto to BottleneckCSP. Detailed explanation of each module with easy-to-follow code examples.
3 | keywords: YOLO, Ultralytics, neural network, nn.modules.block, Proto, HGBlock, SPPF, C2, C3, RepC3, C3Ghost, Bottleneck, BottleneckCSP
4 | ---
5 |
6 | # Reference for `ultralytics/nn/modules/block.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/block.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/block.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.nn.modules.block.DFL
14 |
15 |
16 | ---
17 | ## ::: ultralytics.nn.modules.block.Proto
18 |
19 |
20 | ---
21 | ## ::: ultralytics.nn.modules.block.HGStem
22 |
23 |
24 | ---
25 | ## ::: ultralytics.nn.modules.block.HGBlock
26 |
27 |
28 | ---
29 | ## ::: ultralytics.nn.modules.block.SPP
30 |
31 |
32 | ---
33 | ## ::: ultralytics.nn.modules.block.SPPF
34 |
35 |
36 | ---
37 | ## ::: ultralytics.nn.modules.block.C1
38 |
39 |
40 | ---
41 | ## ::: ultralytics.nn.modules.block.C2
42 |
43 |
44 | ---
45 | ## ::: ultralytics.nn.modules.block.C2f
46 |
47 |
48 | ---
49 | ## ::: ultralytics.nn.modules.block.C3
50 |
51 |
52 | ---
53 | ## ::: ultralytics.nn.modules.block.C3x
54 |
55 |
56 | ---
57 | ## ::: ultralytics.nn.modules.block.RepC3
58 |
59 |
60 | ---
61 | ## ::: ultralytics.nn.modules.block.C3TR
62 |
63 |
64 | ---
65 | ## ::: ultralytics.nn.modules.block.C3Ghost
66 |
67 |
68 | ---
69 | ## ::: ultralytics.nn.modules.block.GhostBottleneck
70 |
71 |
72 | ---
73 | ## ::: ultralytics.nn.modules.block.Bottleneck
74 |
75 |
76 | ---
77 | ## ::: ultralytics.nn.modules.block.BottleneckCSP
78 |
79 |
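80 | A minimal sketch of two of the blocks above in isolation; channel counts and tensor sizes are arbitrary choices for illustration:
81 | 
82 | ```python
83 | import torch
84 | 
85 | from ultralytics.nn.modules.block import SPPF, C2f
86 | 
87 | x = torch.randn(1, 64, 32, 32)
88 | 
89 | # C2f: the CSP-style bottleneck block used throughout the YOLOv8 backbone
90 | m = C2f(c1=64, c2=64, n=2, shortcut=True)
91 | print(m(x).shape)  # torch.Size([1, 64, 32, 32])
92 | 
93 | # SPPF: spatial pyramid pooling (fast) keeps the spatial size and changes channels
94 | p = SPPF(c1=64, c2=128, k=5)
95 | print(p(x).shape)  # torch.Size([1, 128, 32, 32])
96 | ```
97 | 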
--------------------------------------------------------------------------------
/Detector/docs/reference/nn/modules/conv.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore various Ultralytics convolution modules including Conv2, DWConv, ConvTranspose, GhostConv, Channel Attention and more.
3 | keywords: Ultralytics, Convolution Modules, Conv2, DWConv, ConvTranspose, GhostConv, ChannelAttention, CBAM, autopad
4 | ---
5 |
6 | # Reference for `ultralytics/nn/modules/conv.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/conv.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/conv.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.nn.modules.conv.Conv
14 |
15 |
16 | ---
17 | ## ::: ultralytics.nn.modules.conv.Conv2
18 |
19 |
20 | ---
21 | ## ::: ultralytics.nn.modules.conv.LightConv
22 |
23 |
24 | ---
25 | ## ::: ultralytics.nn.modules.conv.DWConv
26 |
27 |
28 | ---
29 | ## ::: ultralytics.nn.modules.conv.DWConvTranspose2d
30 |
31 |
32 | ---
33 | ## ::: ultralytics.nn.modules.conv.ConvTranspose
34 |
35 |
36 | ---
37 | ## ::: ultralytics.nn.modules.conv.Focus
38 |
39 |
40 | ---
41 | ## ::: ultralytics.nn.modules.conv.GhostConv
42 |
43 |
44 | ---
45 | ## ::: ultralytics.nn.modules.conv.RepConv
46 |
47 |
48 | ---
49 | ## ::: ultralytics.nn.modules.conv.ChannelAttention
50 |
51 |
52 | ---
53 | ## ::: ultralytics.nn.modules.conv.SpatialAttention
54 |
55 |
56 | ---
57 | ## ::: ultralytics.nn.modules.conv.CBAM
58 |
59 |
60 | ---
61 | ## ::: ultralytics.nn.modules.conv.Concat
62 |
63 |
64 | ---
65 | ## ::: ultralytics.nn.modules.conv.autopad
66 |
67 |
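68 | A minimal sketch of `Conv` and `autopad`; channel counts and tensor sizes are arbitrary:
69 | 
70 | ```python
71 | import torch
72 | 
73 | from ultralytics.nn.modules.conv import Conv, autopad
74 | 
75 | # autopad computes the padding that preserves spatial size for a given kernel
76 | print(autopad(3), autopad(5))  # 1 2
77 | 
78 | # Conv is Conv2d + BatchNorm2d + SiLU; a stride of 2 halves the feature map
79 | m = Conv(c1=3, c2=16, k=3, s=2)
80 | y = m(torch.randn(1, 3, 64, 64))
81 | print(y.shape)  # torch.Size([1, 16, 32, 32])
82 | ```
83 | 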
--------------------------------------------------------------------------------
/Detector/docs/reference/nn/modules/head.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore docs covering Ultralytics YOLO detection, pose & RTDETRDecoder. Comprehensive guides to help you understand Ultralytics nn modules.
3 | keywords: Ultralytics, YOLO, Detection, Pose, RTDETRDecoder, nn modules, guides
4 | ---
5 |
6 | # Reference for `ultralytics/nn/modules/head.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/head.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/head.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.nn.modules.head.Detect
14 |
15 |
16 | ---
17 | ## ::: ultralytics.nn.modules.head.Segment
18 |
19 |
20 | ---
21 | ## ::: ultralytics.nn.modules.head.Pose
22 |
23 |
24 | ---
25 | ## ::: ultralytics.nn.modules.head.Classify
26 |
27 |
28 | ---
29 | ## ::: ultralytics.nn.modules.head.RTDETRDecoder
30 |
31 |
--------------------------------------------------------------------------------
/Detector/docs/reference/nn/modules/transformer.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about Ultralytics transformer encoder, layer, MLP block, LayerNorm2d and the deformable transformer decoder layer. Expand your understanding of these crucial AI modules.
3 | keywords: Ultralytics, Ultralytics documentation, TransformerEncoderLayer, TransformerLayer, MLPBlock, LayerNorm2d, DeformableTransformerDecoderLayer
4 | ---
5 |
6 | # Reference for `ultralytics/nn/modules/transformer.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/transformer.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/transformer.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.nn.modules.transformer.TransformerEncoderLayer
14 |
15 |
16 | ---
17 | ## ::: ultralytics.nn.modules.transformer.AIFI
18 |
19 |
20 | ---
21 | ## ::: ultralytics.nn.modules.transformer.TransformerLayer
22 |
23 |
24 | ---
25 | ## ::: ultralytics.nn.modules.transformer.TransformerBlock
26 |
27 |
28 | ---
29 | ## ::: ultralytics.nn.modules.transformer.MLPBlock
30 |
31 |
32 | ---
33 | ## ::: ultralytics.nn.modules.transformer.MLP
34 |
35 |
36 | ---
37 | ## ::: ultralytics.nn.modules.transformer.LayerNorm2d
38 |
39 |
40 | ---
41 | ## ::: ultralytics.nn.modules.transformer.MSDeformAttn
42 |
43 |
44 | ---
45 | ## ::: ultralytics.nn.modules.transformer.DeformableTransformerDecoderLayer
46 |
47 |
48 | ---
49 | ## ::: ultralytics.nn.modules.transformer.DeformableTransformerDecoder
50 |
51 |
--------------------------------------------------------------------------------
/Detector/docs/reference/nn/modules/utils.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics neural network utils, such as bias_init_with_prob, inverse_sigmoid and multi_scale_deformable_attn_pytorch functions.
3 | keywords: Ultralytics, neural network, nn.modules.utils, bias_init_with_prob, inverse_sigmoid, multi_scale_deformable_attn_pytorch
4 | ---
5 |
6 | # Reference for `ultralytics/nn/modules/utils.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/utils.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.nn.modules.utils._get_clones
14 |
15 |
16 | ---
17 | ## ::: ultralytics.nn.modules.utils.bias_init_with_prob
18 |
19 |
20 | ---
21 | ## ::: ultralytics.nn.modules.utils.linear_init_
22 |
23 |
24 | ---
25 | ## ::: ultralytics.nn.modules.utils.inverse_sigmoid
26 |
27 |
28 | ---
29 | ## ::: ultralytics.nn.modules.utils.multi_scale_deformable_attn_pytorch
30 |
31 |
--------------------------------------------------------------------------------
/Detector/docs/reference/nn/tasks.md:
--------------------------------------------------------------------------------
1 | # Reference for `ultralytics/nn/tasks.py`
2 |
3 | !!! note
4 |
5 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/tasks.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/tasks.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
6 |
7 | ---
8 | ## ::: ultralytics.nn.tasks.BaseModel
9 |
10 |
11 | ---
12 | ## ::: ultralytics.nn.tasks.DetectionModel
13 |
14 |
15 | ---
16 | ## ::: ultralytics.nn.tasks.SegmentationModel
17 |
18 |
19 | ---
20 | ## ::: ultralytics.nn.tasks.PoseModel
21 |
22 |
23 | ---
24 | ## ::: ultralytics.nn.tasks.ClassificationModel
25 |
26 |
27 | ---
28 | ## ::: ultralytics.nn.tasks.RTDETRDetectionModel
29 |
30 |
31 | ---
32 | ## ::: ultralytics.nn.tasks.Ensemble
33 |
34 |
35 | ---
36 | ## ::: ultralytics.nn.tasks.temporary_modules
37 |
38 |
39 | ---
40 | ## ::: ultralytics.nn.tasks.torch_safe_load
41 |
42 |
43 | ---
44 | ## ::: ultralytics.nn.tasks.attempt_load_weights
45 |
46 |
47 | ---
48 | ## ::: ultralytics.nn.tasks.attempt_load_one_weight
49 |
50 |
51 | ---
52 | ## ::: ultralytics.nn.tasks.parse_model
53 |
54 |
55 | ---
56 | ## ::: ultralytics.nn.tasks.yaml_model_load
57 |
58 |
59 | ---
60 | ## ::: ultralytics.nn.tasks.guess_model_scale
61 |
62 |
63 | ---
64 | ## ::: ultralytics.nn.tasks.guess_model_task
65 |
66 |
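67 | A minimal sketch of building a model directly from a YAML config; `yolov8n.yaml` ships with the package, and the raw output format differs between train and eval modes:
68 | 
69 | ```python
70 | import torch
71 | 
72 | from ultralytics.nn.tasks import DetectionModel
73 | 
74 | # Build an untrained detection model from its architecture definition
75 | model = DetectionModel(cfg="yolov8n.yaml", ch=3, nc=80)
76 | preds = model(torch.randn(1, 3, 640, 640))  # raw detection head outputs
77 | ```
78 | 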
--------------------------------------------------------------------------------
/Detector/docs/reference/trackers/basetrack.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Get familiar with TrackState in Ultralytics. Learn how it is used in the BaseTrack of the Ultralytics tracker for enhanced functionality.
3 | keywords: Ultralytics, TrackState, BaseTrack, Ultralytics tracker, Ultralytics documentation
4 | ---
5 |
6 | # Reference for `ultralytics/trackers/basetrack.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/basetrack.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/basetrack.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.trackers.basetrack.TrackState
14 |
15 |
16 | ---
17 | ## ::: ultralytics.trackers.basetrack.BaseTrack
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/trackers/bot_sort.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Master the use of Ultralytics BOTrack, a key component of the powerful Ultralytics tracking system. Learn to integrate and use BOTSORT in your projects.
3 | keywords: Ultralytics, BOTSORT, BOTrack, tracking system, official documentation, machine learning, AI tracking
4 | ---
5 |
6 | # Reference for `ultralytics/trackers/bot_sort.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/bot_sort.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/bot_sort.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.trackers.bot_sort.BOTrack
14 |
15 |
16 | ---
17 | ## ::: ultralytics.trackers.bot_sort.BOTSORT
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/trackers/byte_tracker.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore in depth the functionality of the Ultralytics BYTETracker and its underlying STrack class. Gain advanced insights to streamline your tracking operations.
3 | keywords: STrack, Ultralytics, BYTETracker, documentation, Ultralytics tracker, object tracking, YOLO
4 | ---
5 |
6 | # Reference for `ultralytics/trackers/byte_tracker.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/byte_tracker.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/byte_tracker.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.trackers.byte_tracker.STrack
14 |
15 |
16 | ---
17 | ## ::: ultralytics.trackers.byte_tracker.BYTETracker
18 |
19 |
--------------------------------------------------------------------------------
/Detector/docs/reference/trackers/track.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics documentation on the prediction callbacks and tracker registration functions. Understand the code and its applications better.
3 | keywords: Ultralytics, YOLO, on predict start, register tracker, prediction functions, documentation
4 | ---
5 |
6 | # Reference for `ultralytics/trackers/track.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/track.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/track.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.trackers.track.on_predict_start
14 |
15 |
16 | ---
17 | ## ::: ultralytics.trackers.track.on_predict_postprocess_end
18 |
19 |
20 | ---
21 | ## ::: ultralytics.trackers.track.register_tracker
22 |
23 |
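24 | These callbacks are registered automatically when tracking is run through the high-level API; a minimal sketch, with the video path as a placeholder:
25 | 
26 | ```python
27 | from ultralytics import YOLO
28 | 
29 | # register_tracker() wires the tracking callbacks into the model under the hood
30 | model = YOLO("yolov8n.pt")
31 | results = model.track(source="video.mp4", tracker="bytetrack.yaml", show=False)
32 | ```
33 | 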
--------------------------------------------------------------------------------
/Detector/docs/reference/trackers/utils/gmc.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the Ultralytics GMC tool in our comprehensive documentation. Learn how it works, best practices, and implementation advice.
3 | keywords: Ultralytics, GMC utility, Ultralytics documentation, Ultralytics tracker, machine learning tools
4 | ---
5 |
6 | # Reference for `ultralytics/trackers/utils/gmc.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/utils/gmc.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/utils/gmc.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.trackers.utils.gmc.GMC
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/trackers/utils/kalman_filter.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore KalmanFilterXYAH, a key component of Ultralytics trackers. Understand its utilities and learn to leverage it in your own projects.
3 | keywords: Ultralytics, KalmanFilterXYAH, tracker, documentation, guide
4 | ---
5 |
6 | # Reference for `ultralytics/trackers/utils/kalman_filter.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/utils/kalman_filter.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/utils/kalman_filter.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.trackers.utils.kalman_filter.KalmanFilterXYAH
14 |
15 |
16 | ---
17 | ## ::: ultralytics.trackers.utils.kalman_filter.KalmanFilterXYWH
18 |
19 |
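20 | A minimal sketch of one predict/update cycle with `KalmanFilterXYAH`; the measurement values are arbitrary and use the (x, y, aspect ratio, height) convention:
21 | 
22 | ```python
23 | import numpy as np
24 | 
25 | from ultralytics.trackers.utils.kalman_filter import KalmanFilterXYAH
26 | 
27 | kf = KalmanFilterXYAH()
28 | 
29 | # Measurement: box center (x, y), aspect ratio a and height h
30 | measurement = np.array([320.0, 240.0, 0.5, 160.0])
31 | mean, cov = kf.initiate(measurement)
32 | 
33 | # One predict/update cycle against the same measurement
34 | mean, cov = kf.predict(mean, cov)
35 | mean, cov = kf.update(mean, cov, measurement)
36 | print(mean[:4])
37 | ```
38 | 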
--------------------------------------------------------------------------------
/Detector/docs/reference/trackers/utils/matching.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore in-depth guidance for using Ultralytics trackers utils matching, including merge_matches, linear_assignment, iou_distance, embedding_distance, fuse_motion, and fuse_score.
3 | keywords: Ultralytics, Trackers Utils, Matching, merge_matches, linear_assignment, iou_distance, embedding_distance, fuse_motion, fuse_score, documentation
4 | ---
5 |
6 | # Reference for `ultralytics/trackers/utils/matching.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/utils/matching.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/trackers/utils/matching.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.trackers.utils.matching.merge_matches
14 |
15 |
16 | ---
17 | ## ::: ultralytics.trackers.utils.matching._indices_to_matches
18 |
19 |
20 | ---
21 | ## ::: ultralytics.trackers.utils.matching.linear_assignment
22 |
23 |
24 | ---
25 | ## ::: ultralytics.trackers.utils.matching.ious
26 |
27 |
28 | ---
29 | ## ::: ultralytics.trackers.utils.matching.iou_distance
30 |
31 |
32 | ---
33 | ## ::: ultralytics.trackers.utils.matching.v_iou_distance
34 |
35 |
36 | ---
37 | ## ::: ultralytics.trackers.utils.matching.embedding_distance
38 |
39 |
40 | ---
41 | ## ::: ultralytics.trackers.utils.matching.gate_cost_matrix
42 |
43 |
44 | ---
45 | ## ::: ultralytics.trackers.utils.matching.fuse_motion
46 |
47 |
48 | ---
49 | ## ::: ultralytics.trackers.utils.matching.fuse_iou
50 |
51 |
52 | ---
53 | ## ::: ultralytics.trackers.utils.matching.fuse_score
54 |
55 |
56 | ---
57 | ## ::: ultralytics.trackers.utils.matching.bbox_ious
58 |
59 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/autobatch.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics documentation for the check_train_batch_size utility in the autobatch module. Understand how it can improve your machine learning workflow.
3 | keywords: Ultralytics, check_train_batch_size, autobatch, utility, machine learning, documentation
4 | ---
5 |
6 | # Reference for `ultralytics/utils/autobatch.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/autobatch.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/autobatch.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.autobatch.check_train_batch_size
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.autobatch.autobatch
18 |
19 |
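20 | A minimal sketch of `check_train_batch_size`; it needs a CUDA device and a loaded model, both of which are assumptions here:
21 | 
22 | ```python
23 | from ultralytics import YOLO
24 | from ultralytics.utils.autobatch import check_train_batch_size
25 | 
26 | # Estimate the largest batch size that fits in GPU memory for 640px training
27 | model = YOLO("yolov8n.pt").model.cuda()
28 | batch_size = check_train_batch_size(model, imgsz=640, amp=True)
29 | print(batch_size)
30 | ```
31 | 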
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/benchmarks.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover how to profile your models using Ultralytics utilities. Enhance performance, optimize your benchmarks, and learn best practices.
3 | keywords: Ultralytics, ProfileModels, benchmarks, model profiling, performance optimization
4 | ---
5 |
6 | # Reference for `ultralytics/utils/benchmarks.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/benchmarks.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/benchmarks.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.benchmarks.ProfileModels
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.benchmarks.benchmark
18 |
19 |
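20 | A minimal sketch of the `benchmark` function; the dataset, image size, and device are assumptions for illustration:
21 | 
22 | ```python
23 | from ultralytics.utils.benchmarks import benchmark
24 | 
25 | # Benchmark export formats for speed and accuracy on the small coco8 dataset
26 | benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device="cpu")
27 | ```
28 | 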
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/callbacks/clearml.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Uncover the specifics of Ultralytics ClearML callbacks, from pretrain routine start to training end. Boost your ML model performance.
3 | keywords: Ultralytics, clearML, callbacks, pretrain routine start, validation end, train epoch end, training end
4 | ---
5 |
6 | # Reference for `ultralytics/utils/callbacks/clearml.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/clearml.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/clearml.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.callbacks.clearml._log_debug_samples
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.callbacks.clearml._log_plot
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.callbacks.clearml.on_pretrain_routine_start
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.callbacks.clearml.on_train_epoch_end
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.callbacks.clearml.on_fit_epoch_end
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.callbacks.clearml.on_val_end
34 |
35 |
36 | ---
37 | ## ::: ultralytics.utils.callbacks.clearml.on_train_end
38 |
39 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/callbacks/dvc.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Browse through Ultralytics YOLO docs to learn about important logging and callback functions used in training and pretraining models.
3 | keywords: Ultralytics, YOLO, callbacks, logger, training, pretraining, machine learning, models
4 | ---
5 |
6 | # Reference for `ultralytics/utils/callbacks/dvc.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/dvc.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/dvc.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.callbacks.dvc._logger_disabled
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.callbacks.dvc._log_images
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.callbacks.dvc._log_plots
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.callbacks.dvc._log_confusion_matrix
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.callbacks.dvc.on_pretrain_routine_start
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.callbacks.dvc.on_pretrain_routine_end
34 |
35 |
36 | ---
37 | ## ::: ultralytics.utils.callbacks.dvc.on_train_start
38 |
39 |
40 | ---
41 | ## ::: ultralytics.utils.callbacks.dvc.on_train_epoch_start
42 |
43 |
44 | ---
45 | ## ::: ultralytics.utils.callbacks.dvc.on_fit_epoch_end
46 |
47 |
48 | ---
49 | ## ::: ultralytics.utils.callbacks.dvc.on_train_end
50 |
51 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/callbacks/hub.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore the detailed information on key Ultralytics callbacks such as on_pretrain_routine_end, on_model_save, on_train_start, and on_predict_start.
3 | keywords: Ultralytics, callbacks, on_pretrain_routine_end, on_model_save, on_train_start, on_predict_start, hub, training
4 | ---
5 |
6 | # Reference for `ultralytics/utils/callbacks/hub.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/hub.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/hub.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.callbacks.hub.on_pretrain_routine_end
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.callbacks.hub.on_fit_epoch_end
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.callbacks.hub.on_model_save
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.callbacks.hub.on_train_end
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.callbacks.hub.on_train_start
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.callbacks.hub.on_val_start
34 |
35 |
36 | ---
37 | ## ::: ultralytics.utils.callbacks.hub.on_predict_start
38 |
39 |
40 | ---
41 | ## ::: ultralytics.utils.callbacks.hub.on_export_start
42 |
43 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/callbacks/mlflow.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Understand routines at the end of pre-training and training in Ultralytics. Elevate your MLflow callbacks expertise.
3 | keywords: Ultralytics, MLflow, Callbacks, on_pretrain_routine_end, on_train_end, Machine Learning, Training
4 | ---
5 |
6 | # Reference for `ultralytics/utils/callbacks/mlflow.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/mlflow.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/mlflow.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.callbacks.mlflow.on_pretrain_routine_end
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.callbacks.mlflow.on_fit_epoch_end
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.callbacks.mlflow.on_train_end
22 |
23 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/callbacks/neptune.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore exhaustive details about Ultralytics callbacks in Neptune, with specifics about scalar logging, routine start, and more.
3 | keywords: Ultralytics, Neptune callbacks, on_train_epoch_end, on_val_end, _log_plot, _log_images, on_pretrain_routine_start, on_fit_epoch_end, on_train_end
4 | ---
5 |
6 | # Reference for `ultralytics/utils/callbacks/neptune.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/neptune.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/neptune.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.callbacks.neptune._log_scalars
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.callbacks.neptune._log_images
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.callbacks.neptune._log_plot
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.callbacks.neptune.on_pretrain_routine_start
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.callbacks.neptune.on_train_epoch_end
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.callbacks.neptune.on_fit_epoch_end
34 |
35 |
36 | ---
37 | ## ::: ultralytics.utils.callbacks.neptune.on_val_end
38 |
39 |
40 | ---
41 | ## ::: ultralytics.utils.callbacks.neptune.on_train_end
42 |
43 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/callbacks/raytune.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover the functionality of the on_fit_epoch_end callback in the Ultralytics YOLO framework. Learn how to end an epoch in your deep learning projects.
3 | keywords: Ultralytics, YOLO, on_fit_epoch_end, callbacks, documentation, deep learning, YOLO framework
4 | ---
5 |
6 | # Reference for `ultralytics/utils/callbacks/raytune.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/raytune.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/raytune.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.callbacks.raytune.on_fit_epoch_end
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/callbacks/tensorboard.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics YOLO Docs for a deep understanding of log_scalars, on_batch_end & other callback utilities embedded in the tensorboard module.
3 | keywords: Ultralytics, YOLO, documentation, callback utilities, log_scalars, on_batch_end, tensorboard
4 | ---
5 |
6 | # Reference for `ultralytics/utils/callbacks/tensorboard.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/tensorboard.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/tensorboard.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.callbacks.tensorboard._log_scalars
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.callbacks.tensorboard.on_pretrain_routine_start
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.callbacks.tensorboard.on_batch_end
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.callbacks.tensorboard.on_fit_epoch_end
26 |
27 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/callbacks/wb.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Deep dive into Ultralytics callbacks. Learn how to use the _log_plots, on_fit_epoch_end, and on_train_end functions effectively.
3 | keywords: Ultralytics, callbacks, _log_plots, on_fit_epoch_end, on_train_end
4 | ---
5 |
6 | # Reference for `ultralytics/utils/callbacks/wb.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/wb.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/wb.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.callbacks.wb._log_plots
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.callbacks.wb.on_pretrain_routine_start
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.callbacks.wb.on_fit_epoch_end
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.callbacks.wb.on_train_epoch_end
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.callbacks.wb.on_train_end
30 |
31 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/checks.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about our routine checks that safeguard Ultralytics operations including ASCII, font, YOLO file, YAML, Python and torchvision checks.
3 | keywords: Ultralytics, utility checks, ASCII, check_version, pip_update, check_python, check_torchvision, check_yaml, YOLO filename
4 | ---
5 |
6 | # Reference for `ultralytics/utils/checks.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/checks.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/checks.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.checks.is_ascii
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.checks.check_imgsz
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.checks.check_version
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.checks.check_latest_pypi_version
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.checks.check_pip_update_available
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.checks.check_font
34 |
35 |
36 | ---
37 | ## ::: ultralytics.utils.checks.check_python
38 |
39 |
40 | ---
41 | ## ::: ultralytics.utils.checks.check_requirements
42 |
43 |
44 | ---
45 | ## ::: ultralytics.utils.checks.check_torchvision
46 |
47 |
48 | ---
49 | ## ::: ultralytics.utils.checks.check_suffix
50 |
51 |
52 | ---
53 | ## ::: ultralytics.utils.checks.check_yolov5u_filename
54 |
55 |
56 | ---
57 | ## ::: ultralytics.utils.checks.check_file
58 |
59 |
60 | ---
61 | ## ::: ultralytics.utils.checks.check_yaml
62 |
63 |
64 | ---
65 | ## ::: ultralytics.utils.checks.check_imshow
66 |
67 |
68 | ---
69 | ## ::: ultralytics.utils.checks.check_yolo
70 |
71 |
72 | ---
73 | ## ::: ultralytics.utils.checks.check_amp
74 |
75 |
76 | ---
77 | ## ::: ultralytics.utils.checks.git_describe
78 |
79 |
80 | ---
81 | ## ::: ultralytics.utils.checks.print_args
82 |
83 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/dist.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover the role of dist.find_free_network_port & dist.generate_ddp_command in Ultralytics DDP utilities. Use our guide for efficient deployment.
3 | keywords: Ultralytics, DDP, DDP utility functions, Distributed Data Parallel, find free network port, generate DDP command
4 | ---
5 |
6 | # Reference for `ultralytics/utils/dist.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/dist.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/dist.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.dist.find_free_network_port
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.dist.generate_ddp_file
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.dist.generate_ddp_command
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.dist.ddp_cleanup
26 |
27 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/downloads.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about the download utilities in Ultralytics YOLO, featuring functions like is_url, check_disk_space, get_github_assets, and download.
3 | keywords: Ultralytics, YOLO, download utilities, is_url, check_disk_space, get_github_assets, download, documentation
4 | ---
5 |
6 | # Reference for `ultralytics/utils/downloads.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/downloads.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/downloads.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.downloads.is_url
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.downloads.zip_directory
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.downloads.unzip_file
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.downloads.check_disk_space
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.downloads.get_google_drive_file_info
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.downloads.safe_download
34 |
35 |
36 | ---
37 | ## ::: ultralytics.utils.downloads.get_github_assets
38 |
39 |
40 | ---
41 | ## ::: ultralytics.utils.downloads.attempt_download_asset
42 |
43 |
44 | ---
45 | ## ::: ultralytics.utils.downloads.download
46 |
47 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/errors.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about the HUBModelError in Ultralytics. Enhance your understanding, troubleshoot errors and optimize your machine learning projects.
3 | keywords: Ultralytics, HUBModelError, Machine Learning, Error troubleshooting, Ultralytics documentation
4 | ---
5 |
6 | # Reference for `ultralytics/utils/errors.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/errors.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/errors.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.errors.HUBModelError
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/files.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Discover how to use Ultralytics utility functions for file-related operations including incrementing paths, finding file age, checking file size and creating directories.
3 | keywords: Ultralytics, utility functions, file operations, working directory, file age, file size, create directories
4 | ---
5 |
6 | # Reference for `ultralytics/utils/files.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/files.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/files.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.files.WorkingDirectory
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.files.spaces_in_path
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.files.increment_path
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.files.file_age
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.files.file_date
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.files.file_size
34 |
35 |
36 | ---
37 | ## ::: ultralytics.utils.files.get_latest_run
38 |
39 |
40 | ---
41 | ## ::: ultralytics.utils.files.make_dirs
42 |
43 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/instance.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Dive into Ultralytics detailed utility guide. Learn about Bboxes, _ntuple and more from Ultralytics utils.instance module.
3 | keywords: Ultralytics, Bboxes, _ntuple, utility, ultralytics utils.instance
4 | ---
5 |
6 | # Reference for `ultralytics/utils/instance.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/instance.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/instance.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.instance.Bboxes
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.instance.Instances
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.instance._ntuple
22 |
23 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/loss.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics' versatile loss functions - VarifocalLoss, BboxLoss, v8DetectionLoss, v8PoseLoss. Improve your accuracy on YOLO implementations.
3 | keywords: Ultralytics, Loss functions, VarifocalLoss, BboxLoss, v8DetectionLoss, v8PoseLoss, YOLO, Ultralytics Documentation
4 | ---
5 |
6 | # Reference for `ultralytics/utils/loss.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/loss.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/loss.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.loss.VarifocalLoss
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.loss.FocalLoss
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.loss.BboxLoss
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.loss.KeypointLoss
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.loss.v8DetectionLoss
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.loss.v8SegmentationLoss
34 |
35 |
36 | ---
37 | ## ::: ultralytics.utils.loss.v8PoseLoss
38 |
39 |
40 | ---
41 | ## ::: ultralytics.utils.loss.v8ClassificationLoss
42 |
43 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/patches.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn about Ultralytics utils patches including imread, imshow and torch_save. Enhance your image processing skills.
3 | keywords: Ultralytics, Utils, Patches, imread, imshow, torch_save, image processing
4 | ---
5 |
6 | # Reference for `ultralytics/utils/patches.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/patches.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/patches.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.patches.imread
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.patches.imwrite
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.patches.imshow
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.patches.torch_save
26 |
27 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/plotting.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Master advanced plotting utils from Ultralytics including color annotations, label and image plotting, and feature visualization.
3 | keywords: Ultralytics, plotting, utils, color annotation, label plotting, image plotting, feature visualization
4 | ---
5 |
6 | # Reference for `ultralytics/utils/plotting.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/plotting.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/plotting.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.plotting.Colors
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.plotting.Annotator
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.plotting.plot_labels
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.plotting.save_one_box
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.plotting.plot_images
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.plotting.plot_results
34 |
35 |
36 | ---
37 | ## ::: ultralytics.utils.plotting.output_to_target
38 |
39 |
40 | ---
41 | ## ::: ultralytics.utils.plotting.feature_visualization
42 |
43 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/tal.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Explore Ultralytics utilities for optimized task assignment, bounding box creation, and distance calculation. Learn more about algorithm implementations.
3 | keywords: Ultralytics, task aligned assigner, select highest overlaps, make anchors, dist2bbox, bbox2dist, utilities, algorithm
4 | ---
5 |
6 | # Reference for `ultralytics/utils/tal.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/tal.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/tal.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.tal.TaskAlignedAssigner
14 |
15 |
16 | ---
17 | ## ::: ultralytics.utils.tal.select_candidates_in_gts
18 |
19 |
20 | ---
21 | ## ::: ultralytics.utils.tal.select_highest_overlaps
22 |
23 |
24 | ---
25 | ## ::: ultralytics.utils.tal.make_anchors
26 |
27 |
28 | ---
29 | ## ::: ultralytics.utils.tal.dist2bbox
30 |
31 |
32 | ---
33 | ## ::: ultralytics.utils.tal.bbox2dist
34 |
35 |
--------------------------------------------------------------------------------
/Detector/docs/reference/utils/tuner.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: Learn to utilize the run_ray_tune function with Ultralytics. Make your machine learning tuning process easier and more efficient.
3 | keywords: Ultralytics, run_ray_tune, machine learning tuning, machine learning efficiency
4 | ---
5 |
6 | # Reference for `ultralytics/utils/tuner.py`
7 |
8 | !!! note
9 |
10 | Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/tuner.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/tuner.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
11 |
12 | ---
13 | ## ::: ultralytics.utils.tuner.run_ray_tune
14 |
15 |
--------------------------------------------------------------------------------
/Detector/docs/robots.txt:
--------------------------------------------------------------------------------
1 | User-agent: *
2 |
--------------------------------------------------------------------------------
/Detector/docs/stylesheets/style.css:
--------------------------------------------------------------------------------
1 | /* Table format like GitHub ----------------------------------------------------------------------------------------- */
2 | th, td {
3 | border: 1px solid var(--md-typeset-table-color);
4 | border-spacing: 0;
5 | border-bottom: none;
6 | border-left: none;
7 | border-top: none;
8 | }
9 |
10 | .md-typeset__table {
11 | line-height: 1;
12 | }
13 |
14 | .md-typeset__table table:not([class]) {
15 | font-size: .74rem;
16 | border-right: none;
17 | }
18 |
19 | .md-typeset__table table:not([class]) td,
20 | .md-typeset__table table:not([class]) th {
21 | padding: 9px;
22 | }
23 |
24 | /* light mode alternating table bg colors */
25 | .md-typeset__table tr:nth-child(2n) {
26 | background-color: #f8f8f8;
27 | }
28 |
29 | /* dark mode alternating table bg colors */
30 | [data-md-color-scheme="slate"] .md-typeset__table tr:nth-child(2n) {
31 | background-color: hsla(var(--md-hue),25%,25%,1)
32 | }
33 | /* Table format like GitHub ----------------------------------------------------------------------------------------- */
34 |
35 | /* Code block vertical scroll */
36 | div.highlight {
37 | max-height: 20rem;
38 | overflow-y: auto; /* for adding a scrollbar when needed */
39 | }
40 |
--------------------------------------------------------------------------------
/Detector/examples/YOLOv8-CPP-Inference/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.5)
2 |
3 | project(Yolov8CPPInference VERSION 0.1)
4 |
5 | set(CMAKE_INCLUDE_CURRENT_DIR ON)
6 |
7 | # CUDA
8 | set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda")
9 | find_package(CUDA 11 REQUIRED)
10 |
11 | set(CMAKE_CUDA_STANDARD 11)
12 | set(CMAKE_CUDA_STANDARD_REQUIRED ON)
13 | # !CUDA
14 |
15 | # OpenCV
16 | find_package(OpenCV REQUIRED)
17 | include_directories(${OpenCV_INCLUDE_DIRS})
18 | # !OpenCV
19 |
20 | set(PROJECT_SOURCES
21 | main.cpp
22 |
23 | inference.h
24 | inference.cpp
25 | )
26 |
27 | add_executable(Yolov8CPPInference ${PROJECT_SOURCES})
28 | target_link_libraries(Yolov8CPPInference ${OpenCV_LIBS})
29 |
--------------------------------------------------------------------------------
/Detector/examples/YOLOv8-CPP-Inference/README.md:
--------------------------------------------------------------------------------
1 | # YOLOv8/YOLOv5 Inference C++
2 |
3 | This example demonstrates how to perform inference using YOLOv8 and YOLOv5 models in C++ with OpenCV's DNN API.
4 |
5 | ## Usage
6 |
7 | ```bash
8 | git clone https://github.com/ultralytics/ultralytics
9 | cd ultralytics
10 | pip install .
11 | cd examples/YOLOv8-CPP-Inference
12 |
13 | # Add a **yolov8\_.onnx** and/or **yolov5\_.onnx** model(s) to the ultralytics folder.
14 | # Edit the **main.cpp** to change the **projectBasePath** to match your user.
15 |
16 | # Note that by default the CMake file will try to import the CUDA library to be used with OpenCV's DNN (cuDNN) GPU inference.
17 | # If your OpenCV build does not use CUDA/cuDNN you can remove that import call and run the example on CPU.
18 |
19 | mkdir build
20 | cd build
21 | cmake ..
22 | make
23 | ./Yolov8CPPInference
24 | ```
25 |
26 | ## Exporting YOLOv8 and YOLOv5 Models
27 |
28 | To export YOLOv8 models:
29 |
30 | ```commandline
31 | yolo export model=yolov8s.pt imgsz=480,640 format=onnx opset=12
32 | ```
33 |
34 | To export YOLOv5 models:
35 |
36 | ```commandline
37 | python3 export.py --weights yolov5s.pt --img 480 640 --include onnx --opset 12
38 | ```
39 |
40 | yolov8s.onnx:
41 |
42 | 
43 |
44 | yolov5s.onnx:
45 |
46 | 
47 |
48 | This repository utilizes OpenCV's DNN API to run ONNX exported models of YOLOv5 and YOLOv8. In theory, it should work for YOLOv6 and YOLOv7 as well, but they have not been tested. Note that the example networks are exported with rectangular (640x480) resolutions, but any exported resolution will work. You may want to use the letterbox approach for square images, depending on your use case.
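
As a minimal sketch of the same idea in Python (not part of this example's C++ sources), OpenCV's DNN module can load the exported ONNX model directly. The file names and the 640x480 input size below are assumptions matching the export command above; note that `blobFromImage` performs a plain resize rather than a letterbox.

```python
import cv2

# Load the ONNX model exported above (path is an assumption).
net = cv2.dnn.readNetFromONNX("yolov8s.onnx")

# Build a 640x480 (width x height) blob; this resizes without preserving aspect ratio,
# so use a letterbox preprocessing step instead if your use case requires it.
img = cv2.imread("image.jpg")
blob = cv2.dnn.blobFromImage(img, scalefactor=1 / 255.0, size=(640, 480), swapRB=True, crop=False)

net.setInput(blob)
outputs = net.forward()
print(outputs.shape)  # e.g. (1, 84, 6300) for an 80-class model at 640x480
```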
49 |
50 | The **main** branch version uses Qt as a GUI wrapper. The primary focus here is the **Inference** class file, which demonstrates how to transpose YOLOv8 models to work as YOLOv5 models.
51 |
--------------------------------------------------------------------------------
/Detector/examples/YOLOv8-ONNXRuntime-CPP/README.md:
--------------------------------------------------------------------------------
1 | # YOLOv8 OnnxRuntime C++
2 |
3 | This example demonstrates how to perform inference using YOLOv8 in C++ with ONNX Runtime and OpenCV's API.
4 |
5 | We recommend using Visual Studio to build the project.
6 |
7 | ## Benefits
8 |
9 | - Friendly for deployment in the industrial sector.
10 | - Faster than OpenCV's DNN inference on both CPU and GPU.
11 | - Supports CUDA acceleration.
12 | - Easy to add FP16 inference (using template functions).
13 |
14 | ## Exporting YOLOv8 Models
15 |
16 | To export YOLOv8 models, use the following Python script:
17 |
18 | ```python
19 | from ultralytics import YOLO
20 |
21 | # Load a YOLOv8 model
22 | model = YOLO("yolov8n.pt")
23 |
24 | # Export the model
25 | model.export(format="onnx", opset=12, simplify=True, dynamic=False, imgsz=640)
26 | ```
27 |
28 | ## Dependencies
29 |
30 | | Dependency | Version |
31 | | ----------------------- | -------- |
32 | | Onnxruntime-win-x64-gpu | >=1.14.1 |
33 | | OpenCV | >=4.0.0 |
34 | | C++ | >=17 |
35 |
36 | Note: The dependency on C++17 is due to the usage of the C++17 filesystem feature.
37 |
38 | ## Usage
39 |
40 | ```c++
41 | // CPU inference
42 | DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {imgsz_w, imgsz_h}, class_num, 0.1, 0.5, false};
43 | // GPU inference
44 | DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {imgsz_w, imgsz_h}, class_num, 0.1, 0.5, true};
45 |
46 | // Load your image
47 | cv::Mat img = cv::imread(img_path);
48 |
49 | char* ret = p->CreateSession(params);  // p is a DCSP_CORE* created beforehand, e.g. DCSP_CORE* p = new DCSP_CORE;
50 | std::vector<DCSP_RESULT> res;
51 | ret = p->RunSession(img, res);
52 | ```
53 |
54 | This repository should also work for YOLOv5, which needs a permute operator for the output of the YOLOv5 model, but this has not been implemented yet.
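
The permute mentioned above is just a layout change between prediction-major and attribute-major tensors. A rough illustration in Python/NumPy (shapes are illustrative and not taken from this repository's code; YOLOv5 also carries an extra objectness column, which a permute alone does not reconcile):

```python
import numpy as np

# Dummy YOLOv5-style output: (batch, num_predictions, 4 box + 1 objectness + 80 classes).
v5_output = np.random.rand(1, 25200, 85).astype(np.float32)

# Permute to the attribute-major layout used by YOLOv8 heads: (batch, attributes, predictions).
v5_attribute_major = np.transpose(v5_output, (0, 2, 1))
print(v5_attribute_major.shape)  # (1, 85, 25200)
```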
55 |
--------------------------------------------------------------------------------
/Detector/examples/YOLOv8-ONNXRuntime-CPP/inference.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #define _CRT_SECURE_NO_WARNINGS
4 | #define RET_OK nullptr
5 |
6 | #include <string>
7 | #include <vector>
8 | #include <cstdio>
9 | #include "io.h"
10 | #include "direct.h"
11 | #include "opencv.hpp"
12 | #include <ctime>  // clock_t used by TensorProcess
13 | #include "onnxruntime_cxx_api.h"
14 |
15 |
16 | enum MODEL_TYPE
17 | {
18 | //FLOAT32 MODEL
19 | YOLO_ORIGIN_V5 = 0,
20 | YOLO_ORIGIN_V8 = 1,//only support v8 detector currently
21 | YOLO_POSE_V8 = 2,
22 | YOLO_CLS_V8 = 3
23 | };
24 |
25 |
26 | typedef struct _DCSP_INIT_PARAM
27 | {
28 | std::string ModelPath;
29 | MODEL_TYPE ModelType = YOLO_ORIGIN_V8;
30 | std::vector<int> imgSize = { 640, 640 };
31 |
32 | int classesNum=80;
33 | float RectConfidenceThreshold = 0.6;
34 | float iouThreshold = 0.5;
35 | bool CudaEnable = false;
36 | int LogSeverityLevel = 3;
37 | int IntraOpNumThreads = 1;
38 | }DCSP_INIT_PARAM;
39 |
40 |
41 | typedef struct _DCSP_RESULT
42 | {
43 | int classId;
44 | float confidence;
45 | cv::Rect box;
46 | }DCSP_RESULT;
47 |
48 |
49 | class DCSP_CORE
50 | {
51 | public:
52 | DCSP_CORE();
53 | ~DCSP_CORE();
54 |
55 | public:
56 | char* CreateSession(DCSP_INIT_PARAM &iParams);
57 |
58 |
59 | char* RunSession(cv::Mat &iImg, std::vector<DCSP_RESULT>& oResult);
60 |
61 |
62 | char* WarmUpSession();
63 |
64 |
65 | template<typename N>
66 | char* TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::vector<int64_t>& inputNodeDims, std::vector<DCSP_RESULT>& oResult);
67 |
68 |
69 | private:
70 | Ort::Env env;
71 | Ort::Session* session;
72 | bool cudaEnable;
73 | Ort::RunOptions options;
74 | std::vector<const char*> inputNodeNames;
75 | std::vector<const char*> outputNodeNames;
76 |
77 |
78 | int classesNum;
79 | MODEL_TYPE modelType;
80 | std::vector<int> imgSize;
81 | float rectConfidenceThreshold;
82 | float iouThreshold;
83 | };
84 |
--------------------------------------------------------------------------------
/Detector/examples/YOLOv8-ONNXRuntime-CPP/main.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <string>
3 | #include "inference.h"
4 | #include <filesystem>
5 |
6 |
7 |
8 | void file_iterator(DCSP_CORE*& p)
9 | {
10 | std::filesystem::path img_path = R"(E:\project\Project_C++\DCPS_ONNX\TEST_ORIGIN)";
11 | int k = 0;
12 | for (auto& i : std::filesystem::directory_iterator(img_path))
13 | {
14 | if (i.path().extension() == ".jpg")
15 | {
16 | std::string img_path = i.path().string();
17 | //std::cout << img_path << std::endl;
18 | cv::Mat img = cv::imread(img_path);
19 | std::vector<DCSP_RESULT> res;
20 | char* ret = p->RunSession(img, res);
21 | for (std::size_t j = 0; j < res.size(); j++)
22 | {
23 | cv::rectangle(img, res.at(j).box, cv::Scalar(125, 123, 0), 3);
24 | }
25 |
26 | k++;
27 | cv::imshow("TEST_ORIGIN", img);
28 | cv::waitKey(0);
29 | cv::destroyAllWindows();
30 | //cv::imwrite("E:\\output\\" + std::to_string(k) + ".png", img);
31 | }
32 | }
33 | }
34 |
35 |
36 |
37 | int main()
38 | {
39 | DCSP_CORE* p1 = new DCSP_CORE;
40 | std::string model_path = "yolov8n.onnx";
41 | DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {640, 640}, 80, 0.1, 0.5, false };
42 | char* ret = p1->CreateSession(params);
43 | file_iterator(p1);
44 | }
45 |
--------------------------------------------------------------------------------
/Detector/examples/YOLOv8-ONNXRuntime/README.md:
--------------------------------------------------------------------------------
1 | # YOLOv8 - ONNX Runtime
2 |
3 | This project implements YOLOv8 using ONNX Runtime.
4 |
5 | ## Installation
6 |
7 | To run this project, you need to install the required dependencies. The following instructions will guide you through the installation process.
8 |
9 | ### Installing Required Dependencies
10 |
11 | You can install the required dependencies by running the following command:
12 |
13 | ```bash
14 | pip install -r requirements.txt
15 | ```
16 |
17 | ### Installing `onnxruntime-gpu`
18 |
19 | If you have an NVIDIA GPU and want to leverage GPU acceleration, you can install the onnxruntime-gpu package using the following command:
20 |
21 | ```bash
22 | pip install onnxruntime-gpu
23 | ```
24 |
25 | Note: Make sure you have the appropriate GPU drivers installed on your system.
26 |
27 | ### Installing `onnxruntime` (CPU version)
28 |
29 | If you don't have an NVIDIA GPU or prefer to use the CPU version of onnxruntime, you can install the onnxruntime package using the following command:
30 |
31 | ```bash
32 | pip install onnxruntime
33 | ```
34 |
35 | ### Usage
36 |
37 | After successfully installing the required packages, you can run the YOLOv8 implementation using the following command:
38 |
39 | ```bash
40 | python main.py --model yolov8n.onnx --img image.jpg --conf-thres 0.5 --iou-thres 0.5
41 | ```
42 |
43 | Make sure to replace yolov8n.onnx with the path to your YOLOv8 ONNX model file, image.jpg with the path to your input image, and adjust the confidence threshold (conf-thres) and IoU threshold (iou-thres) values as needed.
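
If you prefer to drive ONNX Runtime from your own script instead of `main.py`, a minimal session setup looks like the sketch below; the model path, input size, and provider choice are assumptions, and the dummy input only illustrates the expected NCHW float32 layout.

```python
import numpy as np
import onnxruntime as ort

# Create an inference session; swap in "CUDAExecutionProvider" if onnxruntime-gpu is installed.
session = ort.InferenceSession("yolov8n.onnx", providers=["CPUExecutionProvider"])

# Build a dummy 1x3x640x640 float32 input to show the expected layout.
input_name = session.get_inputs()[0].name
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)

outputs = session.run(None, {input_name: dummy})
print(outputs[0].shape)  # e.g. (1, 84, 8400) for an 80-class model at 640x640
```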
44 |
--------------------------------------------------------------------------------
/Detector/examples/YOLOv8-OpenCV-ONNX-Python/README.md:
--------------------------------------------------------------------------------
1 | # YOLOv8 - OpenCV
2 |
3 | Implementation of YOLOv8 inference with OpenCV using the ONNX format.
4 | 
5 | Simply clone and run:
6 |
7 | ```bash
8 | pip install -r requirements.txt
9 | python main.py --model yolov8n.onnx --img image.jpg
10 | ```
11 |
12 | If you start from scratch:
13 |
14 | ```bash
15 | pip install ultralytics
16 | yolo export model=yolov8n.pt imgsz=640 format=onnx opset=12
17 | ```
18 |
19 | _\*Make sure to include "opset=12"_
20 |
--------------------------------------------------------------------------------
/Detector/requirements.txt:
--------------------------------------------------------------------------------
1 | # Ultralytics requirements
2 | # Usage: pip install -r requirements.txt
3 |
4 | # Base ----------------------------------------
5 | matplotlib>=3.2.2
6 | numpy>=1.22.2 # pinned by Snyk to avoid a vulnerability
7 | opencv-python>=4.6.0
8 | pillow>=7.1.2
9 | pyyaml>=5.3.1
10 | requests>=2.23.0
11 | scipy>=1.4.1
12 | torch>=1.7.0
13 | torchvision>=0.8.1
14 | tqdm>=4.64.0
15 |
16 | # Logging -------------------------------------
17 | # tensorboard>=2.13.0
18 | # dvclive>=2.12.0
19 | # clearml
20 | # comet
21 |
22 | # Plotting ------------------------------------
23 | pandas>=1.1.4
24 | seaborn>=0.11.0
25 |
26 | # Export --------------------------------------
27 | # coremltools>=6.0,<=6.2 # CoreML export
28 | # onnx>=1.12.0 # ONNX export
29 | # onnxsim>=0.4.1 # ONNX simplifier
30 | # nvidia-pyindex # TensorRT export
31 | # nvidia-tensorrt # TensorRT export
32 | # scikit-learn==0.19.2 # CoreML quantization
33 | # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos)
34 | # tflite-support
35 | # tensorflowjs>=3.9.0 # TF.js export
36 | # openvino-dev>=2023.0 # OpenVINO export
37 |
38 | # Extras --------------------------------------
39 | psutil # system utilization
40 | py-cpuinfo # display CPU info
41 | # thop>=0.1.1 # FLOPs computation
42 | # ipython # interactive notebook
43 | # albumentations>=1.0.3 # training augmentations
44 | # pycocotools>=2.0.6 # COCO mAP
45 | # roboflow
46 |
--------------------------------------------------------------------------------
/Detector/setup.cfg:
--------------------------------------------------------------------------------
1 | # Project-wide configuration file, can be used for package metadata and other tool configurations
2 | # Example usage: global configuration for PEP8 (via flake8) settings or default pytest arguments
3 | # Local usage: pip install pre-commit, pre-commit run --all-files
4 |
5 | [metadata]
6 | license_files = LICENSE
7 | description_file = README.md
8 |
9 | [tool:pytest]
10 | norecursedirs =
11 | .git
12 | dist
13 | build
14 | addopts =
15 | --doctest-modules
16 | --durations=25
17 | --color=yes
18 |
19 | [flake8]
20 | max-line-length = 120
21 | exclude = .tox,*.egg,build,temp
22 | select = E,W,F
23 | doctests = True
24 | verbose = 2
25 | # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
26 | format = pylint
27 | # see: https://www.flake8rules.com/
28 | ignore = E731,F405,E402,W504,E501
29 | # E731: Do not assign a lambda expression, use a def
30 | # F405: name may be undefined, or defined from star imports: module
31 | # E402: module level import not at top of file
32 | # W504: line break after binary operator
33 | # E501: line too long
34 | # removed:
35 | # F401: module imported but unused
36 | # E231: missing whitespace after ‘,’, ‘;’, or ‘:’
37 | # E127: continuation line over-indented for visual indent
38 | # F403: ‘from module import *’ used; unable to detect undefined names
39 |
40 |
41 | [isort]
42 | # https://pycqa.github.io/isort/docs/configuration/options.html
43 | line_length = 120
44 | # see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
45 | multi_line_output = 0
46 |
47 | [yapf]
48 | based_on_style = pep8
49 | spaces_before_comment = 2
50 | COLUMN_LIMIT = 120
51 | COALESCE_BRACKETS = True
52 | SPACES_AROUND_POWER_OPERATOR = True
53 | SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True
54 | SPLIT_BEFORE_CLOSING_BRACKET = False
55 | SPLIT_BEFORE_FIRST_ARGUMENT = False
56 | # EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
57 |
--------------------------------------------------------------------------------
/Detector/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
4 | def pytest_addoption(parser):
5 | parser.addoption('--runslow', action='store_true', default=False, help='run slow tests')
6 |
7 |
8 | def pytest_configure(config):
9 | config.addinivalue_line('markers', 'slow: mark test as slow to run')
10 |
11 |
12 | def pytest_collection_modifyitems(config, items):
13 | if config.getoption('--runslow'):
14 | # --runslow given in cli: do not skip slow tests
15 | return
16 | skip_slow = pytest.mark.skip(reason='need --runslow option to run')
17 | for item in items:
18 | if 'slow' in item.keywords:
19 | item.add_marker(skip_slow)
20 |
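# Usage sketch (illustrative, not part of the original file): with the hooks above, tests
# decorated with `@pytest.mark.slow` are skipped unless pytest is invoked with `--runslow`.
#
#   @pytest.mark.slow
#   def test_long_training_run():  # hypothetical test name
#       ...
#
#   $ pytest --runslow   # runs slow tests as well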
--------------------------------------------------------------------------------
/Detector/ultralytics/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | __version__ = '8.0.149'
4 |
5 | from ultralytics.hub import start
6 | from ultralytics.models import RTDETR, SAM, YOLO
7 | from ultralytics.models.fastsam import FastSAM
8 | from ultralytics.models.nas import NAS
9 | from ultralytics.utils import SETTINGS as settings
10 | from ultralytics.utils.checks import check_yolo as checks
11 | from ultralytics.utils.downloads import download
12 |
13 | __all__ = '__version__', 'YOLO', 'NAS', 'SAM', 'FastSAM', 'RTDETR', 'checks', 'download', 'start', 'settings' # allow simpler import
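
# Minimal usage sketch of the exported API (comments only, not part of the original file):
#   from ultralytics import YOLO
#   model = YOLO('yolov8n.pt')                           # load a pretrained YOLOv8n detection model
#   results = model.predict('ultralytics/assets/bus.jpg')  # run inference on a bundled sample image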
14 |
--------------------------------------------------------------------------------
/Detector/ultralytics/assets/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/Detector/ultralytics/assets/bus.jpg
--------------------------------------------------------------------------------
/Detector/ultralytics/assets/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/Detector/ultralytics/assets/zidane.jpg
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/datasets/coco-pose.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: yolo train data=coco-pose.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── coco-pose ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco-pose # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Keypoints
17 | kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
18 | flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
19 |
20 | # Classes
21 | names:
22 | 0: person
23 |
24 | # Download script/URL (optional)
25 | download: |
26 | from ultralytics.utils.downloads import download
27 | from pathlib import Path
28 |
29 | # Download labels
30 | dir = Path(yaml['path']) # dataset root dir
31 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
32 | urls = [url + 'coco2017labels-pose.zip'] # labels
33 | download(urls, dir=dir.parent)
34 | # Download data
35 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
36 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
37 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
38 | download(urls, dir=dir / 'images', threads=3)
39 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/datasets/coco8-pose.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # COCO8-pose dataset (first 8 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco8-pose.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── coco8-pose ← downloads here (1 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco8-pose # dataset root dir
12 | train: images/train # train images (relative to 'path') 4 images
13 | val: images/val # val images (relative to 'path') 4 images
14 | test: # test images (optional)
15 |
16 | # Keypoints
17 | kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
18 | flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
19 |
20 | # Classes
21 | names:
22 | 0: person
23 |
24 | # Download script/URL (optional)
25 | download: https://ultralytics.com/assets/coco8-pose.zip
26 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v3/yolov3-spp.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv3-SPP object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/yolov3
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | depth_multiple: 1.0 # model depth multiple
7 | width_multiple: 1.0 # layer channel multiple
8 |
9 | # darknet53 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [32, 3, 1]], # 0
13 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
14 | [-1, 1, Bottleneck, [64]],
15 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
16 | [-1, 2, Bottleneck, [128]],
17 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
18 | [-1, 8, Bottleneck, [256]],
19 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
20 | [-1, 8, Bottleneck, [512]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
22 | [-1, 4, Bottleneck, [1024]], # 10
23 | ]
24 |
25 | # YOLOv3-SPP head
26 | head:
27 | [[-1, 1, Bottleneck, [1024, False]],
28 | [-1, 1, SPP, [512, [5, 9, 13]]],
29 | [-1, 1, Conv, [1024, 3, 1]],
30 | [-1, 1, Conv, [512, 1, 1]],
31 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
32 |
33 | [-2, 1, Conv, [256, 1, 1]],
34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
35 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
36 | [-1, 1, Bottleneck, [512, False]],
37 | [-1, 1, Bottleneck, [512, False]],
38 | [-1, 1, Conv, [256, 1, 1]],
39 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
40 |
41 | [-2, 1, Conv, [128, 1, 1]],
42 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
43 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
44 | [-1, 1, Bottleneck, [256, False]],
45 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
46 |
47 | [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v3/yolov3-tiny.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv3-tiny object detection model with P4-P5 outputs. For details see https://docs.ultralytics.com/models/yolov3
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | depth_multiple: 1.0 # model depth multiple
7 | width_multiple: 1.0 # layer channel multiple
8 |
9 | # YOLOv3-tiny backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [16, 3, 1]], # 0
13 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
14 | [-1, 1, Conv, [32, 3, 1]],
15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
16 | [-1, 1, Conv, [64, 3, 1]],
17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
18 | [-1, 1, Conv, [128, 3, 1]],
19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
20 | [-1, 1, Conv, [256, 3, 1]],
21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
22 | [-1, 1, Conv, [512, 3, 1]],
23 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
24 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
25 | ]
26 |
27 | # YOLOv3-tiny head
28 | head:
29 | [[-1, 1, Conv, [1024, 3, 1]],
30 | [-1, 1, Conv, [256, 1, 1]],
31 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
32 |
33 | [-2, 1, Conv, [128, 1, 1]],
34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
35 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
36 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
37 |
38 | [[19, 15], 1, Detect, [nc]], # Detect(P4, P5)
39 | ]
40 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v3/yolov3.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv3 object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/yolov3
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | depth_multiple: 1.0 # model depth multiple
7 | width_multiple: 1.0 # layer channel multiple
8 |
9 | # darknet53 backbone
10 | backbone:
11 | # [from, number, module, args]
12 | [[-1, 1, Conv, [32, 3, 1]], # 0
13 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
14 | [-1, 1, Bottleneck, [64]],
15 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
16 | [-1, 2, Bottleneck, [128]],
17 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
18 | [-1, 8, Bottleneck, [256]],
19 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
20 | [-1, 8, Bottleneck, [512]],
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
22 | [-1, 4, Bottleneck, [1024]], # 10
23 | ]
24 |
25 | # YOLOv3 head
26 | head:
27 | [[-1, 1, Bottleneck, [1024, False]],
28 | [-1, 1, Conv, [512, 1, 1]],
29 | [-1, 1, Conv, [1024, 3, 1]],
30 | [-1, 1, Conv, [512, 1, 1]],
31 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
32 |
33 | [-2, 1, Conv, [256, 1, 1]],
34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
35 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
36 | [-1, 1, Bottleneck, [512, False]],
37 | [-1, 1, Bottleneck, [512, False]],
38 | [-1, 1, Conv, [256, 1, 1]],
39 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
40 |
41 | [-2, 1, Conv, [128, 1, 1]],
42 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
43 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
44 | [-1, 1, Bottleneck, [256, False]],
45 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
46 |
47 | [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5)
48 | ]
49 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v5/yolov5.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv5 object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/yolov5
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov5n.yaml' will call yolov5.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024]
9 | s: [0.33, 0.50, 1024]
10 | m: [0.67, 0.75, 1024]
11 | l: [1.00, 1.00, 1024]
12 | x: [1.33, 1.25, 1024]
13 |
14 | # YOLOv5 v6.0 backbone
15 | backbone:
16 | # [from, number, module, args]
17 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
18 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
19 | [-1, 3, C3, [128]],
20 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
21 | [-1, 6, C3, [256]],
22 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
23 | [-1, 9, C3, [512]],
24 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
25 | [-1, 3, C3, [1024]],
26 | [-1, 1, SPPF, [1024, 5]], # 9
27 | ]
28 |
29 | # YOLOv5 v6.0 head
30 | head:
31 | [[-1, 1, Conv, [512, 1, 1]],
32 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
33 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
34 | [-1, 3, C3, [512, False]], # 13
35 |
36 | [-1, 1, Conv, [256, 1, 1]],
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
39 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
40 |
41 | [-1, 1, Conv, [256, 3, 2]],
42 | [[-1, 14], 1, Concat, [1]], # cat head P4
43 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
44 |
45 | [-1, 1, Conv, [512, 3, 2]],
46 | [[-1, 10], 1, Concat, [1]], # cat head P5
47 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
48 |
49 | [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5)
50 | ]
51 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v6/yolov6.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv6 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/models/yolov6
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | activation: nn.ReLU() # (optional) model default activation function
7 | scales: # model compound scaling constants, i.e. 'model=yolov6n.yaml' will call yolov8.yaml with scale 'n'
8 | # [depth, width, max_channels]
9 | n: [0.33, 0.25, 1024]
10 | s: [0.33, 0.50, 1024]
11 | m: [0.67, 0.75, 768]
12 | l: [1.00, 1.00, 512]
13 | x: [1.00, 1.25, 512]
14 |
15 | # YOLOv6-3.0s backbone
16 | backbone:
17 | # [from, repeats, module, args]
18 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
19 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
20 | - [-1, 6, Conv, [128, 3, 1]]
21 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
22 | - [-1, 12, Conv, [256, 3, 1]]
23 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
24 | - [-1, 18, Conv, [512, 3, 1]]
25 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
26 | - [-1, 6, Conv, [1024, 3, 1]]
27 | - [-1, 1, SPPF, [1024, 5]] # 9
28 |
29 | # YOLOv6-3.0s head
30 | head:
31 | - [-1, 1, Conv, [256, 1, 1]]
32 | - [-1, 1, nn.ConvTranspose2d, [256, 2, 2, 0]]
33 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
34 | - [-1, 1, Conv, [256, 3, 1]]
35 | - [-1, 9, Conv, [256, 3, 1]] # 14
36 |
37 | - [-1, 1, Conv, [128, 1, 1]]
38 | - [-1, 1, nn.ConvTranspose2d, [128, 2, 2, 0]]
39 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
40 | - [-1, 1, Conv, [128, 3, 1]]
41 | - [-1, 9, Conv, [128, 3, 1]] # 19
42 |
43 | - [-1, 1, Conv, [128, 3, 2]]
44 | - [[-1, 15], 1, Concat, [1]] # cat head P4
45 | - [-1, 1, Conv, [256, 3, 1]]
46 | - [-1, 9, Conv, [256, 3, 1]] # 23
47 |
48 | - [-1, 1, Conv, [256, 3, 2]]
49 | - [[-1, 10], 1, Concat, [1]] # cat head P5
50 | - [-1, 1, Conv, [512, 3, 1]]
51 | - [-1, 9, Conv, [512, 3, 1]] # 27
52 |
53 | - [[19, 23, 27], 1, Detect, [nc]] # Detect(P3, P4, P5)
54 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v8/yolov8-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv8-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify
3 |
4 | # Parameters
5 | nc: 1000 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024]
9 | s: [0.33, 0.50, 1024]
10 | m: [0.67, 0.75, 1024]
11 | l: [1.00, 1.00, 1024]
12 | x: [1.00, 1.25, 1024]
13 |
14 | # YOLOv8.0n backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [1024, True]]
26 |
27 | # YOLOv8.0n head
28 | head:
29 | - [-1, 1, Classify, [nc]] # Classify
30 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v8/yolov8-p2.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv8 object detection model with P2-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024]
9 | s: [0.33, 0.50, 1024]
10 | m: [0.67, 0.75, 768]
11 | l: [1.00, 1.00, 512]
12 | x: [1.00, 1.25, 512]
13 |
14 | # YOLOv8.0 backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [1024, True]]
26 | - [-1, 1, SPPF, [1024, 5]] # 9
27 |
28 | # YOLOv8.0-p2 head
29 | head:
30 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
31 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
32 | - [-1, 3, C2f, [512]] # 12
33 |
34 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
35 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
36 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
37 |
38 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
39 | - [[-1, 2], 1, Concat, [1]] # cat backbone P2
40 | - [-1, 3, C2f, [128]] # 18 (P2/4-xsmall)
41 |
42 | - [-1, 1, Conv, [128, 3, 2]]
43 | - [[-1, 15], 1, Concat, [1]] # cat head P3
44 | - [-1, 3, C2f, [256]] # 21 (P3/8-small)
45 |
46 | - [-1, 1, Conv, [256, 3, 2]]
47 | - [[-1, 12], 1, Concat, [1]] # cat head P4
48 | - [-1, 3, C2f, [512]] # 24 (P4/16-medium)
49 |
50 | - [-1, 1, Conv, [512, 3, 2]]
51 | - [[-1, 9], 1, Concat, [1]] # cat head P5
52 | - [-1, 3, C2f, [1024]] # 27 (P5/32-large)
53 |
54 | - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5)
55 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v8/yolov8-p6.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv8 object detection model with P3-P6 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024]
9 | s: [0.33, 0.50, 1024]
10 | m: [0.67, 0.75, 768]
11 | l: [1.00, 1.00, 512]
12 | x: [1.00, 1.25, 512]
13 |
14 | # YOLOv8.0x6 backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [768, True]]
26 | - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64
27 | - [-1, 3, C2f, [1024, True]]
28 | - [-1, 1, SPPF, [1024, 5]] # 11
29 |
30 | # YOLOv8.0x6 head
31 | head:
32 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
33 | - [[-1, 8], 1, Concat, [1]] # cat backbone P5
34 | - [-1, 3, C2, [768, False]] # 14
35 |
36 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
37 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
38 | - [-1, 3, C2, [512, False]] # 17
39 |
40 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
41 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
42 | - [-1, 3, C2, [256, False]] # 20 (P3/8-small)
43 |
44 | - [-1, 1, Conv, [256, 3, 2]]
45 | - [[-1, 17], 1, Concat, [1]] # cat head P4
46 | - [-1, 3, C2, [512, False]] # 23 (P4/16-medium)
47 |
48 | - [-1, 1, Conv, [512, 3, 2]]
49 | - [[-1, 14], 1, Concat, [1]] # cat head P5
50 | - [-1, 3, C2, [768, False]] # 26 (P5/32-large)
51 |
52 | - [-1, 1, Conv, [768, 3, 2]]
53 | - [[-1, 11], 1, Concat, [1]] # cat head P6
54 | - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge)
55 |
56 | - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6)
57 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v8/yolov8-pose.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv8-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose
3 |
4 | # Parameters
5 | nc: 1 # number of classes
6 | kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
7 | scales: # model compound scaling constants, i.e. 'model=yolov8n-pose.yaml' will call yolov8-pose.yaml with scale 'n'
8 | # [depth, width, max_channels]
9 | n: [0.33, 0.25, 1024]
10 | s: [0.33, 0.50, 1024]
11 | m: [0.67, 0.75, 768]
12 | l: [1.00, 1.00, 512]
13 | x: [1.00, 1.25, 512]
14 |
15 | # YOLOv8.0n backbone
16 | backbone:
17 | # [from, repeats, module, args]
18 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
19 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
20 | - [-1, 3, C2f, [128, True]]
21 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
22 | - [-1, 6, C2f, [256, True]]
23 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
24 | - [-1, 6, C2f, [512, True]]
25 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
26 | - [-1, 3, C2f, [1024, True]]
27 | - [-1, 1, SPPF, [1024, 5]] # 9
28 |
29 | # YOLOv8.0n head
30 | head:
31 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
32 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
33 | - [-1, 3, C2f, [512]] # 12
34 |
35 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
36 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
37 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
38 |
39 | - [-1, 1, Conv, [256, 3, 2]]
40 | - [[-1, 12], 1, Concat, [1]] # cat head P4
41 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
42 |
43 | - [-1, 1, Conv, [512, 3, 2]]
44 | - [[-1, 9], 1, Concat, [1]] # cat head P5
45 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
46 |
47 | - [[15, 18, 21], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5)
48 |
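The kpt_shape entry above declares 17 keypoints with 3 values each (x, y, visible), as its comment notes. A minimal, hedged sketch of building and training this config with the standard Ultralytics API; the dataset name follows the coco8-pose sample documented under docs/datasets/pose and is illustrative only.

# Minimal usage sketch (not part of the repository). Swap 'coco8-pose.yaml' for your
# own pose dataset config; its kpt_shape must match the value declared above.
from ultralytics import YOLO

model = YOLO('yolov8n-pose.yaml')  # build this architecture at scale 'n'
model.train(data='coco8-pose.yaml', epochs=10, imgsz=640)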
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
9 | s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
10 | m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
11 | l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
12 | x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
13 |
14 | # YOLOv8.0n backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [1024, True]]
26 | - [-1, 1, SPPF, [1024, 5]] # 9
27 |
28 | # YOLOv8.0n head
29 | head:
30 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
31 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
32 | - [-1, 3, C2f, [512]] # 12
33 |
34 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
35 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
36 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
37 |
38 | - [-1, 1, Conv, [256, 3, 2]]
39 | - [[-1, 12], 1, Concat, [1]] # cat head P4
40 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
41 |
42 | - [-1, 1, Conv, [512, 3, 2]]
43 | - [[-1, 9], 1, Concat, [1]] # cat head P5
44 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
45 |
46 | - [[15, 18, 21], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5)
47 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v8/yolov8-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv8-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n-seg.yaml' will call yolov8-seg.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024]
9 | s: [0.33, 0.50, 1024]
10 | m: [0.67, 0.75, 768]
11 | l: [1.00, 1.00, 512]
12 | x: [1.00, 1.25, 512]
13 |
14 | # YOLOv8.0n backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [1024, True]]
26 | - [-1, 1, SPPF, [1024, 5]] # 9
27 |
28 | # YOLOv8.0n head
29 | head:
30 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
31 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
32 | - [-1, 3, C2f, [512]] # 12
33 |
34 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
35 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
36 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
37 |
38 | - [-1, 1, Conv, [256, 3, 2]]
39 | - [[-1, 12], 1, Concat, [1]] # cat head P4
40 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
41 |
42 | - [-1, 1, Conv, [512, 3, 2]]
43 | - [[-1, 9], 1, Concat, [1]] # cat head P5
44 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
45 |
46 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5)
47 |
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/models/v8/yolov8.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
9 | s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
10 | m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
11 | l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
12 | x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
13 |
14 | # YOLOv8.0n backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [1024, True]]
26 | - [-1, 1, SPPF, [1024, 5]] # 9
27 |
28 | # YOLOv8.0n head
29 | head:
30 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
31 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
32 | - [-1, 3, C2f, [512]] # 12
33 |
34 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
35 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
36 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
37 |
38 | - [-1, 1, Conv, [256, 3, 2]]
39 | - [[-1, 12], 1, Concat, [1]] # cat head P4
40 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
41 |
42 | - [-1, 1, Conv, [512, 3, 2]]
43 | - [[-1, 9], 1, Concat, [1]] # cat head P5
44 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
45 |
46 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
47 |
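As the scales comment above notes, requesting 'yolov8n.yaml' loads this file and applies the 'n' depth/width multipliers. A minimal sketch of building from the config versus loading pretrained weights, assuming the standard Ultralytics API:

from ultralytics import YOLO

model = YOLO('yolov8n.yaml')  # build from this yaml at scale 'n' (randomly initialized)
model = YOLO('yolov8n.pt')    # or load the pretrained checkpoint of the same scale
results = model('https://ultralytics.com/images/bus.jpg')  # sample image also used by the predict helpers in this repo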
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/trackers/botsort.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # Default YOLO tracker settings for BoT-SORT tracker https://github.com/NirAharon/BoT-SORT
3 |
4 | tracker_type: botsort # tracker type, ['botsort', 'bytetrack']
5 | track_high_thresh: 0.5 # threshold for the first association
6 | track_low_thresh: 0.1 # threshold for the second association
7 | new_track_thresh: 0.6 # threshold to initialize a new track if the detection does not match any existing tracks
8 | track_buffer: 30 # buffer (in frames) used to decide when lost tracks are removed
9 | match_thresh: 0.8 # threshold for matching tracks
10 | # min_box_area: 10 # threshold for minimum box area (for tracker evaluation, not used for now)
11 | # mot20: False # for tracker evaluation (not used for now)
12 |
13 | # BoT-SORT settings
14 | cmc_method: sparseOptFlow # method of global motion compensation
15 | # ReID model related thresh (not supported yet)
16 | proximity_thresh: 0.5
17 | appearance_thresh: 0.25
18 | with_reid: False
19 |
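A minimal sketch of selecting this configuration through the track mode, assuming the standard Ultralytics API; the video path is a placeholder.

from ultralytics import YOLO

model = YOLO('yolov8n.pt')
# tracker= accepts this botsort.yaml or the bytetrack.yaml listed next
results = model.track(source='video.mp4', tracker='botsort.yaml', conf=0.5)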
--------------------------------------------------------------------------------
/Detector/ultralytics/cfg/trackers/bytetrack.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack
3 |
4 | tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
5 | track_high_thresh: 0.5 # threshold for the first association
6 | track_low_thresh: 0.1 # threshold for the second association
7 | new_track_thresh: 0.6 # threshold to initialize a new track if the detection does not match any existing tracks
8 | track_buffer: 30 # buffer (in frames) used to decide when lost tracks are removed
9 | match_thresh: 0.8 # threshold for matching tracks
10 | # min_box_area: 10 # threshold for minimum box area (for tracker evaluation, not used for now)
11 | # mot20: False # for tracker evaluation (not used for now)
12 |
--------------------------------------------------------------------------------
/Detector/ultralytics/data/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from .base import BaseDataset
4 | from .build import build_dataloader, build_yolo_dataset, load_inference_source
5 | from .dataset import ClassificationDataset, SemanticDataset, YOLODataset
6 |
7 | __all__ = ('BaseDataset', 'ClassificationDataset', 'SemanticDataset', 'YOLODataset', 'build_yolo_dataset',
8 | 'build_dataloader', 'load_inference_source')
9 |
--------------------------------------------------------------------------------
/Detector/ultralytics/data/annotator.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from ultralytics import SAM, YOLO
4 |
5 |
6 | def auto_annotate(data, det_model='yolov8x.pt', sam_model='sam_b.pt', device='', output_dir=None):
7 | """
8 | Automatically annotates images using a YOLO object detection model and a SAM segmentation model.
9 | Args:
10 | data (str): Path to a folder containing images to be annotated.
11 | det_model (str, optional): Pre-trained YOLO detection model. Defaults to 'yolov8x.pt'.
12 | sam_model (str, optional): Pre-trained SAM segmentation model. Defaults to 'sam_b.pt'.
13 | device (str, optional): Device to run the models on. Defaults to an empty string (CPU or GPU, if available).
14 | output_dir (str | None, optional): Directory to save the annotated results.
15 | Defaults to a 'labels' folder in the same directory as 'data'.
16 | """
17 | det_model = YOLO(det_model)
18 | sam_model = SAM(sam_model)
19 |
20 | if not output_dir:
21 | output_dir = Path(str(data)).parent / 'labels'
22 | Path(output_dir).mkdir(exist_ok=True, parents=True)
23 |
24 | det_results = det_model(data, stream=True, device=device)
25 |
26 | for result in det_results:
27 | class_ids = result.boxes.cls.int().tolist() # noqa
28 | if len(class_ids):
29 | boxes = result.boxes.xyxy # Boxes object for bbox outputs
30 | sam_results = sam_model(result.orig_img, bboxes=boxes, verbose=False, save=False, device=device)
31 | segments = sam_results[0].masks.xyn # noqa
32 |
33 | with open(f'{str(Path(output_dir) / Path(result.path).stem)}.txt', 'w') as f:
34 | for i in range(len(segments)):
35 | s = segments[i]
36 | if len(s) == 0:
37 | continue
38 | segment = map(str, segments[i].reshape(-1).tolist())
39 | f.write(f'{class_ids[i]} ' + ' '.join(segment) + '\n')
40 |
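A usage sketch for auto_annotate as documented above; 'path/to/images' is a placeholder, and labels land in a sibling 'labels' folder when output_dir is omitted.

from ultralytics.data.annotator import auto_annotate

# YOLOv8 detections are passed as box prompts to SAM, and the resulting polygons
# are written as YOLO-format segmentation labels, one .txt per image.
auto_annotate(data='path/to/images', det_model='yolov8x.pt', sam_model='sam_b.pt')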
--------------------------------------------------------------------------------
/Detector/ultralytics/data/dataloaders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/Detector/ultralytics/data/dataloaders/__init__.py
--------------------------------------------------------------------------------
/Detector/ultralytics/data/scripts/download_weights.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Ultralytics YOLO 🚀, AGPL-3.0 license
3 | # Download latest models from https://github.com/ultralytics/assets/releases
4 | # Example usage: bash ultralytics/data/scripts/download_weights.sh
5 | # parent
6 | # └── weights
7 | # ├── yolov8n.pt ← downloads here
8 | # ├── yolov8s.pt
9 | # └── ...
10 |
11 | python - <<EOF
--------------------------------------------------------------------------------
/Detector/ultralytics/models/nas/model.py:
--------------------------------------------------------------------------------
25 | def __init__(self, model='yolo_nas_s.pt') -> None:
26 | assert Path(model).suffix not in ('.yaml', '.yml'), 'YOLO-NAS models only support pre-trained models.'
27 | super().__init__(model, task='detect')
28 |
29 | @smart_inference_mode()
30 | def _load(self, weights: str, task: str):
31 | # Load or create new NAS model
32 | import super_gradients
33 | suffix = Path(weights).suffix
34 | if suffix == '.pt':
35 | self.model = torch.load(weights)
36 | elif suffix == '':
37 | self.model = super_gradients.training.models.get(weights, pretrained_weights='coco')
38 | # Standardize model
39 | self.model.fuse = lambda verbose=True: self.model
40 | self.model.stride = torch.tensor([32])
41 | self.model.names = dict(enumerate(self.model._class_names))
42 | self.model.is_fused = lambda: False # for info()
43 | self.model.yaml = {} # for info()
44 | self.model.pt_path = weights # for export()
45 | self.model.task = 'detect' # for export()
46 |
47 | def info(self, detailed=False, verbose=True):
48 | """
49 | Logs model info.
50 |
51 | Args:
52 | detailed (bool): Show detailed information about model.
53 | verbose (bool): Controls verbosity.
54 | """
55 | return model_info(self.model, detailed=detailed, verbose=verbose, imgsz=640)
56 |
57 | @property
58 | def task_map(self):
59 | return {'detect': {'predictor': NASPredictor, 'validator': NASValidator}}
60 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/nas/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.engine.predictor import BasePredictor
6 | from ultralytics.engine.results import Results
7 | from ultralytics.utils import ops
8 | from ultralytics.utils.ops import xyxy2xywh
9 |
10 |
11 | class NASPredictor(BasePredictor):
12 |
13 | def postprocess(self, preds_in, img, orig_imgs):
14 | """Postprocesses predictions and returns a list of Results objects."""
15 |
16 | # Cat boxes and class scores
17 | boxes = xyxy2xywh(preds_in[0][0])
18 | preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1)
19 |
20 | preds = ops.non_max_suppression(preds,
21 | self.args.conf,
22 | self.args.iou,
23 | agnostic=self.args.agnostic_nms,
24 | max_det=self.args.max_det,
25 | classes=self.args.classes)
26 |
27 | results = []
28 | for i, pred in enumerate(preds):
29 | orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
30 | if not isinstance(orig_imgs, torch.Tensor):
31 | pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
32 | path = self.batch[0]
33 | img_path = path[i] if isinstance(path, list) else path
34 | results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred))
35 | return results
36 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/nas/val.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.models.yolo.detect import DetectionValidator
6 | from ultralytics.utils import ops
7 | from ultralytics.utils.ops import xyxy2xywh
8 |
9 | __all__ = ['NASValidator']
10 |
11 |
12 | class NASValidator(DetectionValidator):
13 |
14 | def postprocess(self, preds_in):
15 | """Apply Non-maximum suppression to prediction outputs."""
16 | boxes = xyxy2xywh(preds_in[0][0])
17 | preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1)
18 | return ops.non_max_suppression(preds,
19 | self.args.conf,
20 | self.args.iou,
21 | labels=self.lb,
22 | multi_label=False,
23 | agnostic=self.args.single_cls,
24 | max_det=self.args.max_det,
25 | max_time_img=0.5)
26 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/rtdetr/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from .model import RTDETR
4 | from .predict import RTDETRPredictor
5 | from .val import RTDETRValidator
6 |
7 | __all__ = 'RTDETRPredictor', 'RTDETRValidator', 'RTDETR'
8 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/rtdetr/model.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | """
3 | RT-DETR model interface
4 | """
5 | from ultralytics.engine.model import Model
6 | from ultralytics.nn.tasks import RTDETRDetectionModel
7 |
8 | from .predict import RTDETRPredictor
9 | from .train import RTDETRTrainer
10 | from .val import RTDETRValidator
11 |
12 |
13 | class RTDETR(Model):
14 | """
15 | RTDETR model interface.
16 | """
17 |
18 | def __init__(self, model='rtdetr-l.pt') -> None:
19 | if model and model.split('.')[-1] not in ('pt', 'yaml', 'yml'):
20 | raise NotImplementedError('RT-DETR only supports creating from *.pt file or *.yaml file.')
21 | super().__init__(model=model, task='detect')
22 |
23 | @property
24 | def task_map(self):
25 | return {
26 | 'detect': {
27 | 'predictor': RTDETRPredictor,
28 | 'validator': RTDETRValidator,
29 | 'trainer': RTDETRTrainer,
30 | 'model': RTDETRDetectionModel}}
31 |
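A minimal usage sketch for the RTDETR wrapper above, assuming the default 'rtdetr-l.pt' checkpoint; the image URL matches the sample used elsewhere in this repo.

from ultralytics import RTDETR

model = RTDETR('rtdetr-l.pt')  # only *.pt, *.yaml or *.yml sources are accepted (see __init__)
results = model('https://ultralytics.com/images/bus.jpg')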
--------------------------------------------------------------------------------
/Detector/ultralytics/models/rtdetr/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.data.augment import LetterBox
6 | from ultralytics.engine.predictor import BasePredictor
7 | from ultralytics.engine.results import Results
8 | from ultralytics.utils import ops
9 |
10 |
11 | class RTDETRPredictor(BasePredictor):
12 |
13 | def postprocess(self, preds, img, orig_imgs):
14 | """Postprocess predictions and returns a list of Results objects."""
15 | nd = preds[0].shape[-1]
16 | bboxes, scores = preds[0].split((4, nd - 4), dim=-1)
17 | results = []
18 | for i, bbox in enumerate(bboxes): # (300, 4)
19 | bbox = ops.xywh2xyxy(bbox)
20 | score, cls = scores[i].max(-1, keepdim=True) # (300, 1)
21 | idx = score.squeeze(-1) > self.args.conf # (300, )
22 | if self.args.classes is not None:
23 | idx = (cls == torch.tensor(self.args.classes, device=cls.device)).any(1) & idx
24 | pred = torch.cat([bbox, score, cls], dim=-1)[idx] # filter
25 | orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
26 | oh, ow = orig_img.shape[:2]
27 | if not isinstance(orig_imgs, torch.Tensor):
28 | pred[..., [0, 2]] *= ow
29 | pred[..., [1, 3]] *= oh
30 | path = self.batch[0]
31 | img_path = path[i] if isinstance(path, list) else path
32 | results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred))
33 | return results
34 |
35 | def pre_transform(self, im):
36 | """Pre-transform input image before inference.
37 |
38 | Args:
39 | im (List[np.ndarray]): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list.
40 |
41 | Returns: A list of transformed images.
42 | """
43 | # The input size must be square (640) and stretched to fill (scaleFill=True).
44 | return [LetterBox(self.imgsz, auto=False, scaleFill=True)(image=x) for x in im]
45 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/sam/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from .model import SAM
4 | from .predict import Predictor
5 |
6 | # from .build import build_sam
7 |
8 | __all__ = 'SAM', 'Predictor' # tuple or list
9 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/sam/model.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | """
3 | SAM model interface
4 | """
5 |
6 | from ultralytics.engine.model import Model
7 | from ultralytics.utils.torch_utils import model_info
8 |
9 | from .build import build_sam
10 | from .predict import Predictor
11 |
12 |
13 | class SAM(Model):
14 | """
15 | SAM model interface.
16 | """
17 |
18 | def __init__(self, model='sam_b.pt') -> None:
19 | if model and not model.endswith('.pt') and not model.endswith('.pth'):
20 | # Should raise AssertionError instead?
21 | raise NotImplementedError('Segment anything prediction requires pre-trained checkpoint')
22 | super().__init__(model=model, task='segment')
23 |
24 | def _load(self, weights: str, task=None):
25 | self.model = build_sam(weights)
26 |
27 | def predict(self, source, stream=False, bboxes=None, points=None, labels=None, **kwargs):
28 | """Predicts and returns segmentation masks for given image or video source."""
29 | overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024)
30 | kwargs.update(overrides)
31 | prompts = dict(bboxes=bboxes, points=points, labels=labels)
32 | return super().predict(source, stream, prompts=prompts, **kwargs)
33 |
34 | def __call__(self, source=None, stream=False, bboxes=None, points=None, labels=None, **kwargs):
35 | """Calls the 'predict' function with given arguments to perform object detection."""
36 | return self.predict(source, stream, bboxes, points, labels, **kwargs)
37 |
38 | def info(self, detailed=False, verbose=True):
39 | """
40 | Logs model info.
41 |
42 | Args:
43 | detailed (bool): Show detailed information about model.
44 | verbose (bool): Controls verbosity.
45 | """
46 | return model_info(self.model, detailed=detailed, verbose=verbose)
47 |
48 | @property
49 | def task_map(self):
50 | return {'segment': {'predictor': Predictor}}
51 |
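A usage sketch for the SAM interface above, showing the bboxes/points/labels prompts forwarded by predict(); the image path and coordinates are placeholders.

from ultralytics import SAM

model = SAM('sam_b.pt')
results = model('image.jpg', bboxes=[100, 100, 400, 400])    # box prompt (xyxy), placeholder coords
results = model('image.jpg', points=[250, 250], labels=[1])  # single foreground point prompt
model.info()  # logs a model summary via model_info()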
--------------------------------------------------------------------------------
/Detector/ultralytics/models/sam/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/yolo/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from ultralytics.models.yolo import classify, detect, pose, segment
4 |
5 | from .model import YOLO
6 |
7 | __all__ = 'classify', 'segment', 'detect', 'pose', 'YOLO'
8 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/yolo/classify/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from ultralytics.models.yolo.classify.predict import ClassificationPredictor, predict
4 | from ultralytics.models.yolo.classify.train import ClassificationTrainer, train
5 | from ultralytics.models.yolo.classify.val import ClassificationValidator, val
6 |
7 | __all__ = 'ClassificationPredictor', 'predict', 'ClassificationTrainer', 'train', 'ClassificationValidator', 'val'
8 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/yolo/classify/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.engine.predictor import BasePredictor
6 | from ultralytics.engine.results import Results
7 | from ultralytics.utils import DEFAULT_CFG, ROOT
8 |
9 |
10 | class ClassificationPredictor(BasePredictor):
11 |
12 | def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
13 | super().__init__(cfg, overrides, _callbacks)
14 | self.args.task = 'classify'
15 |
16 | def preprocess(self, img):
17 | """Converts input image to model-compatible data type."""
18 | if not isinstance(img, torch.Tensor):
19 | img = torch.stack([self.transforms(im) for im in img], dim=0)
20 | img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device)
21 | return img.half() if self.model.fp16 else img.float() # uint8 to fp16/32
22 |
23 | def postprocess(self, preds, img, orig_imgs):
24 | """Postprocesses predictions to return Results objects."""
25 | results = []
26 | for i, pred in enumerate(preds):
27 | orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
28 | path = self.batch[0]
29 | img_path = path[i] if isinstance(path, list) else path
30 | results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, probs=pred))
31 |
32 | return results
33 |
34 |
35 | def predict(cfg=DEFAULT_CFG, use_python=False):
36 | """Run YOLO model predictions on input images/videos."""
37 | model = cfg.model or 'yolov8n-cls.pt' # or "resnet18"
38 | source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \
39 | else 'https://ultralytics.com/images/bus.jpg'
40 |
41 | args = dict(model=model, source=source)
42 | if use_python:
43 | from ultralytics import YOLO
44 | YOLO(model)(**args)
45 | else:
46 | predictor = ClassificationPredictor(overrides=args)
47 | predictor.predict_cli()
48 |
49 |
50 | if __name__ == '__main__':
51 | predict()
52 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/yolo/detect/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from .predict import DetectionPredictor, predict
4 | from .train import DetectionTrainer, train
5 | from .val import DetectionValidator, val
6 |
7 | __all__ = 'DetectionPredictor', 'predict', 'DetectionTrainer', 'train', 'DetectionValidator', 'val'
8 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/yolo/detect/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.engine.predictor import BasePredictor
6 | from ultralytics.engine.results import Results
7 | from ultralytics.utils import DEFAULT_CFG, ROOT, ops
8 |
9 |
10 | class DetectionPredictor(BasePredictor):
11 |
12 | def postprocess(self, preds, img, orig_imgs):
13 | """Postprocesses predictions and returns a list of Results objects."""
14 | preds = ops.non_max_suppression(preds,
15 | self.args.conf,
16 | self.args.iou,
17 | agnostic=self.args.agnostic_nms,
18 | max_det=self.args.max_det,
19 | classes=self.args.classes)
20 |
21 | results = []
22 | for i, pred in enumerate(preds):
23 | orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
24 | if not isinstance(orig_imgs, torch.Tensor):
25 | pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
26 | path = self.batch[0]
27 | img_path = path[i] if isinstance(path, list) else path
28 | results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred))
29 | return results
30 |
31 |
32 | def predict(cfg=DEFAULT_CFG, use_python=False):
33 | """Runs YOLO model inference on input image(s)."""
34 | model = cfg.model or 'yolov8n.pt'
35 | source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \
36 | else 'https://ultralytics.com/images/bus.jpg'
37 |
38 | args = dict(model=model, source=source)
39 | if use_python:
40 | from ultralytics import YOLO
41 | YOLO(model)(**args)
42 | else:
43 | predictor = DetectionPredictor(overrides=args)
44 | predictor.predict_cli()
45 |
46 |
47 | if __name__ == '__main__':
48 | predict()
49 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/yolo/model.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from ultralytics.engine.model import Model
4 | from ultralytics.models import yolo # noqa
5 | from ultralytics.nn.tasks import ClassificationModel, DetectionModel, PoseModel, SegmentationModel
6 |
7 |
8 | class YOLO(Model):
9 | """
10 | YOLO (You Only Look Once) object detection model.
11 | """
12 |
13 | @property
14 | def task_map(self):
15 | """Map head to model, trainer, validator, and predictor classes"""
16 | return {
17 | 'classify': {
18 | 'model': ClassificationModel,
19 | 'trainer': yolo.classify.ClassificationTrainer,
20 | 'validator': yolo.classify.ClassificationValidator,
21 | 'predictor': yolo.classify.ClassificationPredictor, },
22 | 'detect': {
23 | 'model': DetectionModel,
24 | 'trainer': yolo.detect.DetectionTrainer,
25 | 'validator': yolo.detect.DetectionValidator,
26 | 'predictor': yolo.detect.DetectionPredictor, },
27 | 'segment': {
28 | 'model': SegmentationModel,
29 | 'trainer': yolo.segment.SegmentationTrainer,
30 | 'validator': yolo.segment.SegmentationValidator,
31 | 'predictor': yolo.segment.SegmentationPredictor, },
32 | 'pose': {
33 | 'model': PoseModel,
34 | 'trainer': yolo.pose.PoseTrainer,
35 | 'validator': yolo.pose.PoseValidator,
36 | 'predictor': yolo.pose.PosePredictor, }, }
37 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/yolo/pose/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from .predict import PosePredictor, predict
4 | from .train import PoseTrainer, train
5 | from .val import PoseValidator, val
6 |
7 | __all__ = 'PoseTrainer', 'train', 'PoseValidator', 'val', 'PosePredictor', 'predict'
8 |
--------------------------------------------------------------------------------
/Detector/ultralytics/models/yolo/segment/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from .predict import SegmentationPredictor, predict
4 | from .train import SegmentationTrainer, train
5 | from .val import SegmentationValidator, val
6 |
7 | __all__ = 'SegmentationPredictor', 'predict', 'SegmentationTrainer', 'train', 'SegmentationValidator', 'val'
8 |
--------------------------------------------------------------------------------
/Detector/ultralytics/nn/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from .tasks import (BaseModel, ClassificationModel, DetectionModel, SegmentationModel, attempt_load_one_weight,
4 | attempt_load_weights, guess_model_scale, guess_model_task, parse_model, torch_safe_load,
5 | yaml_model_load)
6 |
7 | __all__ = ('attempt_load_one_weight', 'attempt_load_weights', 'parse_model', 'yaml_model_load', 'guess_model_task',
8 | 'guess_model_scale', 'torch_safe_load', 'DetectionModel', 'SegmentationModel', 'ClassificationModel',
9 | 'BaseModel')
10 |
--------------------------------------------------------------------------------
/Detector/ultralytics/nn/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | """
3 | Ultralytics modules. Visualize with:
4 |
5 | from ultralytics.nn.modules import *
6 | import torch
7 | import os
8 |
9 | x = torch.ones(1, 128, 40, 40)
10 | m = Conv(128, 128)
11 | f = f'{m._get_name()}.onnx'
12 | torch.onnx.export(m, x, f)
13 | os.system(f'onnxsim {f} {f} && open {f}')
14 | """
15 |
16 | from .block import (C1, C2, C3, C3TR, DFL, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, GhostBottleneck,
17 | HGBlock, HGStem, Proto, RepC3)
18 | from .conv import (CBAM, ChannelAttention, Concat, Conv, Conv2, ConvTranspose, DWConv, DWConvTranspose2d, Focus,
19 | GhostConv, LightConv, RepConv, SpatialAttention)
20 | from .head import Classify, Detect, Pose, RTDETRDecoder, Segment
21 | from .transformer import (AIFI, MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer, LayerNorm2d,
22 | MLPBlock, MSDeformAttn, TransformerBlock, TransformerEncoderLayer, TransformerLayer)
23 |
24 | __all__ = ('Conv', 'Conv2', 'LightConv', 'RepConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus',
25 | 'GhostConv', 'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'TransformerLayer',
26 | 'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3',
27 | 'C2f', 'C3x', 'C3TR', 'C3Ghost', 'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'Detect',
28 | 'Segment', 'Pose', 'Classify', 'TransformerEncoderLayer', 'RepC3', 'RTDETRDecoder', 'AIFI',
29 | 'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP')
30 |
--------------------------------------------------------------------------------
/Detector/ultralytics/trackers/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from .bot_sort import BOTSORT
4 | from .byte_tracker import BYTETracker
5 | from .track import register_tracker
6 |
7 | __all__ = 'register_tracker', 'BOTSORT', 'BYTETracker' # allow simpler import
8 |
--------------------------------------------------------------------------------
/Detector/ultralytics/trackers/basetrack.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from collections import OrderedDict
4 |
5 | import numpy as np
6 |
7 |
8 | class TrackState:
9 | """Enumeration of possible object tracking states."""
10 |
11 | New = 0
12 | Tracked = 1
13 | Lost = 2
14 | Removed = 3
15 |
16 |
17 | class BaseTrack:
18 | """Base class for object tracking, handling basic track attributes and operations."""
19 |
20 | _count = 0
21 |
22 | track_id = 0
23 | is_activated = False
24 | state = TrackState.New
25 |
26 | history = OrderedDict()
27 | features = []
28 | curr_feature = None
29 | score = 0
30 | start_frame = 0
31 | frame_id = 0
32 | time_since_update = 0
33 |
34 | # Multi-camera
35 | location = (np.inf, np.inf)
36 |
37 | @property
38 | def end_frame(self):
39 | """Return the last frame ID of the track."""
40 | return self.frame_id
41 |
42 | @staticmethod
43 | def next_id():
44 | """Increment and return the global track ID counter."""
45 | BaseTrack._count += 1
46 | return BaseTrack._count
47 |
48 | def activate(self, *args):
49 | """Activate the track with the provided arguments."""
50 | raise NotImplementedError
51 |
52 | def predict(self):
53 | """Predict the next state of the track."""
54 | raise NotImplementedError
55 |
56 | def update(self, *args, **kwargs):
57 | """Update the track with new observations."""
58 | raise NotImplementedError
59 |
60 | def mark_lost(self):
61 | """Mark the track as lost."""
62 | self.state = TrackState.Lost
63 |
64 | def mark_removed(self):
65 | """Mark the track as removed."""
66 | self.state = TrackState.Removed
67 |
68 | @staticmethod
69 | def reset_id():
70 | """Reset the global track ID counter."""
71 | BaseTrack._count = 0
72 |
--------------------------------------------------------------------------------
/Detector/ultralytics/trackers/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/Detector/ultralytics/trackers/utils/__init__.py
--------------------------------------------------------------------------------
/Detector/ultralytics/utils/callbacks/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from .base import add_integration_callbacks, default_callbacks, get_default_callbacks
4 |
5 | __all__ = 'add_integration_callbacks', 'default_callbacks', 'get_default_callbacks'
6 |
--------------------------------------------------------------------------------
/Detector/ultralytics/utils/callbacks/raytune.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from ultralytics.utils import SETTINGS
4 |
5 | try:
6 | import ray
7 | from ray import tune
8 | from ray.air import session
9 |
10 | assert SETTINGS['raytune'] is True # verify integration is enabled
11 | except (ImportError, AssertionError):
12 | tune = None
13 |
14 |
15 | def on_fit_epoch_end(trainer):
16 | """Sends training metrics to Ray Tune at end of each epoch."""
17 | if ray.tune.is_session_enabled():
18 | metrics = trainer.metrics
19 | metrics['epoch'] = trainer.epoch
20 | session.report(metrics)
21 |
22 |
23 | callbacks = {
24 | 'on_fit_epoch_end': on_fit_epoch_end, } if tune else {}
25 |
--------------------------------------------------------------------------------
/Detector/ultralytics/utils/callbacks/tensorboard.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr
4 |
5 | try:
6 | from torch.utils.tensorboard import SummaryWriter
7 |
8 | assert not TESTS_RUNNING # do not log pytest
9 | assert SETTINGS['tensorboard'] is True # verify integration is enabled
10 |
11 | # TypeError for handling 'Descriptors cannot not be created directly.' protobuf errors in Windows
12 | except (ImportError, AssertionError, TypeError):
13 | SummaryWriter = None
14 |
15 | writer = None # TensorBoard SummaryWriter instance
16 |
17 |
18 | def _log_scalars(scalars, step=0):
19 | """Logs scalar values to TensorBoard."""
20 | if writer:
21 | for k, v in scalars.items():
22 | writer.add_scalar(k, v, step)
23 |
24 |
25 | def on_pretrain_routine_start(trainer):
26 | """Initialize TensorBoard logging with SummaryWriter."""
27 | if SummaryWriter:
28 | try:
29 | global writer
30 | writer = SummaryWriter(str(trainer.save_dir))
31 | prefix = colorstr('TensorBoard: ')
32 | LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
33 | except Exception as e:
34 | LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}')
35 |
36 |
37 | def on_batch_end(trainer):
38 | """Logs scalar statistics at the end of a training batch."""
39 | _log_scalars(trainer.label_loss_items(trainer.tloss, prefix='train'), trainer.epoch + 1)
40 |
41 |
42 | def on_fit_epoch_end(trainer):
43 | """Logs epoch metrics at end of training epoch."""
44 | _log_scalars(trainer.metrics, trainer.epoch + 1)
45 |
46 |
47 | callbacks = {
48 | 'on_pretrain_routine_start': on_pretrain_routine_start,
49 | 'on_fit_epoch_end': on_fit_epoch_end,
50 | 'on_batch_end': on_batch_end}
51 |
--------------------------------------------------------------------------------
/Detector/ultralytics/utils/errors.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from ultralytics.utils import emojis
4 |
5 |
6 | class HUBModelError(Exception):
7 |
8 | def __init__(self, message='Model not found. Please check model URL and try again.'):
9 | """Create an exception for when a model is not found."""
10 | super().__init__(emojis(message))
11 |
--------------------------------------------------------------------------------
/Detector/ultralytics/utils/patches.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | """
3 | Monkey patches to update/extend functionality of existing functions
4 | """
5 |
6 | from pathlib import Path
7 |
8 | import cv2
9 | import numpy as np
10 | import torch
11 |
12 | # OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------
13 | _imshow = cv2.imshow # copy to avoid recursion errors
14 |
15 |
16 | def imread(filename, flags=cv2.IMREAD_COLOR):
17 | return cv2.imdecode(np.fromfile(filename, np.uint8), flags)
18 |
19 |
20 | def imwrite(filename, img):
21 | try:
22 | cv2.imencode(Path(filename).suffix, img)[1].tofile(filename)
23 | return True
24 | except Exception:
25 | return False
26 |
27 |
28 | def imshow(path, im):
29 | _imshow(path.encode('unicode_escape').decode(), im)
30 |
31 |
32 | # PyTorch functions ----------------------------------------------------------------------------------------------------
33 | _torch_save = torch.save # copy to avoid recursion errors
34 |
35 |
36 | def torch_save(*args, **kwargs):
37 | """Use dill (if exists) to serialize the lambda functions where pickle does not do this."""
38 | try:
39 | import dill as pickle
40 | except ImportError:
41 | import pickle
42 |
43 | if 'pickle_module' not in kwargs:
44 | kwargs['pickle_module'] = pickle
45 | return _torch_save(*args, **kwargs)
46 |
--------------------------------------------------------------------------------
/Detector/ultralytics/yolo/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 |
3 | from . import v8
4 |
5 | __all__ = 'v8', # tuple or list
6 |
--------------------------------------------------------------------------------
/Detector/ultralytics/yolo/cfg/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import sys
3 |
4 | from ultralytics.utils import LOGGER
5 |
6 | # Set modules in sys.modules under their old name
7 | sys.modules['ultralytics.yolo.cfg'] = importlib.import_module('ultralytics.cfg')
8 |
9 | LOGGER.warning("WARNING ⚠️ 'ultralytics.yolo.cfg' is deprecated since '8.0.136' and will be removed in '8.1.0'. "
10 | "Please use 'ultralytics.cfg' instead.")
11 |
--------------------------------------------------------------------------------
/Detector/ultralytics/yolo/data/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import sys
3 |
4 | from ultralytics.utils import LOGGER
5 |
6 | # Set modules in sys.modules under their old name
7 | sys.modules['ultralytics.yolo.data'] = importlib.import_module('ultralytics.data')
8 | # This is for updating old cls models, or the way in following warning won't work.
9 | sys.modules['ultralytics.yolo.data.augment'] = importlib.import_module('ultralytics.data.augment')
10 |
11 | DATA_WARNING = """WARNING ⚠️ 'ultralytics.yolo.data' is deprecated since '8.0.136' and will be removed in '8.1.0'. Please use 'ultralytics.data' instead.
12 | Note this warning may be related to loading older models. You can update your model to current structure with:
13 | import torch
14 | ckpt = torch.load("model.pt") # applies to both official and custom models
15 | torch.save(ckpt, "updated-model.pt")
16 | """
17 | LOGGER.warning(DATA_WARNING)
18 |
--------------------------------------------------------------------------------
/Detector/ultralytics/yolo/engine/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import sys
3 |
4 | from ultralytics.utils import LOGGER
5 |
6 | # Set modules in sys.modules under their old name
7 | sys.modules['ultralytics.yolo.engine'] = importlib.import_module('ultralytics.engine')
8 |
9 | LOGGER.warning("WARNING ⚠️ 'ultralytics.yolo.engine' is deprecated since '8.0.136' and will be removed in '8.1.0'. "
10 | "Please use 'ultralytics.engine' instead.")
11 |
--------------------------------------------------------------------------------
/Detector/ultralytics/yolo/utils/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import sys
3 |
4 | from ultralytics.utils import LOGGER
5 |
6 | # Set modules in sys.modules under their old name
7 | sys.modules['ultralytics.yolo.utils'] = importlib.import_module('ultralytics.utils')
8 |
9 | UTILS_WARNING = """WARNING ⚠️ 'ultralytics.yolo.utils' is deprecated since '8.0.136' and will be removed in '8.1.0'. Please use 'ultralytics.utils' instead.
10 | Note this warning may be related to loading older models. You can update your model to current structure with:
11 | import torch
12 | ckpt = torch.load("model.pt") # applies to both official and custom models
13 | torch.save(ckpt, "updated-model.pt")
14 | """
15 | LOGGER.warning(UTILS_WARNING)
16 |
--------------------------------------------------------------------------------
/Detector/ultralytics/yolo/v8/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import sys
3 |
4 | from ultralytics.utils import LOGGER
5 |
6 | # Set modules in sys.modules under their old name
7 | sys.modules['ultralytics.yolo.v8'] = importlib.import_module('ultralytics.models.yolo')
8 |
9 | LOGGER.warning("WARNING ⚠️ 'ultralytics.yolo.v8' is deprecated since '8.0.136' and will be removed in '8.1.0'. "
10 | "Please use 'ultralytics.models.yolo' instead.")
11 |
--------------------------------------------------------------------------------
/Detector/yolov8n.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/Detector/yolov8n.pt
--------------------------------------------------------------------------------
/cldm/model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 |
4 | from omegaconf import OmegaConf
5 | from ldm.util import instantiate_from_config
6 |
7 |
8 | def get_state_dict(d):
9 | return d.get('state_dict', d)
10 |
11 |
12 | def load_state_dict(ckpt_path, location='cpu'):
13 | _, extension = os.path.splitext(ckpt_path)
14 | if extension.lower() == ".safetensors":
15 | import safetensors.torch
16 | state_dict = safetensors.torch.load_file(ckpt_path, device=location)
17 | else:
18 | state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))
19 | state_dict = get_state_dict(state_dict)
20 | print(f'Loaded state_dict from [{ckpt_path}]')
21 | return state_dict
22 |
23 |
24 | def create_model(config_path):
25 | config = OmegaConf.load(config_path)
26 | model = instantiate_from_config(config.model).cpu()
27 | print(f'Loaded model config from [{config_path}]')
28 | return model
29 |
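A minimal sketch of how these helpers are meant to be combined, mirroring their use in tool_add_control.py below; the checkpoint filename is a placeholder.

from cldm.model import create_model, load_state_dict

# config path matches the one used in tool_add_control.py; 'control_polyp.ckpt' is a placeholder
model = create_model('./models/cldm_v15.yaml')
model.load_state_dict(load_state_dict('control_polyp.ckpt', location='cpu'))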
--------------------------------------------------------------------------------
/dataset_test_load.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import numpy as np
4 |
5 | from torch.utils.data import Dataset
6 | import os
7 |
8 |
9 | class MyTestDataset(Dataset):
10 | def __init__(self):
11 | self.data = []
12 | with open('./sample_test.json', 'rt') as f:
13 | for line in f:
14 | self.data.append(json.loads(line))
15 |
16 | def __len__(self):
17 | return len(self.data)
18 |
19 | def __getitem__(self, idx):
20 | item = self.data[idx]
21 |
22 | source_filename = item['source']
23 | target_filename = item['target']
24 | prompt = item['prompt']
25 |
26 | source = cv2.imread('./polyp_data/' + source_filename) # replace directory "polyp_data" with your test data source directory
27 | target = cv2.imread('./polyp_data/' + target_filename)
28 |
29 | #source = cv2.resize(source, (64, 64)) # !!!!!!!!!
30 | #target = cv2.resize(target, (64, 64)) # !!!!!!!!!
31 |
32 |
33 | # Do not forget that OpenCV reads images in BGR order.
34 | source = cv2.cvtColor(source, cv2.COLOR_BGR2RGB)
35 | target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)
36 |
37 | # Normalize source images to [0, 1].
38 | source = source.astype(np.float16) / 255.0
39 |
40 | # Normalize target images to [-1, 1].
41 | target = (target.astype(np.float16) / 127.5) - 1.0
42 |
43 |
44 | return dict(jpg=target, txt=prompt, hint=source, target_name=target_filename)
45 |
--------------------------------------------------------------------------------
/dataset_train_load.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import numpy as np
4 |
5 | from torch.utils.data import Dataset
6 |
7 |
8 | class MyDataset(Dataset):
9 | def __init__(self):
10 | self.data = []
11 | with open('./sample_train.json', 'rt') as f:
12 | for line in f:
13 | self.data.append(json.loads(line))
14 |
15 | def __len__(self):
16 | return len(self.data)
17 |
18 | def __getitem__(self, idx):
19 | item = self.data[idx]
20 |
21 | source_filename = item['source']
22 | target_filename = item['target']
23 | prompt = item['prompt']
24 |
25 | source = cv2.imread('./polyp_data/' + source_filename) #replace directory "polyp_data" with your source data directory
26 | target = cv2.imread('./polyp_data/' + target_filename) #replace directory "polyp_data" with your target data directory
27 |
28 | #source = cv2.resize(source, (64, 64)) # !!!!!!!!!
29 | #target = cv2.resize(target, (64, 64)) # !!!!!!!!!
30 |
31 |
32 | # Do not forget that OpenCV reads images in BGR order.
33 | source = cv2.cvtColor(source, cv2.COLOR_BGR2RGB)
34 | target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)
35 |
36 | # Normalize source images to [0, 1].
37 | source = source.astype(np.float16) / 255.0
38 |
39 | # Normalize target images to [-1, 1].
40 | target = (target.astype(np.float16) / 127.5) - 1.0
41 |
42 |
43 | return dict(jpg=target, txt=prompt, hint=source)
44 |
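A minimal sketch of feeding MyDataset into a PyTorch DataLoader, the way a ControlNet-style training loop would consume it; batch size and worker count are illustrative.

from torch.utils.data import DataLoader
from dataset_train_load import MyDataset

dataset = MyDataset()  # reads ./sample_train.json and loads images from ./polyp_data/
loader = DataLoader(dataset, num_workers=4, batch_size=4, shuffle=True)
batch = next(iter(loader))
print(batch['jpg'].shape, batch['hint'].shape, len(batch['txt']))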
--------------------------------------------------------------------------------
/figures/.gitkeep:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/figures/intro.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/figures/intro.png
--------------------------------------------------------------------------------
/ldm/data/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/ldm/data/__init__.py
--------------------------------------------------------------------------------
/ldm/data/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from ldm.modules.midas.api import load_midas_transform
4 |
5 |
6 | class AddMiDaS(object):
7 | def __init__(self, model_type):
8 | super().__init__()
9 | self.transform = load_midas_transform(model_type)
10 |
11 | def pt2np(self, x):
12 | x = ((x + 1.0) * .5).detach().cpu().numpy()
13 | return x
14 |
15 | def np2pt(self, x):
16 | x = torch.from_numpy(x) * 2 - 1.
17 | return x
18 |
19 | def __call__(self, sample):
20 | # sample['jpg'] is tensor hwc in [-1, 1] at this point
21 | x = self.pt2np(sample['jpg'])
22 | x = self.transform({"image": x})["image"]
23 | sample['midas_in'] = x
24 | return sample
25 |
--------------------------------------------------------------------------------
/ldm/models/diffusion/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/ldm/models/diffusion/__init__.py
--------------------------------------------------------------------------------
/ldm/models/diffusion/dpm_solver/__init__.py:
--------------------------------------------------------------------------------
1 | from .sampler import DPMSolverSampler
--------------------------------------------------------------------------------
/ldm/models/diffusion/sampling_util.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 |
5 | def append_dims(x, target_dims):
6 | """Appends dimensions to the end of a tensor until it has target_dims dimensions.
7 | From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
8 | dims_to_append = target_dims - x.ndim
9 | if dims_to_append < 0:
10 | raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
11 | return x[(...,) + (None,) * dims_to_append]
12 |
13 |
14 | def norm_thresholding(x0, value):
15 | s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)
16 | return x0 * (value / s)
17 |
18 |
19 | def spatial_norm_thresholding(x0, value):
20 | # b c h w
21 | s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)
22 | return x0 * (value / s)
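A worked example of append_dims, which the thresholding helpers above use to broadcast a per-sample norm back over the latent; shapes are illustrative.

import torch
from ldm.models.diffusion.sampling_util import append_dims, norm_thresholding

x0 = torch.randn(2, 4, 32, 32)             # b c h w latent batch
s = x0.pow(2).flatten(1).mean(1).sqrt()    # per-sample RMS, shape (2,)
s = append_dims(s, x0.ndim)                # shape (2, 1, 1, 1), ready for broadcasting
x0_clipped = norm_thresholding(x0, 1.0)    # same idea with the clamp applied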
--------------------------------------------------------------------------------
/ldm/modules/diffusionmodules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/ldm/modules/diffusionmodules/__init__.py
--------------------------------------------------------------------------------
/ldm/modules/distributions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/ldm/modules/distributions/__init__.py
--------------------------------------------------------------------------------
/ldm/modules/encoders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Vanshali/ControlPolypNet/a7d1cedd781075c485c0852d6783bd8d6698a16b/ldm/modules/encoders/__init__.py
--------------------------------------------------------------------------------
/ldm/modules/image_degradation/__init__.py:
--------------------------------------------------------------------------------
1 | from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
2 | from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
3 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch==1.12.1
2 | torchvision==0.13.1
3 | numpy==1.23.1
4 | albumentations==1.3.0
5 | opencv-contrib-python==4.4.0.46
6 | imageio==2.9.0
7 | imageio-ffmpeg==0.4.2
8 | pytorch-lightning==1.5.0
9 | omegaconf==2.1.1
10 | test-tube>=0.7.5
11 | streamlit==1.12.1
12 | einops==0.3.0
13 | transformers==4.19.2
14 | webdataset==0.2.5
15 | kornia==0.6
16 | open_clip_torch==2.0.2
17 | invisible-watermark>=0.1.5
18 | streamlit-drawable-canvas==0.8.0
19 | torchmetrics==0.6.0
20 | timm==0.6.12
21 | addict==2.4.0
22 | yapf==0.32.0
23 | prettytable==3.6.0
24 | safetensors==0.2.7
25 | basicsr==1.4.2
26 | ultralytics
27 |
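Editor's note: these pins reflect the ControlNet-era stack (PyTorch 1.12.1, pytorch-lightning 1.5.0), and train.py below relies on the Lightning 1.x Trainer signature (e.g. the gpus= argument), so installing into a clean environment with

    pip install -r requirements.txt

is the safest way to reproduce the setup rather than mixing in newer releases.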
--------------------------------------------------------------------------------
/sample_test.json:
--------------------------------------------------------------------------------
1 | {"source": "./negative_data/negative_masked_images_all/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image011171.jpg", "target": "./negative_data/negative_orig/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image011171.jpg", "prompt": "polyp"}
2 | {"source": "./negative_data/negative_masked_images_all/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image008809.jpg", "target": "./negative_data/negative_orig/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image008809.jpg", "prompt": "polyp"}
3 | {"source": "./negative_data/negative_masked_images_all/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image008088.jpg", "target": "./negative_data/negative_orig/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image008088.jpg", "prompt": "polyp"}
4 | {"source": "./negative_data/negative_masked_images_all/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image001353.jpg", "target": "./negative_data/negative_orig/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image001353.jpg", "prompt": "polyp"}
5 | {"source": "./negative_data/negative_masked_images_all/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image007298.jpg", "target": "./negative_data/negative_orig/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image007298.jpg", "prompt": "polyp"}
6 | {"source": "./negative_data/negative_masked_images_all/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image008893.jpg", "target": "./negative_data/negative_orig/case1/case_M_20181109094641_0U62372110931241_1_003_001-1_Negative_ayy_image008893.jpg", "prompt": "polyp"}
7 |
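Editor's note: each line of this file is an independent JSON record pairing a masked input frame ('source') with the corresponding original frame ('target') and a text prompt. A minimal sketch (not part of the repository) for reading it as JSON Lines:

    import json

    records = []
    with open('./sample_test.json') as f:
        for line in f:
            line = line.strip()
            if line:
                records.append(json.loads(line))  # keys: 'source', 'target', 'prompt'

    print(len(records), records[0]['prompt'])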
--------------------------------------------------------------------------------
/tool_add_control.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | assert len(sys.argv) == 3, 'Args are wrong.'
5 |
6 | input_path = sys.argv[1]
7 | output_path = sys.argv[2]
8 |
9 | assert os.path.exists(input_path), 'Input model does not exist.'
10 | assert not os.path.exists(output_path), 'Output filename already exists.'
11 | assert os.path.exists(os.path.dirname(output_path)), 'Output path is not valid.'
12 |
13 | import torch
14 | from share import *
15 | from cldm.model import create_model
16 |
17 |
18 | def get_node_name(name, parent_name):
19 |     if len(name) <= len(parent_name):
20 |         return False, ''
21 |     p = name[:len(parent_name)]
22 |     if p != parent_name:
23 |         return False, ''
24 |     return True, name[len(parent_name):]
25 |
26 |
27 | model = create_model(config_path='./models/cldm_v15.yaml')  # Replace with the path to the cldm_v15.yaml file inside the models folder
28 |
29 | pretrained_weights = torch.load(input_path)
30 | if 'state_dict' in pretrained_weights:
31 |     pretrained_weights = pretrained_weights['state_dict']
32 |
33 | scratch_dict = model.state_dict()
34 |
35 | target_dict = {}
36 | for k in scratch_dict.keys():
37 |     is_control, name = get_node_name(k, 'control_')
38 |     if is_control:
39 |         copy_k = 'model.diffusion_' + name
40 |     else:
41 |         copy_k = k
42 |     if copy_k in pretrained_weights:
43 |         target_dict[k] = pretrained_weights[copy_k].clone()
44 |     else:
45 |         target_dict[k] = scratch_dict[k].clone()
46 |         print(f'These weights are newly added: {k}')
47 |
48 | model.load_state_dict(target_dict, strict=True)
49 | torch.save(model.state_dict(), output_path)
50 | print('Done.')
51 |
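Editor's note: this script copies every weight that already exists in a plain Stable Diffusion 1.5 checkpoint into the ControlNet-augmented model defined by cldm_v15.yaml, initializing the control_... branch from the matching model.diffusion_... weights; keys with no pretrained counterpart keep their freshly initialized values and are printed. A typical invocation, with an illustrative input checkpoint name, is

    python tool_add_control.py ./models/v1-5-pruned.ckpt ./models/control_sd15_ini.ckpt

which produces the ./models/control_sd15_ini.ckpt file that train.py loads as resume_path.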
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | # Parts of this code are adapted from the ControlNet GitHub repository https://github.com/lllyasviel/ControlNet and from https://github.com/ultralytics/ultralytics/tree/main
2 |
3 | # from share import *
4 |
5 | import pytorch_lightning as pl
6 | from torch.utils.data import DataLoader
7 | from dataset_train_load import MyDataset
8 | from cldm.logger import ImageLogger
9 | from cldm.model import create_model, load_state_dict
10 | from pytorch_lightning.callbacks import ModelCheckpoint
11 |
12 | from dataset_test_load import MyTestDataset
13 |
14 |
15 | # Configs
16 | resume_path = './models/control_sd15_ini.ckpt'
17 | #resume_path = './model-epoch=55.ckpt'
18 |
19 | batch_size = 32
20 | logger_freq = 300
21 | #learning_rate = 1e-5
22 | #sd_locked = True
23 | only_mid_control = False
24 |
25 | learning_rate = 2e-6
26 | sd_locked = False
27 |
28 |
29 | # Load the model on CPU first; PyTorch Lightning will move it to the GPU automatically.
30 | model = create_model('./models/cldm_v15.yaml').cpu()
31 | model.load_state_dict(load_state_dict(resume_path, location='cpu'))
32 | model.learning_rate = learning_rate
33 | model.sd_locked = sd_locked
34 | model.only_mid_control = only_mid_control
35 |
36 |
37 | # Misc
38 | dataset = MyDataset()
39 | dataloader = DataLoader(dataset, num_workers=0, batch_size=batch_size, shuffle=True)
40 | checkpoint_callback = ModelCheckpoint(dirpath="./", filename='model-{epoch:02d}', save_top_k=-1)
41 | logger = ImageLogger(batch_frequency=logger_freq)
42 | trainer = pl.Trainer(gpus=1, precision=32, callbacks=[logger, checkpoint_callback], default_root_dir='./chkp_folder/', enable_checkpointing=True, max_epochs=55)
43 | #trainer = pl.Trainer(gpus=1, precision=32, callbacks=[logger], accumulate_grad_batches=4) # But this will be 4x slower
44 |
45 | # Train!
46 | trainer.fit(model, dataloader)
47 |
48 | for i in range(1, 201):  # Evaluate the model on the first 200 images from the test JSON file
49 |     test_dataset = MyTestDataset(i)
50 |     # Create a test dataloader
51 |     test_loader = DataLoader(test_dataset, batch_size=50, shuffle=False, num_workers=0)
52 |     trainer.validate(model=model, dataloaders=test_loader)
53 |
54 |
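Editor's note: the expected workflow is to first produce ./models/control_sd15_ini.ckpt with tool_add_control.py and then run this script; the ModelCheckpoint callback writes checkpoints to the working directory as model-epoch=NN.ckpt, and after training the model is validated over the test JSON via MyTestDataset. A minimal sketch (editor's assumption, not part of the repository) for reloading one of those checkpoints afterwards, mirroring the loading code above:

    from cldm.model import create_model, load_state_dict

    model = create_model('./models/cldm_v15.yaml').cpu()
    model.load_state_dict(load_state_dict('./model-epoch=55.ckpt', location='cpu'))
    model.eval()  # ready for sampling or validation outside the Trainer loop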
--------------------------------------------------------------------------------