├── .gitignore
├── README.md
├── agent.py
├── config.py
├── dataloader.py
├── dataset.py
├── docs
│   └── teaser.png
├── isp
│   ├── __init__.py
│   ├── denoise.py
│   ├── filters.py
│   ├── sharpen.py
│   └── unprocess_np.py
├── replay_memory.py
├── requirements.txt
├── train.py
├── util.py
├── value.py
└── yolov3
    ├── .dockerignore
    ├── .pre-commit-config.yaml
    ├── Arial.ttf
    ├── CITATION.cff
    ├── CONTRIBUTING.md
    ├── LICENSE
    ├── README.md
    ├── README.zh-CN.md
    ├── benchmarks.py
    ├── classify
    │   ├── predict.py
    │   ├── train.py
    │   ├── tutorial.ipynb
    │   └── val.py
    ├── data
    │   ├── Argoverse.yaml
    │   ├── GlobalWheat2020.yaml
    │   ├── ImageNet.yaml
    │   ├── SKU-110K.yaml
    │   ├── VisDrone.yaml
    │   ├── coco-2017-5000.yaml
    │   ├── coco-2017-small.yaml
    │   ├── coco-2017.yaml
    │   ├── coco-seg.yaml
    │   ├── coco.yaml
    │   ├── coco128-seg.yaml
    │   ├── coco128.yaml
    │   ├── hyps
    │   │   ├── hyp.Objects365.yaml
    │   │   ├── hyp.VOC.yaml
    │   │   ├── hyp.no-augmentation.yaml
    │   │   ├── hyp.scratch-high.yaml
    │   │   ├── hyp.scratch-low.yaml
    │   │   └── hyp.scratch-med.yaml
    │   ├── images
    │   │   ├── bus.jpg
    │   │   └── zidane.jpg
    │   ├── lod.yaml
    │   ├── lod_pynet.yaml
    │   ├── lod_rgb_dark.yaml
    │   ├── objects365.yaml
    │   ├── oprd.yaml
    │   ├── rod.yaml
    │   ├── rod_day.yaml
    │   ├── rod_night.yaml
    │   ├── rod_npy.yaml
    │   ├── rod_png.yaml
    │   ├── scripts
    │   │   ├── download_weights.sh
    │   │   ├── get_coco.sh
    │   │   ├── get_coco128.sh
    │   │   └── get_imagenet.sh
    │   ├── voc.yaml
    │   ├── xView.yaml
    │   └── yolov3-lod.yaml
    ├── detect.py
    ├── export.py
    ├── hubconf.py
    ├── models
    │   ├── __init__.py
    │   ├── common.py
    │   ├── experimental.py
    │   ├── hub
    │   │   ├── anchors.yaml
    │   │   ├── yolov5-bifpn.yaml
    │   │   ├── yolov5-fpn.yaml
    │   │   ├── yolov5-p2.yaml
    │   │   ├── yolov5-p34.yaml
    │   │   ├── yolov5-p6.yaml
    │   │   ├── yolov5-p7.yaml
    │   │   ├── yolov5-panet.yaml
    │   │   ├── yolov5l6.yaml
    │   │   ├── yolov5m6.yaml
    │   │   ├── yolov5n6.yaml
    │   │   ├── yolov5s-LeakyReLU.yaml
    │   │   ├── yolov5s-ghost.yaml
    │   │   ├── yolov5s-transformer.yaml
    │   │   ├── yolov5s6.yaml
    │   │   └── yolov5x6.yaml
    │   ├── segment
    │   │   ├── yolov5l-seg.yaml
    │   │   ├── yolov5m-seg.yaml
    │   │   ├── yolov5n-seg.yaml
    │   │   ├── yolov5s-seg.yaml
    │   │   └── yolov5x-seg.yaml
    │   ├── tf.py
    │   ├── yolo.py
    │   ├── yolov3-spp.yaml
    │   ├── yolov3-tiny.yaml
    │   ├── yolov3.yaml
    │   ├── yolov5l.yaml
    │   ├── yolov5m.yaml
    │   ├── yolov5n.yaml
    │   ├── yolov5s.yaml
    │   └── yolov5x.yaml
    ├── requirements.txt
    ├── run.sh
    ├── segment
    │   ├── predict.py
    │   ├── train.py
    │   ├── tutorial.ipynb
    │   └── val.py
    ├── setup.cfg
    ├── train.py
    ├── tutorial.ipynb
    ├── utils
    │   ├── __init__.py
    │   ├── activations.py
    │   ├── augmentations.py
    │   ├── autoanchor.py
    │   ├── autobatch.py
    │   ├── aws
    │   │   ├── __init__.py
    │   │   ├── mime.sh
    │   │   ├── resume.py
    │   │   └── userdata.sh
    │   ├── callbacks.py
    │   ├── dataloaders.py
    │   ├── docker
    │   │   ├── Dockerfile
    │   │   ├── Dockerfile-arm64
    │   │   └── Dockerfile-cpu
    │   ├── downloads.py
    │   ├── flask_rest_api
    │   │   ├── README.md
    │   │   ├── example_request.py
    │   │   └── restapi.py
    │   ├── general.py
    │   ├── google_app_engine
    │   │   ├── Dockerfile
    │   │   ├── additional_requirements.txt
    │   │   └── app.yaml
    │   ├── loggers
    │   │   ├── __init__.py
    │   │   ├── clearml
    │   │   │   ├── README.md
    │   │   │   ├── __init__.py
    │   │   │   ├── clearml_utils.py
    │   │   │   └── hpo.py
    │   │   ├── comet
    │   │   │   ├── README.md
    │   │   │   ├── __init__.py
    │   │   │   ├── comet_utils.py
    │   │   │   ├── hpo.py
    │   │   │   └── optimizer_config.json
    │   │   └── wandb
    │   │       ├── __init__.py
    │   │       └── wandb_utils.py
    │   ├── loss.py
    │   ├── metrics.py
    │   ├── plots.py
    │   ├── segment
    │   │   ├── __init__.py
    │   │   ├── augmentations.py
    │   │   ├── dataloaders.py
    │   │   ├── general.py
    │   │   ├── loss.py
    │   │   ├── metrics.py
    │   │   └── plots.py
    │   ├── torch_utils.py
    │   └── triton.py
    ├── val.py
    └── val_adaptiveisp.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | **/__pycache__/
3 | .idea/
4 | .vscode
5 |
6 | # Distribution / packaging
7 | .Python
8 | env/
9 | build/
10 | develop-eggs/
11 | dist/
12 | downloads/
13 | eggs/
14 | .eggs/
15 | lib/
16 | lib64/
17 | parts/
18 | sdist/
19 | var/
20 | wheels/
21 | *.egg-info/
22 | /wandb/
23 | .installed.cfg
24 | *.egg
25 |
26 | .DS_Store
27 |
28 | # Jupyter Notebook
29 | .ipynb_checkpoints
30 |
31 | # pyenv
32 | .python-version
33 |
34 |
35 | experiments
36 | outputs
37 | output
38 | output_results
39 | tmp
40 | *.out
41 | results
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
1 | from util import Dict
2 | from isp.filters import *
3 |
4 |
5 | cfg = Dict()
6 | cfg.val_freq = 1000
7 | cfg.save_model_freq = 1000
8 | cfg.print_freq = 100
9 | cfg.summary_freq = 100
10 | cfg.show_img_num = 2
11 |
12 | cfg.parameter_lr_mul = 1
13 | cfg.value_lr_mul = 1
14 | cfg.critic_lr_mul = 1
15 |
16 | ###########################################################################
17 | # Filter Parameters
18 | ###########################################################################
19 | cfg.filters = [
20 | ExposureFilter, GammaFilter, CCMFilter, SharpenFilter, DenoiseFilter,
21 | ToneFilter, ContrastFilter, SaturationPlusFilter, WNBFilter, ImprovedWhiteBalanceFilter
22 | ]
23 | cfg.filter_runtime_penalty = False
24 | cfg.filters_runtime = [1.7, 2.0, 1.9, 6.3, 10, 2.7, 2.1, 2.0, 1.9, 1.7]
25 | cfg.filter_runtime_penalty_lambda = 0.01
26 |
27 | # Gamma = 1/x ~ x
28 | cfg.curve_steps = 8
29 | cfg.gamma_range = 3
30 | cfg.exposure_range = 3.5
31 | cfg.wb_range = 1.1
32 | cfg.color_curve_range = (0.90, 1.10)
33 | cfg.lab_curve_range = (0.90, 1.10)
34 | cfg.tone_curve_range = (0.5, 2)
35 | cfg.usm_sharpen_range = (0.0, 2.0)  # Wikipedia recommends sigma 0.5-2.0 and amount 0.5-1.5 for unsharp masking
36 | cfg.sharpen_range = (0.0, 10.0)
37 | cfg.ccm_range = (-2.0, 2.0)
38 | cfg.denoise_range = (0.0, 1.0)
39 |
40 | cfg.masking = False
41 | cfg.minimum_strength = 0.3
42 | cfg.maximum_sharpness = 1
43 | cfg.clamp = False
44 |
45 |
46 | ###########################################################################
47 | # RL Parameters
48 | ###########################################################################
49 | cfg.critic_logit_multiplier = 100
50 | cfg.discount_factor = 1.0 # 0.98
51 | # Each time the agent reuses a filter, a penalty is subtracted from the reward. Set to 0 to disable.
52 | cfg.filter_usage_penalty = 1.0
53 | # Use the temporal-difference error (and thus the value network), or directly the single-step reward (greedy)?
54 | cfg.use_TD = True
55 | # Replay memory
56 | cfg.replay_memory_size = 128
57 | # Note: a trajectory is terminated either once it reaches this length or upon submission.
58 | # Random exploration can therefore terminate trajectories early as well.
59 | cfg.maximum_trajectory_length = 7
60 | cfg.over_length_keep_prob = 0.5
61 | cfg.all_reward = 1.0
62 | # Append input image with states?
63 | cfg.img_include_states = True
64 | # with prob. cfg.exploration, we randomly pick one action during training
65 | cfg.exploration = 0.05
66 | # Action entropy penalization
67 | cfg.exploration_penalty = 0.05
68 | cfg.early_stop_penalty = 1.0
69 | cfg.detect_loss_weight = 1.0
70 |
71 | ###########################################################################
72 | # Agent, Value Network Parameters
73 | ###########################################################################
74 | cfg.base_channels = 32
75 | cfg.dropout_keep_prob = 0.5
76 | cfg.shared_feature_extractor = True
77 | cfg.fc1_size = 128
78 | cfg.bnw = False
79 | # number of filters for the first convolutional layers for all networks
80 | cfg.feature_extractor_dims = 4096
81 | cfg.use_penalty = True
82 | cfg.z_type = 'uniform'
83 | cfg.z_dim_per_filter = 16
84 |
85 | cfg.num_state_dim = 3 + len(cfg.filters)
86 | cfg.z_dim = 3 + len(cfg.filters) * cfg.z_dim_per_filter
87 | cfg.test_steps = 5
88 |
--------------------------------------------------------------------------------
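
Note: the derived quantities at the end of config.py follow directly from the filter list. A minimal sanity check (a sketch, assuming the 10-filter list above; the three extra dimensions presumably correspond to the luminance/contrast/saturation statistics computed in value.py):

    # Sketch: verify the derived state/noise dimensions in config.py.
    num_filters = 10                            # len(cfg.filters)
    z_dim_per_filter = 16                       # cfg.z_dim_per_filter
    num_state_dim = 3 + num_filters             # -> 13
    z_dim = 3 + num_filters * z_dim_per_filter  # -> 163
    assert (num_state_dim, z_dim) == (13, 163)
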
/docs/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenImagingLab/AdaptiveISP/a6775e64d9c3768fc964ffcb00692c5e042111f1/docs/teaser.png
--------------------------------------------------------------------------------
/isp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenImagingLab/AdaptiveISP/a6775e64d9c3768fc964ffcb00692c5e042111f1/isp/__init__.py
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | about-time==4.2.1
2 | absl-py==2.1.0
3 | accelerate==0.33.0
4 | addict==2.4.0
5 | aiohttp==3.9.5
6 | aiosignal==1.3.1
7 | alive-progress==3.1.5
8 | antlr4-python3-runtime==4.9.3
9 | asttokens==2.4.1
10 | async-timeout==4.0.3
11 | attrs==23.2.0
12 | autograd==1.6.2
13 | cattrs==23.2.3
14 | cffi==1.16.0
15 | cma==3.2.2
16 | colorama==0.4.6
17 | colour-demosaicing==0.2.5
18 | colour-science==0.4.4
19 | comm==0.2.2
20 | contourpy==1.2.1
21 | cryptography==43.0.0
22 | cycler==0.12.1
23 | debugpy==1.8.1
24 | decorator==4.4.2
25 | Deprecated==1.2.14
26 | diffusers==0.30.0
27 | dill==0.3.8
28 | distro==1.9.0
29 | easydict==1.13
30 | einops==0.8.0
31 | exceptiongroup==1.2.1
32 | executing==2.0.1
33 | facexlib==0.3.0
34 | filterpy==1.4.5
35 | fonttools==4.51.0
36 | Forward_Warp==0.0.1
37 | forward_warp_cuda==0.0.0
38 | frozenlist==1.4.1
39 | fsspec==2024.3.1
40 | ftfy==6.2.0
41 | future==1.0.0
42 | grapheme==0.6.0
43 | grpcio==1.63.0
44 | huggingface-hub==0.24.5
45 | icecream==2.1.3
46 | imageio==2.34.1
47 | imageio-ffmpeg==0.5.1
48 | imgaug==0.4.0
49 | importlib_metadata==7.1.0
50 | iopath==0.1.10
51 | ipykernel==6.29.4
52 | ipython==8.24.0
53 | jedi==0.19.1
54 | joblib==1.4.2
55 | jupyter_client==8.6.1
56 | jupyter_core==5.7.2
57 | kiwisolver==1.4.5
58 | kornia==0.7.2
59 | kornia_rs==0.1.3
60 | lazy_loader==0.4
61 | lightning-utilities==0.11.5
62 | llvmlite==0.42.0
63 | lmdb==1.4.1
64 | loguru==0.7.2
65 | Markdown==3.6
66 | markdown-it-py==3.0.0
67 | matplotlib==3.8.4
68 | matplotlib-inline==0.1.7
69 | mdurl==0.1.2
70 | meshzoo==0.11.6
71 | mkl-service==2.4.0
72 | mmengine==0.10.4
73 | moviepy==1.0.3
74 | multidict==6.0.5
75 | nest-asyncio==1.6.0
76 | ninja==1.11.1.1
77 | numba==0.59.1
78 | omegaconf==2.3.0
79 | openai-clip==1.0.1
80 | opencv-python==4.9.0.80
81 | packaging==24.0
82 | pandas==2.2.2
83 | parso==0.8.4
84 | pexpect==4.9.0
85 | platformdirs==4.2.1
86 | portalocker==2.10.1
87 | proglog==0.1.10
88 | prompt-toolkit==3.0.43
89 | protobuf==5.26.1
90 | psutil==5.9.8
91 | ptyprocess==0.7.0
92 | pure-eval==0.2.2
93 | py-cpuinfo==9.0.0
94 | py-machineid==0.6.0
95 | pycocotools==2.0.7
96 | pycparser==2.22
97 | Pygments==2.18.0
98 | pyiqa==0.1.11
99 | pymoo==0.6.1.1
100 | pyparsing==3.1.2
101 | python-dateutil==2.9.0.post0
102 | python-package-info==0.0.9
103 | pytorch-lightning==2.3.3
104 | pytz==2024.1
105 | PyYAML==6.0.1
106 | pyzmq==26.0.3
107 | rawpy==0.21.0
108 | regex==2024.5.10
109 | requests-cache==1.2.1
110 | rich==13.7.1
111 | rich-argparse==1.5.2
112 | safetensors==0.4.3
113 | scikit-image==0.23.2
114 | scipy==1.13.0
115 | seaborn==0.13.2
116 | sentencepiece==0.2.0
117 | shapely==2.0.4
118 | six==1.16.0
119 | sk-video==1.1.10
120 | stack-data==0.6.3
121 | stonefish-license-manager==0.4.36
122 | tabulate==0.9.0
123 | tensorboard==2.16.2
124 | tensorboard-data-server==0.7.2
125 | termcolor==2.4.0
126 | thop==0.1.1.post2209072238
127 | tifffile==2024.5.10
128 | timm==0.9.16
129 | tokenizers==0.15.2
130 | tomli==2.0.1
131 | torch==2.0.1
132 | torch-tb-profiler==0.4.3
133 | torchaudio==2.0.2
134 | torchmetrics==1.4.0.post0
135 | torchvision==0.15.2
136 | tornado==6.4
137 | tqdm==4.66.4
138 | traitlets==5.14.3
139 | transformers==4.37.2
140 | triton==2.0.0
141 | tzdata==2024.1
142 | ultralytics==8.2.16
143 | url-normalize==1.4.3
144 | wcwidth==0.2.13
145 | Werkzeug==3.0.3
146 | wrapt==1.16.0
147 | x21==0.5.2
148 | yacs==0.1.8
149 | yapf==0.40.2
150 | yarl==1.9.4
151 | zipp==3.18.1
152 |
--------------------------------------------------------------------------------
/value.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | class FeatureExtractor(torch.nn.Module):
7 | def __init__(self, shape=(17, 64, 64), mid_channels=32, output_dim=4096):
8 | """shape: c,h,w"""
9 | super(FeatureExtractor, self).__init__()
10 | in_channels = shape[0]
11 | self.output_dim = output_dim
12 |
13 | min_feature_map_size = 4
14 | assert output_dim % (min_feature_map_size ** 2) == 0, 'output dim=%d' % output_dim
15 | size = int(shape[2])
16 | # print('Agent CNN:')
17 | # print(' ', shape)
18 | size = size // 2
19 | channels = mid_channels
20 | layers = []
21 | layers.append(nn.Conv2d(in_channels, channels, kernel_size=4, stride=2, padding=1))
22 | layers.append(nn.BatchNorm2d(channels))
23 | layers.append(nn.LeakyReLU(negative_slope=0.2))
24 | while size > min_feature_map_size:
25 | in_channels = channels
26 | if size == min_feature_map_size * 2:
27 | channels = output_dim // (min_feature_map_size ** 2)
28 | else:
29 | channels *= 2
30 | assert size % 2 == 0
31 | size = size // 2
32 | # print(size, in_channels, channels)
33 | layers.append(nn.Conv2d(in_channels, channels, kernel_size=4, stride=2, padding=1))
34 | layers.append(nn.BatchNorm2d(channels))
35 | layers.append(nn.LeakyReLU(negative_slope=0.2))
36 | # layers.append(nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1))
37 | # layers.append(nn.BatchNorm2d(channels))
38 | # layers.append(nn.LeakyReLU(negative_slope=0.2))
39 | self.layers = nn.Sequential(*layers)
40 |
41 | def forward(self, x):
42 | x = self.layers(x)
43 | x = torch.reshape(x, [-1, self.output_dim])
44 | return x
45 |
46 |
47 | # input: float in [0, 1]
48 | class Value(nn.Module):
49 | def __init__(self, cfg, shape=(19, 64, 64)):
50 | super(Value, self).__init__()
51 | self.cfg = cfg
52 | self.feature_extractor = FeatureExtractor(shape=shape, mid_channels=cfg.base_channels,
53 | output_dim=cfg.feature_extractor_dims)
54 |
55 | self.fc1 = nn.Linear(cfg.feature_extractor_dims, cfg.fc1_size)
56 | self.lrelu = nn.LeakyReLU(negative_slope=0.2)
57 | self.fc2 = nn.Linear(cfg.fc1_size, 1)
58 | self.tanh = nn.Tanh()
59 |
60 | self.down_sample = nn.AdaptiveAvgPool2d((shape[1], shape[2]))
61 |
62 | def forward(self, images, states=None):
63 | images = self.down_sample(images)
64 | lum = (images[:, 0, :, :] * 0.27 + images[:, 1, :, :] * 0.67 + images[:, 2, :, :] * 0.06 + 1e-5)[:, None, :, :]
65 | # print(lum.shape)
66 | # luminance and contrast
67 | luminance = torch.mean(lum, dim=(1, 2, 3))
68 | contrast = torch.var(lum, dim=(1, 2, 3))
69 | # saturation
70 | i_max, _ = torch.max(torch.clip(images, min=0.0, max=1.0), dim=1)
71 | i_min, _ = torch.min(torch.clip(images, min=0.0, max=1.0), dim=1)
72 | # print("i_max i_min shape:", i_max.shape, i_min.shape)
73 | sat = (i_max - i_min) / (torch.minimum(i_max + i_min, 2.0 - i_max - i_min) + 1e-2)
74 | # print("sat.shape", sat.shape)
75 | saturation = torch.mean(sat, dim=[1, 2])
76 | # print("luminance shape:", luminance.shape, contrast.shape, saturation.shape)
77 | repetition = 1
78 | state_feature = torch.cat(
79 | [torch.tile(luminance[:, None], [1, repetition]),
80 | torch.tile(contrast[:, None], [1, repetition]),
81 | torch.tile(saturation[:, None], [1, repetition])], dim=1)
82 | # print('States:', states.shape)
83 | if states is None:
84 | states = state_feature
85 | else:
86 | assert len(states.shape) == len(state_feature.shape)
87 | states = torch.cat([states, state_feature], dim=1)
88 | if states is not None:
89 | states = states[:, :, None, None] + images[:, 0:1, :, :] * 0
90 | # print(' States:', states.shape)
91 | images = torch.cat([images, states], dim=1)
92 | # print("images.shape", images.shape)
93 | feature = self.feature_extractor(images)
94 | # print(' CNN shape: ', feature.shape)
95 | # print('Before final FCs', feature.shape)
96 | out = self.fc2(self.lrelu(self.fc1(feature)))
97 | # print(' ', out.shape)
98 | # out = self.tanh(out)
99 | return out
100 |
101 |
102 | if __name__ == "__main__":
103 | from easydict import EasyDict
104 | import numpy as np
105 | cfg = EasyDict()
106 | cfg['base_channels'] = 32
107 | cfg['fc1_size'] = 128
108 | cfg['feature_extractor_dims'] = 4096
109 |
110 | np.random.seed(0)
111 | x = torch.randn((1, 3, 512, 512))
112 | states = torch.randn((1, 11))
113 | # x = np.transpose(x, (0, 3, 1, 2))
114 | # x = torch.from_numpy(x)
115 | value = Value(cfg)
116 | y = value(x, states)
117 | print(y.shape, y)
118 | print(value.state_dict())
119 | torch.save(value.state_dict(), "value.pth")
--------------------------------------------------------------------------------
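
Note: Value.forward builds its hand-crafted state features from a Rec. 601-style luminance (0.27R + 0.67G + 0.06B), its variance as a contrast proxy, and an HSL-style saturation. A quick numeric check of those formulas (a sketch, not part of the repo) on a pure-red image:

    import torch

    img = torch.zeros(1, 3, 2, 2)
    img[:, 0] = 1.0  # pure red: R=1, G=B=0
    lum = img[:, 0] * 0.27 + img[:, 1] * 0.67 + img[:, 2] * 0.06 + 1e-5
    i_max = img.clamp(0, 1).max(dim=1).values
    i_min = img.clamp(0, 1).min(dim=1).values
    sat = (i_max - i_min) / (torch.minimum(i_max + i_min, 2.0 - i_max - i_min) + 1e-2)
    print(lum.mean().item())  # ~0.27
    print(sat.mean().item())  # ~0.99 (fully saturated, up to the 1e-2 stabilizer)
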
/yolov3/.dockerignore:
--------------------------------------------------------------------------------
1 | # Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
2 | .git
3 | .cache
4 | .idea
5 | runs
6 | output
7 | coco
8 | storage.googleapis.com
9 |
10 | data/samples/*
11 | **/results*.csv
12 | *.jpg
13 |
14 | # Neural Network weights -----------------------------------------------------------------------------------------------
15 | **/*.pt
16 | **/*.pth
17 | **/*.onnx
18 | **/*.engine
19 | **/*.mlmodel
20 | **/*.torchscript
21 | **/*.torchscript.pt
22 | **/*.tflite
23 | **/*.h5
24 | **/*.pb
25 | *_saved_model/
26 | *_web_model/
27 | *_openvino_model/
28 |
29 | # Below Copied From .gitignore -----------------------------------------------------------------------------------------
30 | # Below Copied From .gitignore -----------------------------------------------------------------------------------------
31 |
32 |
33 | # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
34 | # Byte-compiled / optimized / DLL files
35 | __pycache__/
36 | *.py[cod]
37 | *$py.class
38 |
39 | # C extensions
40 | *.so
41 |
42 | # Distribution / packaging
43 | .Python
44 | env/
45 | build/
46 | develop-eggs/
47 | dist/
48 | downloads/
49 | eggs/
50 | .eggs/
51 | lib/
52 | lib64/
53 | parts/
54 | sdist/
55 | var/
56 | wheels/
57 | *.egg-info/
58 | wandb/
59 | .installed.cfg
60 | *.egg
61 |
62 | # PyInstaller
63 | # Usually these files are written by a python script from a template
64 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
65 | *.manifest
66 | *.spec
67 |
68 | # Installer logs
69 | pip-log.txt
70 | pip-delete-this-directory.txt
71 |
72 | # Unit test / coverage reports
73 | htmlcov/
74 | .tox/
75 | .coverage
76 | .coverage.*
77 | .cache
78 | nosetests.xml
79 | coverage.xml
80 | *.cover
81 | .hypothesis/
82 |
83 | # Translations
84 | *.mo
85 | *.pot
86 |
87 | # Django stuff:
88 | *.log
89 | local_settings.py
90 |
91 | # Flask stuff:
92 | instance/
93 | .webassets-cache
94 |
95 | # Scrapy stuff:
96 | .scrapy
97 |
98 | # Sphinx documentation
99 | docs/_build/
100 |
101 | # PyBuilder
102 | target/
103 |
104 | # Jupyter Notebook
105 | .ipynb_checkpoints
106 |
107 | # pyenv
108 | .python-version
109 |
110 | # celery beat schedule file
111 | celerybeat-schedule
112 |
113 | # SageMath parsed files
114 | *.sage.py
115 |
116 | # dotenv
117 | .env
118 |
119 | # virtualenv
120 | .venv*
121 | venv*/
122 | ENV*/
123 |
124 | # Spyder project settings
125 | .spyderproject
126 | .spyproject
127 |
128 | # Rope project settings
129 | .ropeproject
130 |
131 | # mkdocs documentation
132 | /site
133 |
134 | # mypy
135 | .mypy_cache/
136 |
137 |
138 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
139 |
140 | # General
141 | .DS_Store
142 | .AppleDouble
143 | .LSOverride
144 |
145 | # Icon must end with two \r
146 | Icon
147 | Icon?
148 |
149 | # Thumbnails
150 | ._*
151 |
152 | # Files that might appear in the root of a volume
153 | .DocumentRevisions-V100
154 | .fseventsd
155 | .Spotlight-V100
156 | .TemporaryItems
157 | .Trashes
158 | .VolumeIcon.icns
159 | .com.apple.timemachine.donotpresent
160 |
161 | # Directories potentially created on remote AFP share
162 | .AppleDB
163 | .AppleDesktop
164 | Network Trash Folder
165 | Temporary Items
166 | .apdisk
167 |
168 |
169 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
170 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
171 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
172 |
173 | # User-specific stuff:
174 | .idea/*
175 | .idea/**/workspace.xml
176 | .idea/**/tasks.xml
177 | .idea/dictionaries
178 | .html # Bokeh Plots
179 | .pg # TensorFlow Frozen Graphs
180 | .avi # videos
181 |
182 | # Sensitive or high-churn files:
183 | .idea/**/dataSources/
184 | .idea/**/dataSources.ids
185 | .idea/**/dataSources.local.xml
186 | .idea/**/sqlDataSources.xml
187 | .idea/**/dynamic.xml
188 | .idea/**/uiDesigner.xml
189 |
190 | # Gradle:
191 | .idea/**/gradle.xml
192 | .idea/**/libraries
193 |
194 | # CMake
195 | cmake-build-debug/
196 | cmake-build-release/
197 |
198 | # Mongo Explorer plugin:
199 | .idea/**/mongoSettings.xml
200 |
201 | ## File-based project format:
202 | *.iws
203 |
204 | ## Plugin-specific files:
205 |
206 | # IntelliJ
207 | out/
208 |
209 | # mpeltonen/sbt-idea plugin
210 | .idea_modules/
211 |
212 | # JIRA plugin
213 | atlassian-ide-plugin.xml
214 |
215 | # Cursive Clojure plugin
216 | .idea/replstate.xml
217 |
218 | # Crashlytics plugin (for Android Studio and IntelliJ)
219 | com_crashlytics_export_strings.xml
220 | crashlytics.properties
221 | crashlytics-build.properties
222 | fabric.properties
223 |
--------------------------------------------------------------------------------
/yolov3/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # Pre-commit hooks. For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md
3 |
4 | exclude: 'docs/'
5 | # Define bot property if installed via https://github.com/marketplace/pre-commit-ci
6 | ci:
7 | autofix_prs: true
8 | autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
9 | autoupdate_schedule: monthly
10 | # submodules: true
11 |
12 | repos:
13 | - repo: https://github.com/pre-commit/pre-commit-hooks
14 | rev: v4.4.0
15 | hooks:
16 | - id: end-of-file-fixer
17 | - id: trailing-whitespace
18 | - id: check-case-conflict
19 | # - id: check-yaml
20 | - id: check-docstring-first
21 | - id: double-quote-string-fixer
22 | - id: detect-private-key
23 |
24 | - repo: https://github.com/asottile/pyupgrade
25 | rev: v3.10.1
26 | hooks:
27 | - id: pyupgrade
28 | name: Upgrade code
29 |
30 | - repo: https://github.com/PyCQA/isort
31 | rev: 5.12.0
32 | hooks:
33 | - id: isort
34 | name: Sort imports
35 |
36 | - repo: https://github.com/google/yapf
37 | rev: v0.40.0
38 | hooks:
39 | - id: yapf
40 | name: YAPF formatting
41 |
42 | - repo: https://github.com/executablebooks/mdformat
43 | rev: 0.7.16
44 | hooks:
45 | - id: mdformat
46 | name: MD formatting
47 | additional_dependencies:
48 | - mdformat-gfm
49 | - mdformat-black
50 | # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md"
51 |
52 | - repo: https://github.com/PyCQA/flake8
53 | rev: 6.1.0
54 | hooks:
55 | - id: flake8
56 | name: PEP8
57 |
58 | - repo: https://github.com/codespell-project/codespell
59 | rev: v2.2.5
60 | hooks:
61 | - id: codespell
62 | args:
63 | - --ignore-words-list=crate,nd,strack,dota
64 |
65 | # - repo: https://github.com/asottile/yesqa
66 | # rev: v1.4.0
67 | # hooks:
68 | # - id: yesqa
69 |
70 | # - repo: https://github.com/asottile/dead
71 | # rev: v1.5.0
72 | # hooks:
73 | # - id: dead
74 |
--------------------------------------------------------------------------------
/yolov3/Arial.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenImagingLab/AdaptiveISP/a6775e64d9c3768fc964ffcb00692c5e042111f1/yolov3/Arial.ttf
--------------------------------------------------------------------------------
/yolov3/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | preferred-citation:
3 | type: software
4 | message: If you use YOLOv5, please cite it as below.
5 | authors:
6 | - family-names: Jocher
7 | given-names: Glenn
8 | orcid: "https://orcid.org/0000-0001-5950-6979"
9 | title: "YOLOv5 by Ultralytics"
10 | version: 7.0
11 | doi: 10.5281/zenodo.3908559
12 | date-released: 2020-5-29
13 | license: AGPL-3.0
14 | url: "https://github.com/ultralytics/yolov5"
15 |
--------------------------------------------------------------------------------
/yolov3/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contributing to YOLOv3 🚀
2 |
3 | We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:
4 |
5 | - Reporting a bug
6 | - Discussing the current state of the code
7 | - Submitting a fix
8 | - Proposing a new feature
9 | - Becoming a maintainer
10 |
11 | YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be
12 | helping push the frontiers of what's possible in AI 😃!
13 |
14 | ## Submitting a Pull Request (PR) 🛠️
15 |
16 | Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
17 |
18 | ### 1. Select File to Update
19 |
20 | Select `requirements.txt` to update by clicking on it in GitHub.
21 |
22 | 
23 |
24 | ### 2. Click 'Edit this file'
25 |
26 | The button is in the top-right corner.
27 |
28 | 
29 |
30 | ### 3. Make Changes
31 |
32 | Change the `matplotlib` version from `3.2.2` to `3.3`.
33 |
34 | 
35 |
36 | ### 4. Preview Changes and Submit PR
37 |
38 | Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
39 | for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
40 | changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!
41 |
42 | 
43 |
44 | ### PR recommendations
45 |
46 | To allow your work to be integrated as seamlessly as possible, we advise you to:
47 |
48 | - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update
49 | your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.
50 |
51 | 
52 |
53 | - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**.
54 |
55 | 
56 |
57 | - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
58 | but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
59 |
60 | ## Submitting a Bug Report 🐛
61 |
62 | If you spot a problem with YOLOv5 please submit a Bug Report!
63 |
64 | For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
65 | short guidelines below to help users provide what we need to get started.
66 |
67 | When asking a question, people will be better able to provide help if you provide **code** that they can easily
68 | understand and use to **reproduce** the problem. This is referred to by community members as creating
69 | a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/). Your code that reproduces
70 | the problem should be:
71 |
72 | - ✅ **Minimal** – Use as little code as possible that still produces the same problem
73 | - ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
74 | - ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
75 |
76 | In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
77 | should be:
78 |
79 | - ✅ **Current** – Verify that your code is up-to-date with the current
80 | GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
81 | copy to ensure your problem has not already been resolved by previous commits.
82 | - ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
83 | repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
84 |
85 | If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛
86 | **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide
87 | a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us better
88 | understand and diagnose your problem.
89 |
90 | ## License
91 |
92 | By contributing, you agree that your contributions will be licensed under
93 | the [AGPL-3.0 license](https://choosealicense.com/licenses/agpl-3.0/)
94 |
--------------------------------------------------------------------------------
/yolov3/data/Argoverse.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
3 | # Example usage: python train.py --data Argoverse.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── Argoverse ← downloads here (31.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/Argoverse # dataset root dir
12 | train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
13 | val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
14 | test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: bus
23 | 5: truck
24 | 6: traffic_light
25 | 7: stop_sign
26 |
27 |
28 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
29 | download: |
30 | import json
31 |
32 | from tqdm import tqdm
33 | from utils.general import download, Path
34 |
35 |
36 | def argoverse2yolo(set):
37 | labels = {}
38 | a = json.load(open(set, "rb"))
39 | for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
40 | img_id = annot['image_id']
41 | img_name = a['images'][img_id]['name']
42 | img_label_name = f'{img_name[:-3]}txt'
43 |
44 | cls = annot['category_id'] # instance class id
45 | x_center, y_center, width, height = annot['bbox']
46 | x_center = (x_center + width / 2) / 1920.0 # offset and scale
47 | y_center = (y_center + height / 2) / 1200.0 # offset and scale
48 | width /= 1920.0 # scale
49 | height /= 1200.0 # scale
50 |
51 | img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
52 | if not img_dir.exists():
53 | img_dir.mkdir(parents=True, exist_ok=True)
54 |
55 | k = str(img_dir / img_label_name)
56 | if k not in labels:
57 | labels[k] = []
58 | labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
59 |
60 | for k in labels:
61 | with open(k, "w") as f:
62 | f.writelines(labels[k])
63 |
64 |
65 | # Download
66 | dir = Path(yaml['path']) # dataset root dir
67 | urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
68 | download(urls, dir=dir, delete=False)
69 |
70 | # Convert
71 | annotations_dir = 'Argoverse-HD/annotations/'
72 | (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
73 | for d in "train.json", "val.json":
74 | argoverse2yolo(dir / annotations_dir / d) # convert Argoverse-HD annotations to YOLO labels
75 |
--------------------------------------------------------------------------------
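
Note: the Argoverse download script converts COCO-style boxes (top-left x, y plus width, height in pixels) to normalized, center-based YOLO coordinates using the fixed 1920x1200 frame size. A worked example with a hypothetical box:

    # Hypothetical box; mirrors the conversion in the download script above.
    x, y, w, h = 960.0, 300.0, 200.0, 400.0
    cx = (x + w / 2) / 1920.0  # 0.55208
    cy = (y + h / 2) / 1200.0  # 0.41667
    print(cx, cy, w / 1920.0, h / 1200.0)  # 0.55208 0.41667 0.10417 0.33333
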
/yolov3/data/GlobalWheat2020.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
3 | # Example usage: python train.py --data GlobalWheat2020.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── GlobalWheat2020 ← downloads here (7.0 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/GlobalWheat2020 # dataset root dir
12 | train: # train images (relative to 'path') 3422 images
13 | - images/arvalis_1
14 | - images/arvalis_2
15 | - images/arvalis_3
16 | - images/ethz_1
17 | - images/rres_1
18 | - images/inrae_1
19 | - images/usask_1
20 | val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
21 | - images/ethz_1
22 | test: # test images (optional) 1276 images
23 | - images/utokyo_1
24 | - images/utokyo_2
25 | - images/nau_1
26 | - images/uq_1
27 |
28 | # Classes
29 | names:
30 | 0: wheat_head
31 |
32 |
33 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
34 | download: |
35 | from utils.general import download, Path
36 |
37 |
38 | # Download
39 | dir = Path(yaml['path']) # dataset root dir
40 | urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
41 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
42 | download(urls, dir=dir)
43 |
44 | # Make Directories
45 | for p in 'annotations', 'images', 'labels':
46 | (dir / p).mkdir(parents=True, exist_ok=True)
47 |
48 | # Move
49 | for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
50 | 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
51 | (dir / p).rename(dir / 'images' / p) # move to /images
52 | f = (dir / p).with_suffix('.json') # json file
53 | if f.exists():
54 | f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
55 |
--------------------------------------------------------------------------------
/yolov3/data/SKU-110K.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
3 | # Example usage: python train.py --data SKU-110K.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── SKU-110K ← downloads here (13.6 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/SKU-110K # dataset root dir
12 | train: train.txt # train images (relative to 'path') 8219 images
13 | val: val.txt # val images (relative to 'path') 588 images
14 | test: test.txt # test images (optional) 2936 images
15 |
16 | # Classes
17 | names:
18 | 0: object
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | import shutil
24 | from tqdm import tqdm
25 | from utils.general import np, pd, Path, download, xyxy2xywh
26 |
27 |
28 | # Download
29 | dir = Path(yaml['path']) # dataset root dir
30 | parent = Path(dir.parent) # download dir
31 | urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
32 | download(urls, dir=parent, delete=False)
33 |
34 | # Rename directories
35 | if dir.exists():
36 | shutil.rmtree(dir)
37 | (parent / 'SKU110K_fixed').rename(dir) # rename dir
38 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
39 |
40 | # Convert labels
41 | names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
42 | for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
43 | x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
44 | images, unique_images = x[:, 0], np.unique(x[:, 0])
45 | with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
46 | f.writelines(f'./images/{s}\n' for s in unique_images)
47 | for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
48 | cls = 0 # single-class dataset
49 | with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
50 | for r in x[images == im]:
51 | w, h = r[6], r[7] # image width, height
52 | xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
53 | f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
54 |
--------------------------------------------------------------------------------
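
Note: unlike the Argoverse converter, the SKU-110K script starts from corner (xyxy) coordinates, normalizes them by image width/height, and then converts corners to center/size via xyxy2xywh. The same math spelled out on a hypothetical box (a sketch, not repo code):

    # Hypothetical corners and image size; reproduces the xyxy -> xywh step.
    x1, y1, x2, y2 = 100.0, 50.0, 300.0, 250.0
    w_img, h_img = 1000.0, 500.0
    xn1, yn1, xn2, yn2 = x1 / w_img, y1 / h_img, x2 / w_img, y2 / h_img
    cx, cy = (xn1 + xn2) / 2, (yn1 + yn2) / 2  # 0.2, 0.3
    bw, bh = xn2 - xn1, yn2 - yn1              # 0.2, 0.4
    print(f"0 {cx:.5f} {cy:.5f} {bw:.5f} {bh:.5f}")  # label line, class 0
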
/yolov3/data/VisDrone.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
3 | # Example usage: python train.py --data VisDrone.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VisDrone ← downloads here (2.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VisDrone # dataset root dir
12 | train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
13 | val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
14 | test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
15 |
16 | # Classes
17 | names:
18 | 0: pedestrian
19 | 1: people
20 | 2: bicycle
21 | 3: car
22 | 4: van
23 | 5: truck
24 | 6: tricycle
25 | 7: awning-tricycle
26 | 8: bus
27 | 9: motor
28 |
29 |
30 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
31 | download: |
32 | from utils.general import download, os, Path
33 |
34 | def visdrone2yolo(dir):
35 | from PIL import Image
36 | from tqdm import tqdm
37 |
38 | def convert_box(size, box):
39 | # Convert VisDrone box to YOLO xywh box
40 | dw = 1. / size[0]
41 | dh = 1. / size[1]
42 | return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
43 |
44 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
45 | pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
46 | for f in pbar:
47 | img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
48 | lines = []
49 | with open(f, 'r') as file: # read annotation.txt
50 | for row in [x.split(',') for x in file.read().strip().splitlines()]:
51 | if row[4] == '0': # VisDrone 'ignored regions' class 0
52 | continue
53 | cls = int(row[5]) - 1
54 | box = convert_box(img_size, tuple(map(int, row[:4])))
55 | lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
56 | with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
57 | fl.writelines(lines) # write label.txt
58 |
59 |
60 | # Download
61 | dir = Path(yaml['path']) # dataset root dir
62 | urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
63 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
64 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
65 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
66 | download(urls, dir=dir, curl=True, threads=4)
67 |
68 | # Convert
69 | for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
70 | visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
71 |
--------------------------------------------------------------------------------
/yolov3/data/coco-2017-5000.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/coco2017 # dataset root dir
12 | train: train2017-5000.txt # train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: val2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | #download: |
102 | # from utils.general import download, Path
103 | #
104 | #
105 | # # Download labels
106 | # segments = False # segment or box labels
107 | # dir = Path(yaml['path']) # dataset root dir
108 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | # download(urls, dir=dir.parent)
111 | #
112 | # # Download data
113 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | # download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
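
Note: the train2017-5000.txt / train2017-1000.txt lists referenced by these coco-2017 variants are fixed-size subsets of the full train2017.txt image list. A minimal sketch (hypothetical helper, file names assumed, not part of the repo) of how such a subset list could be generated:

    import random

    random.seed(0)
    with open('train2017.txt') as f:
        paths = f.read().splitlines()
    subset = random.sample(paths, 5000)  # 5000 of the 118287 train images
    with open('train2017-5000.txt', 'w') as f:
        f.write('\n'.join(subset) + '\n')
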
/yolov3/data/coco-2017-small.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/coco2017 # dataset root dir
12 | train: train2017-1000.txt # train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017-small.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | #download: |
102 | # from utils.general import download, Path
103 | #
104 | #
105 | # # Download labels
106 | # segments = False # segment or box labels
107 | # dir = Path(yaml['path']) # dataset root dir
108 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | # download(urls, dir=dir.parent)
111 | #
112 | # # Download data
113 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | # download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
/yolov3/data/coco-2017.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/coco2017 # dataset root dir
12 | train: train2017-1000.txt # train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | #download: |
102 | # from utils.general import download, Path
103 | #
104 | #
105 | # # Download labels
106 | # segments = False # segment or box labels
107 | # dir = Path(yaml['path']) # dataset root dir
108 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | # download(urls, dir=dir.parent)
111 | #
112 | # # Download data
113 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | # download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
/yolov3/data/coco-seg.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128-seg ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/coco2017/coco2017labels-segments # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 128 images
13 | val: adaptiveisp_val2017.txt # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | #download: https://ultralytics.com/assets/coco128-seg.zip
102 |
--------------------------------------------------------------------------------
/yolov3/data/coco.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/coco2017 # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | # download: |
102 | # from utils.general import download, Path
103 |
104 |
105 | # # Download labels
106 | # segments = False # segment or box labels
107 | # dir = Path(yaml['path']) # dataset root dir
108 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | # download(urls, dir=dir.parent)
111 |
112 | # # Download data
113 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | # download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
/yolov3/data/coco128-seg.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128-seg ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128-seg # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128-seg.zip
102 |
--------------------------------------------------------------------------------
/yolov3/data/coco128.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128 ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128 # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128.zip
102 |
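A note on the dataset YAMLs above: the `train`/`val`/`test` keys accept a directory of images, a `.txt` file listing image paths, or a list of either, all resolved relative to `path`. A minimal sketch of that resolution (illustrative only; the repo's actual loader lives in `yolov3/utils/dataloaders.py`, and the file path below is an assumption):

    from pathlib import Path

    import yaml

    def resolve_split(cfg_file='data/coco128.yaml', split='train'):
        with open(cfg_file, errors='ignore') as f:
            cfg = yaml.safe_load(f)
        root = Path(cfg['path'])  # dataset root dir
        entry = cfg.get(split)    # dir, .txt file, list, or missing (e.g. optional 'test')
        if entry is None:
            return []
        entries = entry if isinstance(entry, list) else [entry]
        return [root / e for e in entries]  # paths are relative to 'path'

    print(resolve_split())  # [PosixPath('../datasets/coco128/images/train2017')] on POSIX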
--------------------------------------------------------------------------------
/yolov3/data/hyps/hyp.Objects365.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for Objects365 training
3 | # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.00258
7 | lrf: 0.17
8 | momentum: 0.779
9 | weight_decay: 0.00058
10 | warmup_epochs: 1.33
11 | warmup_momentum: 0.86
12 | warmup_bias_lr: 0.0711
13 | box: 0.0539
14 | cls: 0.299
15 | cls_pw: 0.825
16 | obj: 0.632
17 | obj_pw: 1.0
18 | iou_t: 0.2
19 | anchor_t: 3.44
20 | anchors: 3.2
21 | fl_gamma: 0.0
22 | hsv_h: 0.0188
23 | hsv_s: 0.704
24 | hsv_v: 0.36
25 | degrees: 0.0
26 | translate: 0.0902
27 | scale: 0.491
28 | shear: 0.0
29 | perspective: 0.0
30 | flipud: 0.0
31 | fliplr: 0.5
32 | mosaic: 1.0
33 | mixup: 0.0
34 | copy_paste: 0.0
35 |
--------------------------------------------------------------------------------
/yolov3/data/hyps/hyp.VOC.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for VOC training
3 | # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | # YOLOv3 Hyperparameter Evolution Results
7 | # Best generation: 467
8 | # Last generation: 996
9 | # metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss
10 | # 0.87729, 0.85125, 0.91286, 0.72664, 0.0076739, 0.0042529, 0.0013865
11 |
12 | lr0: 0.00334
13 | lrf: 0.15135
14 | momentum: 0.74832
15 | weight_decay: 0.00025
16 | warmup_epochs: 3.3835
17 | warmup_momentum: 0.59462
18 | warmup_bias_lr: 0.18657
19 | box: 0.02
20 | cls: 0.21638
21 | cls_pw: 0.5
22 | obj: 0.51728
23 | obj_pw: 0.67198
24 | iou_t: 0.2
25 | anchor_t: 3.3744
26 | fl_gamma: 0.0
27 | hsv_h: 0.01041
28 | hsv_s: 0.54703
29 | hsv_v: 0.27739
30 | degrees: 0.0
31 | translate: 0.04591
32 | scale: 0.75544
33 | shear: 0.0
34 | perspective: 0.0
35 | flipud: 0.0
36 | fliplr: 0.5
37 | mosaic: 0.85834
38 | mixup: 0.04266
39 | copy_paste: 0.0
40 | anchors: 3.412
41 |
--------------------------------------------------------------------------------
/yolov3/data/hyps/hyp.no-augmentation.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters when using the Albumentations framework
3 | # python train.py --hyp hyp.no-augmentation.yaml
4 | # See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv3 + Albumentations usage examples
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | # these parameters are all zero because augmentation is handled by the Albumentations framework instead
22 | fl_gamma: 0.0 # focal loss gamma (EfficientDet default gamma=1.5)
23 | hsv_h: 0 # image HSV-Hue augmentation (fraction)
24 | hsv_s: 0 # image HSV-Saturation augmentation (fraction)
25 | hsv_v: 0 # image HSV-Value augmentation (fraction)
26 | degrees: 0.0 # image rotation (+/- deg)
27 | translate: 0 # image translation (+/- fraction)
28 | scale: 0 # image scale (+/- gain)
29 | shear: 0 # image shear (+/- deg)
30 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
31 | flipud: 0.0 # image flip up-down (probability)
32 | fliplr: 0.0 # image flip left-right (probability)
33 | mosaic: 0.0 # image mosaic (probability)
34 | mixup: 0.0 # image mixup (probability)
35 | copy_paste: 0.0 # segment copy-paste (probability)
36 |
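As the comments in these hyp files note, `lrf` is a multiplier, not an absolute rate: training ends at `lr0 * lrf`. A short sketch of the linear schedule form used with `--linear` (illustrative; the one-cycle variant differs, and `epochs=300` is just an example value):

    lr0, lrf, epochs = 0.01, 0.1, 300

    def lf(epoch):
        # linear decay of the LR multiplier from 1.0 at epoch 0 to lrf at the final epoch
        return (1 - epoch / epochs) * (1.0 - lrf) + lrf

    print(lr0 * lf(0))       # 0.01  -> initial learning rate
    print(lr0 * lf(epochs))  # 0.001 -> final learning rate = lr0 * lrf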
--------------------------------------------------------------------------------
/yolov3/data/hyps/hyp.scratch-high.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for high-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (EfficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.1 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov3/data/hyps/hyp.scratch-low.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for low-augmentation COCO training from scratch
3 | # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.5 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 1.0 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (EfficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.5 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.0 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov3/data/hyps/hyp.scratch-med.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for medium-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (EfficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov3/data/images/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenImagingLab/AdaptiveISP/a6775e64d9c3768fc964ffcb00692c5e042111f1/yolov3/data/images/bus.jpg
--------------------------------------------------------------------------------
/yolov3/data/images/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenImagingLab/AdaptiveISP/a6775e64d9c3768fc964ffcb00692c5e042111f1/yolov3/data/images/zidane.jpg
--------------------------------------------------------------------------------
/yolov3/data/lod.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # LOD low-light object detection dataset
3 | # Example usage: python train.py --data lod.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── LOD ← dataset root dir
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/LOD # dataset root dir
12 | train: train.txt # train images (relative to 'path') 1000 images
13 | val: val.txt # val images (relative to 'path') 1230 images
14 | test: test.txt # all images 2230
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | #download: |
102 | # from utils.general import download, Path
103 | #
104 | #
105 | # # Download labels
106 | # segments = False # segment or box labels
107 | # dir = Path(yaml['path']) # dataset root dir
108 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | # download(urls, dir=dir.parent)
111 | #
112 | # # Download data
113 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | # download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
/yolov3/data/lod_pynet.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # LOD low-light object detection dataset (val list processed by PyNet ISP)
3 | # Example usage: python train.py --data lod_pynet.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── LOD ← dataset root dir
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/LOD # dataset root dir
12 | train: train.txt # train images (relative to 'path') 1000 images
13 | val: val_PyNetISP.txt # val.txt # val images (relative to 'path') 1230 images
14 | test: test.txt # all images 2230
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | #download: |
102 | # from utils.general import download, Path
103 | #
104 | #
105 | # # Download labels
106 | # segments = False # segment or box labels
107 | # dir = Path(yaml['path']) # dataset root dir
108 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | # download(urls, dir=dir.parent)
111 | #
112 | # # Download data
113 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | # download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
/yolov3/data/lod_rgb_dark.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # LOD low-light object detection dataset (dark RGB val list)
3 | # Example usage: python train.py --data lod_rgb_dark.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── LOD ← dataset root dir
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/LOD # dataset root dir
12 | train: train.txt # train images (relative to 'path') 1000 images
13 | val: val_rgb_dark.txt # val.txt # val images (relative to 'path') 1230 images
14 | test: test.txt # all images 2230
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | #download: |
102 | # from utils.general import download, Path
103 | #
104 | #
105 | # # Download labels
106 | # segments = False # segment or box labels
107 | # dir = Path(yaml['path']) # dataset root dir
108 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | # download(urls, dir=dir.parent)
111 | #
112 | # # Download data
113 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | # download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
/yolov3/data/oprd.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # OPRD dataset
3 | # Example usage: python train.py --data oprd.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── OPRD ← dataset root dir
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/OPRD # dataset root dir
12 | train: train.txt # train images (relative to 'path') 118287 images
13 | val: val.txt # val images (relative to 'path') 5000 images
14 | test: test.txt #test_PyNetISP.txt
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | #download: |
102 | # from utils.general import download, Path
103 | #
104 | #
105 | # # Download labels
106 | # segments = False # segment or box labels
107 | # dir = Path(yaml['path']) # dataset root dir
108 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | # download(urls, dir=dir.parent)
111 | #
112 | # # Download data
113 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | # download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
/yolov3/data/rod.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # ROD dataset
3 | # Example usage: python train.py --data rod.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── ROD ← dataset root dir
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/ROD # dataset root dir
12 | train: train.txt # ../../../datasets/ROD/npy/train # val_npy10.txt # train_npy.txt # train images (relative to 'path') 12000 images
13 | val: test.txt # ../../../datasets/ROD/npy/test # val_npy10.txt # val images (relative to 'path') 3200 images
14 |
15 | # Classes
16 | names:
17 | 0: person
18 | 1: bicycle
19 | 2: car
20 | 3: motorcycle
21 | 4: airplane
22 | 5: bus
23 | 6: train
24 | 7: truck
25 | 8: boat
26 | 9: traffic light
27 | 10: fire hydrant
28 | 11: stop sign
29 | 12: parking meter
30 | 13: bench
31 | 14: bird
32 | 15: cat
33 | 16: dog
34 | 17: horse
35 | 18: sheep
36 | 19: cow
37 | 20: elephant
38 | 21: bear
39 | 22: zebra
40 | 23: giraffe
41 | 24: backpack
42 | 25: umbrella
43 | 26: handbag
44 | 27: tie
45 | 28: suitcase
46 | 29: frisbee
47 | 30: skis
48 | 31: snowboard
49 | 32: sports ball
50 | 33: kite
51 | 34: baseball bat
52 | 35: baseball glove
53 | 36: skateboard
54 | 37: surfboard
55 | 38: tennis racket
56 | 39: bottle
57 | 40: wine glass
58 | 41: cup
59 | 42: fork
60 | 43: knife
61 | 44: spoon
62 | 45: bowl
63 | 46: banana
64 | 47: apple
65 | 48: sandwich
66 | 49: orange
67 | 50: broccoli
68 | 51: carrot
69 | 52: hot dog
70 | 53: pizza
71 | 54: donut
72 | 55: cake
73 | 56: chair
74 | 57: couch
75 | 58: potted plant
76 | 59: bed
77 | 60: dining table
78 | 61: toilet
79 | 62: tv
80 | 63: laptop
81 | 64: mouse
82 | 65: remote
83 | 66: keyboard
84 | 67: cell phone
85 | 68: microwave
86 | 69: oven
87 | 70: toaster
88 | 71: sink
89 | 72: refrigerator
90 | 73: book
91 | 74: clock
92 | 75: vase
93 | 76: scissors
94 | 77: teddy bear
95 | 78: hair drier
96 | 79: toothbrush
97 |
98 |
99 | # Download script/URL (optional)
100 | #download: |
101 | # from utils.general import download, Path
102 | #
103 | #
104 | # # Download labels
105 | # segments = False # segment or box labels
106 | # dir = Path(yaml['path']) # dataset root dir
107 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
108 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
109 | # download(urls, dir=dir.parent)
110 | #
111 | # # Download data
112 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
113 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
114 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
115 | # download(urls, dir=dir / 'images', threads=3)
116 |
--------------------------------------------------------------------------------
/yolov3/data/rod_day.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # ROD dataset (daytime val split)
3 | # Example usage: python train.py --data rod_day.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── ROD ← dataset root dir
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/ROD # dataset root dir
12 | train: train.txt # train images (relative to 'path') 12000 images
13 | val: val_day.txt # val images (relative to 'path') 3200 images
14 |
15 | # Classes
16 | names:
17 | 0: person
18 | 1: bicycle
19 | 2: car
20 | 3: motorcycle
21 | 4: airplane
22 | 5: bus
23 | 6: train
24 | 7: truck
25 | 8: boat
26 | 9: traffic light
27 | 10: fire hydrant
28 | 11: stop sign
29 | 12: parking meter
30 | 13: bench
31 | 14: bird
32 | 15: cat
33 | 16: dog
34 | 17: horse
35 | 18: sheep
36 | 19: cow
37 | 20: elephant
38 | 21: bear
39 | 22: zebra
40 | 23: giraffe
41 | 24: backpack
42 | 25: umbrella
43 | 26: handbag
44 | 27: tie
45 | 28: suitcase
46 | 29: frisbee
47 | 30: skis
48 | 31: snowboard
49 | 32: sports ball
50 | 33: kite
51 | 34: baseball bat
52 | 35: baseball glove
53 | 36: skateboard
54 | 37: surfboard
55 | 38: tennis racket
56 | 39: bottle
57 | 40: wine glass
58 | 41: cup
59 | 42: fork
60 | 43: knife
61 | 44: spoon
62 | 45: bowl
63 | 46: banana
64 | 47: apple
65 | 48: sandwich
66 | 49: orange
67 | 50: broccoli
68 | 51: carrot
69 | 52: hot dog
70 | 53: pizza
71 | 54: donut
72 | 55: cake
73 | 56: chair
74 | 57: couch
75 | 58: potted plant
76 | 59: bed
77 | 60: dining table
78 | 61: toilet
79 | 62: tv
80 | 63: laptop
81 | 64: mouse
82 | 65: remote
83 | 66: keyboard
84 | 67: cell phone
85 | 68: microwave
86 | 69: oven
87 | 70: toaster
88 | 71: sink
89 | 72: refrigerator
90 | 73: book
91 | 74: clock
92 | 75: vase
93 | 76: scissors
94 | 77: teddy bear
95 | 78: hair drier
96 | 79: toothbrush
97 |
98 |
99 | # Download script/URL (optional)
100 | #download: |
101 | # from utils.general import download, Path
102 | #
103 | #
104 | # # Download labels
105 | # segments = False # segment or box labels
106 | # dir = Path(yaml['path']) # dataset root dir
107 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
108 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
109 | # download(urls, dir=dir.parent)
110 | #
111 | # # Download data
112 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
113 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
114 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
115 | # download(urls, dir=dir / 'images', threads=3)
116 |
--------------------------------------------------------------------------------
/yolov3/data/rod_night.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # ROD dataset (nighttime val split)
3 | # Example usage: python train.py --data rod_night.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── ROD ← dataset root dir
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/ROD # dataset root dir
12 | train: train.txt # train images (relative to 'path') 12000 images
13 | val: val_night.txt # val images (relative to 'path') 3200 images
14 |
15 | # Classes
16 | names:
17 | 0: person
18 | 1: bicycle
19 | 2: car
20 | 3: motorcycle
21 | 4: airplane
22 | 5: bus
23 | 6: train
24 | 7: truck
25 | 8: boat
26 | 9: traffic light
27 | 10: fire hydrant
28 | 11: stop sign
29 | 12: parking meter
30 | 13: bench
31 | 14: bird
32 | 15: cat
33 | 16: dog
34 | 17: horse
35 | 18: sheep
36 | 19: cow
37 | 20: elephant
38 | 21: bear
39 | 22: zebra
40 | 23: giraffe
41 | 24: backpack
42 | 25: umbrella
43 | 26: handbag
44 | 27: tie
45 | 28: suitcase
46 | 29: frisbee
47 | 30: skis
48 | 31: snowboard
49 | 32: sports ball
50 | 33: kite
51 | 34: baseball bat
52 | 35: baseball glove
53 | 36: skateboard
54 | 37: surfboard
55 | 38: tennis racket
56 | 39: bottle
57 | 40: wine glass
58 | 41: cup
59 | 42: fork
60 | 43: knife
61 | 44: spoon
62 | 45: bowl
63 | 46: banana
64 | 47: apple
65 | 48: sandwich
66 | 49: orange
67 | 50: broccoli
68 | 51: carrot
69 | 52: hot dog
70 | 53: pizza
71 | 54: donut
72 | 55: cake
73 | 56: chair
74 | 57: couch
75 | 58: potted plant
76 | 59: bed
77 | 60: dining table
78 | 61: toilet
79 | 62: tv
80 | 63: laptop
81 | 64: mouse
82 | 65: remote
83 | 66: keyboard
84 | 67: cell phone
85 | 68: microwave
86 | 69: oven
87 | 70: toaster
88 | 71: sink
89 | 72: refrigerator
90 | 73: book
91 | 74: clock
92 | 75: vase
93 | 76: scissors
94 | 77: teddy bear
95 | 78: hair drier
96 | 79: toothbrush
97 |
98 |
99 | # Download script/URL (optional)
100 | #download: |
101 | # from utils.general import download, Path
102 | #
103 | #
104 | # # Download labels
105 | # segments = False # segment or box labels
106 | # dir = Path(yaml['path']) # dataset root dir
107 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
108 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
109 | # download(urls, dir=dir.parent)
110 | #
111 | # # Download data
112 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
113 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
114 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
115 | # download(urls, dir=dir / 'images', threads=3)
116 |
--------------------------------------------------------------------------------
/yolov3/data/rod_npy.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # ROD dataset (RAW .npy inputs)
3 | # Example usage: python train.py --data rod_npy.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── ROD ← dataset root dir
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/ROD # dataset root dir
12 | train: ../../../datasets/ROD/npy/train # val_npy10.txt # train_npy.txt # train images (relative to 'path') 12000 images
13 | val: ../../../datasets/ROD/npy/val # val_npy10.txt # val images (relative to 'path') 3200 images
14 |
15 | # Classes
16 | names:
17 | 0: person
18 | 1: bicycle
19 | 2: car
20 | 3: motorcycle
21 | 4: airplane
22 | 5: bus
23 | 6: train
24 | 7: truck
25 | 8: boat
26 | 9: traffic light
27 | 10: fire hydrant
28 | 11: stop sign
29 | 12: parking meter
30 | 13: bench
31 | 14: bird
32 | 15: cat
33 | 16: dog
34 | 17: horse
35 | 18: sheep
36 | 19: cow
37 | 20: elephant
38 | 21: bear
39 | 22: zebra
40 | 23: giraffe
41 | 24: backpack
42 | 25: umbrella
43 | 26: handbag
44 | 27: tie
45 | 28: suitcase
46 | 29: frisbee
47 | 30: skis
48 | 31: snowboard
49 | 32: sports ball
50 | 33: kite
51 | 34: baseball bat
52 | 35: baseball glove
53 | 36: skateboard
54 | 37: surfboard
55 | 38: tennis racket
56 | 39: bottle
57 | 40: wine glass
58 | 41: cup
59 | 42: fork
60 | 43: knife
61 | 44: spoon
62 | 45: bowl
63 | 46: banana
64 | 47: apple
65 | 48: sandwich
66 | 49: orange
67 | 50: broccoli
68 | 51: carrot
69 | 52: hot dog
70 | 53: pizza
71 | 54: donut
72 | 55: cake
73 | 56: chair
74 | 57: couch
75 | 58: potted plant
76 | 59: bed
77 | 60: dining table
78 | 61: toilet
79 | 62: tv
80 | 63: laptop
81 | 64: mouse
82 | 65: remote
83 | 66: keyboard
84 | 67: cell phone
85 | 68: microwave
86 | 69: oven
87 | 70: toaster
88 | 71: sink
89 | 72: refrigerator
90 | 73: book
91 | 74: clock
92 | 75: vase
93 | 76: scissors
94 | 77: teddy bear
95 | 78: hair drier
96 | 79: toothbrush
97 |
98 |
99 | # Download script/URL (optional)
100 | #download: |
101 | # from utils.general import download, Path
102 | #
103 | #
104 | # # Download labels
105 | # segments = False # segment or box labels
106 | # dir = Path(yaml['path']) # dataset root dir
107 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
108 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
109 | # download(urls, dir=dir.parent)
110 | #
111 | # # Download data
112 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
113 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
114 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
115 | # download(urls, dir=dir / 'images', threads=3)
116 |
--------------------------------------------------------------------------------
/yolov3/data/rod_png.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | # ROD dataset (PNG inputs)
3 | # Example usage: python train.py --data rod_png.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── ROD ← dataset root dir
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../../../datasets/ROD # dataset root dir
12 | train: train.txt # train images (relative to 'path') 12000 images
13 | val: val.txt # val images (relative to 'path') 3200 images
14 |
15 | # Classes
16 | names:
17 | 0: person
18 | 1: bicycle
19 | 2: car
20 | 3: motorcycle
21 | 4: airplane
22 | 5: bus
23 | 6: train
24 | 7: truck
25 | 8: boat
26 | 9: traffic light
27 | 10: fire hydrant
28 | 11: stop sign
29 | 12: parking meter
30 | 13: bench
31 | 14: bird
32 | 15: cat
33 | 16: dog
34 | 17: horse
35 | 18: sheep
36 | 19: cow
37 | 20: elephant
38 | 21: bear
39 | 22: zebra
40 | 23: giraffe
41 | 24: backpack
42 | 25: umbrella
43 | 26: handbag
44 | 27: tie
45 | 28: suitcase
46 | 29: frisbee
47 | 30: skis
48 | 31: snowboard
49 | 32: sports ball
50 | 33: kite
51 | 34: baseball bat
52 | 35: baseball glove
53 | 36: skateboard
54 | 37: surfboard
55 | 38: tennis racket
56 | 39: bottle
57 | 40: wine glass
58 | 41: cup
59 | 42: fork
60 | 43: knife
61 | 44: spoon
62 | 45: bowl
63 | 46: banana
64 | 47: apple
65 | 48: sandwich
66 | 49: orange
67 | 50: broccoli
68 | 51: carrot
69 | 52: hot dog
70 | 53: pizza
71 | 54: donut
72 | 55: cake
73 | 56: chair
74 | 57: couch
75 | 58: potted plant
76 | 59: bed
77 | 60: dining table
78 | 61: toilet
79 | 62: tv
80 | 63: laptop
81 | 64: mouse
82 | 65: remote
83 | 66: keyboard
84 | 67: cell phone
85 | 68: microwave
86 | 69: oven
87 | 70: toaster
88 | 71: sink
89 | 72: refrigerator
90 | 73: book
91 | 74: clock
92 | 75: vase
93 | 76: scissors
94 | 77: teddy bear
95 | 78: hair drier
96 | 79: toothbrush
97 |
98 |
99 | # Download script/URL (optional)
100 | #download: |
101 | # from utils.general import download, Path
102 | #
103 | #
104 | # # Download labels
105 | # segments = False # segment or box labels
106 | # dir = Path(yaml['path']) # dataset root dir
107 | # url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
108 | # urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
109 | # download(urls, dir=dir.parent)
110 | #
111 | # # Download data
112 | # urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
113 | # 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
114 | # 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
115 | # download(urls, dir=dir / 'images', threads=3)
116 |
--------------------------------------------------------------------------------
/yolov3/data/scripts/download_weights.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
3 | # Download latest models from https://github.com/ultralytics/yolov5/releases
4 | # Example usage: bash data/scripts/download_weights.sh
5 | # parent
6 | # └── yolov5
7 | # ├── yolov5s.pt ← downloads here
8 | # ├── yolov5m.pt
9 | # └── ...
10 |
11 | python - <<EOF
--------------------------------------------------------------------------------
/yolov3/requirements.txt:
--------------------------------------------------------------------------------
5 | gitpython>=3.1.30
6 | matplotlib>=3.3
7 | numpy>=1.22.2
8 | opencv-python>=4.1.1
9 | Pillow>=7.1.2
10 | psutil # system resources
11 | PyYAML>=5.3.1
12 | requests>=2.23.0
13 | scipy>=1.4.1
14 | thop>=0.1.1 # FLOPs computation
15 | torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended)
16 | torchvision>=0.8.1
17 | tqdm>=4.64.0
18 | ultralytics>=8.0.147
19 | # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012
20 |
21 | # Logging ---------------------------------------------------------------------
22 | # tensorboard>=2.4.1
23 | # clearml>=1.2.0
24 | # comet
25 |
26 | # Plotting --------------------------------------------------------------------
27 | pandas>=1.1.4
28 | seaborn>=0.11.0
29 |
30 | # Export ----------------------------------------------------------------------
31 | # coremltools>=6.0 # CoreML export
32 | # onnx>=1.10.0 # ONNX export
33 | # onnx-simplifier>=0.4.1 # ONNX simplifier
34 | # nvidia-pyindex # TensorRT export
35 | # nvidia-tensorrt # TensorRT export
36 | # scikit-learn<=1.1.2 # CoreML quantization
37 | # tensorflow>=2.4.0 # TF exports (-cpu, -aarch64, -macos)
38 | # tensorflowjs>=3.9.0 # TF.js export
39 | # openvino-dev>=2023.0 # OpenVINO export
40 |
41 | # Deploy ----------------------------------------------------------------------
42 | setuptools>=65.5.1 # Snyk vulnerability fix
43 | # tritonclient[all]~=2.24.0
44 |
45 | # Extras ----------------------------------------------------------------------
46 | # ipython # interactive notebook
47 | # mss # screenshots
48 | # albumentations>=1.0.3
49 | # pycocotools>=2.0.6 # COCO mAP
50 |
--------------------------------------------------------------------------------
/yolov3/setup.cfg:
--------------------------------------------------------------------------------
1 | # Project-wide configuration file, can be used for package metadata and other tool configurations
2 | # Example usage: global configuration for PEP8 (via flake8) settings or default pytest arguments
3 | # Local usage: pip install pre-commit, pre-commit run --all-files
4 |
5 | [metadata]
6 | license_files = LICENSE
7 | description_file = README.md
8 |
9 | [tool:pytest]
10 | norecursedirs =
11 | .git
12 | dist
13 | build
14 | addopts =
15 | --doctest-modules
16 | --durations=25
17 | --color=yes
18 |
19 | [flake8]
20 | max-line-length = 120
21 | exclude = .tox,*.egg,build,temp
22 | select = E,W,F
23 | doctests = True
24 | verbose = 2
25 | # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
26 | format = pylint
27 | # see: https://www.flake8rules.com/
28 | ignore = E731,F405,E402,W504,E501
29 | # E731: Do not assign a lambda expression, use a def
30 | # F405: name may be undefined, or defined from star imports: module
31 | # E402: module level import not at top of file
32 | # W504: line break after binary operator
33 | # E501: line too long
34 | # removed:
35 | # F401: module imported but unused
36 | # E231: missing whitespace after ‘,’, ‘;’, or ‘:’
37 | # E127: continuation line over-indented for visual indent
38 | # F403: ‘from module import *’ used; unable to detect undefined names
39 |
40 |
41 | [isort]
42 | # https://pycqa.github.io/isort/docs/configuration/options.html
43 | line_length = 120
44 | # see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
45 | multi_line_output = 0
46 |
47 | [yapf]
48 | based_on_style = pep8
49 | spaces_before_comment = 2
50 | COLUMN_LIMIT = 120
51 | COALESCE_BRACKETS = True
52 | SPACES_AROUND_POWER_OPERATOR = True
53 | SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True
54 | SPLIT_BEFORE_CLOSING_BRACKET = False
55 | SPLIT_BEFORE_FIRST_ARGUMENT = False
56 | # EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
57 |
--------------------------------------------------------------------------------
/yolov3/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | utils/initialization
4 | """
5 |
6 | import contextlib
7 | import platform
8 | import threading
9 |
10 |
11 | def emojis(str=''):
12 | # Return platform-dependent emoji-safe version of string
13 | return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
14 |
15 |
16 | class TryExcept(contextlib.ContextDecorator):
17 | # YOLOv3 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
18 | def __init__(self, msg=''):
19 | self.msg = msg
20 |
21 | def __enter__(self):
22 | pass
23 |
24 | def __exit__(self, exc_type, value, traceback):
25 | if value:
26 | print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
27 | return True
28 |
29 |
30 | def threaded(func):
31 | # Multi-threads a target function and returns thread. Usage: @threaded decorator
32 | def wrapper(*args, **kwargs):
33 | thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
34 | thread.start()
35 | return thread
36 |
37 | return wrapper
38 |
39 |
40 | def join_threads(verbose=False):
41 | # Join all daemon threads, i.e. atexit.register(lambda: join_threads())
42 | main_thread = threading.current_thread()
43 | for t in threading.enumerate():
44 | if t is not main_thread:
45 | if verbose:
46 | print(f'Joining thread {t.name}')
47 | t.join()
48 |
49 |
50 | def notebook_init(verbose=True):
51 | # Check system software and hardware
52 | print('Checking setup...')
53 |
54 | import os
55 | import shutil
56 |
57 | from ultralytics.utils.checks import check_requirements
58 |
59 | from utils.general import check_font, is_colab
60 | from utils.torch_utils import select_device # imports
61 |
62 | check_font()
63 |
64 | import psutil
65 |
66 | if check_requirements('wandb', install=False):
67 | os.system('pip uninstall -y wandb') # eliminate unexpected account creation prompt with infinite hang
68 | if is_colab():
69 | shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory
70 |
71 | # System info
72 | display = None
73 | if verbose:
74 | gb = 1 << 30 # bytes to GiB (1024 ** 3)
75 | ram = psutil.virtual_memory().total
76 | total, used, free = shutil.disk_usage('/')
77 | with contextlib.suppress(Exception): # clear display if ipython is installed
78 | from IPython import display
79 | display.clear_output()
80 | s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
81 | else:
82 | s = ''
83 |
84 | select_device(newline=False)
85 | print(emojis(f'Setup complete ✅ {s}'))
86 | return display
87 |
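A usage sketch (not part of the repo) for the two decorators above, assuming the `yolov3` directory is on `sys.path` so the package imports as `utils`:

    from utils import TryExcept, threaded

    @TryExcept('demo failed')
    def may_fail():
        raise ValueError('boom')  # __exit__ prints the error and suppresses it

    @threaded
    def background(n):
        print(f'worker running with n={n}')

    may_fail()         # prints "demo failed: boom" instead of raising
    t = background(3)  # returns the started daemon Thread immediately
    t.join()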
--------------------------------------------------------------------------------
/yolov3/utils/activations.py:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Activation functions
4 | """
5 |
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 |
11 | class SiLU(nn.Module):
12 | # SiLU activation https://arxiv.org/pdf/1606.08415.pdf
13 | @staticmethod
14 | def forward(x):
15 | return x * torch.sigmoid(x)
16 |
17 |
18 | class Hardswish(nn.Module):
19 | # Hard-SiLU activation
20 | @staticmethod
21 | def forward(x):
22 | # return x * F.hardsigmoid(x) # for TorchScript and CoreML
23 | return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX
24 |
25 |
26 | class Mish(nn.Module):
27 | # Mish activation https://github.com/digantamisra98/Mish
28 | @staticmethod
29 | def forward(x):
30 | return x * F.softplus(x).tanh()
31 |
32 |
33 | class MemoryEfficientMish(nn.Module):
34 | # Memory-efficient implementation of the Mish activation
35 | class F(torch.autograd.Function):
36 |
37 | @staticmethod
38 | def forward(ctx, x):
39 | ctx.save_for_backward(x)
40 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
41 |
42 | @staticmethod
43 | def backward(ctx, grad_output):
44 | x = ctx.saved_tensors[0]
45 | sx = torch.sigmoid(x)
46 | fx = F.softplus(x).tanh()
47 | return grad_output * (fx + x * sx * (1 - fx * fx))
48 |
49 | def forward(self, x):
50 | return self.F.apply(x)
51 |
52 |
53 | class FReLU(nn.Module):
54 | # FReLU activation https://arxiv.org/abs/2007.11824
55 | def __init__(self, c1, k=3): # ch_in, kernel
56 | super().__init__()
57 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
58 | self.bn = nn.BatchNorm2d(c1)
59 |
60 | def forward(self, x):
61 | return torch.max(x, self.bn(self.conv(x)))
62 |
63 |
64 | class AconC(nn.Module):
65 | r""" ACON activation (activate or not)
66 | AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
67 | according to "Activate or Not: Learning Customized Activation" .
68 | """
69 |
70 | def __init__(self, c1):
71 | super().__init__()
72 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
73 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
74 | self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
75 |
76 | def forward(self, x):
77 | dpx = (self.p1 - self.p2) * x
78 | return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
79 |
80 |
81 | class MetaAconC(nn.Module):
82 | r""" ACON activation (activate or not)
83 | MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
84 | according to "Activate or Not: Learning Customized Activation" .
85 | """
86 |
87 | def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
88 | super().__init__()
89 | c2 = max(r, c1 // r)
90 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
91 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
92 | self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
93 | self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
94 | # self.bn1 = nn.BatchNorm2d(c2)
95 | # self.bn2 = nn.BatchNorm2d(c1)
96 |
97 | def forward(self, x):
98 | y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
99 | # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
100 | # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
101 | beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
102 | dpx = (self.p1 - self.p2) * x
103 | return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
104 |
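Most of these modules are stateless, shape-preserving drop-ins for `nn.SiLU`/`nn.ReLU`; `FReLU` and the ACON variants must instead be constructed with the incoming channel count `c1`. A quick sketch (illustrative), again assuming the `yolov3` directory is on `sys.path`:

    import torch

    from utils.activations import SiLU, Mish, FReLU

    x = torch.randn(2, 8, 16, 16)  # (batch, channels, height, width)
    print(SiLU()(x).shape)         # torch.Size([2, 8, 16, 16])
    print(Mish()(x).shape)         # same shape, smooth non-monotonic activation
    print(FReLU(c1=8)(x).shape)    # c1 must equal the channel dimension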
--------------------------------------------------------------------------------
/yolov3/utils/autobatch.py:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Auto-batch utils
4 | """
5 |
6 | from copy import deepcopy
7 |
8 | import numpy as np
9 | import torch
10 |
11 | from utils.general import LOGGER, colorstr
12 | from utils.torch_utils import profile
13 |
14 |
15 | def check_train_batch_size(model, imgsz=640, amp=True):
16 | # Check YOLOv3 training batch size
17 | with torch.cuda.amp.autocast(amp):
18 | return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size
19 |
20 |
21 | def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
22 | # Automatically estimate best YOLOv3 batch size to use `fraction` of available CUDA memory
23 | # Usage:
24 | # import torch
25 | # from utils.autobatch import autobatch
26 | # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
27 | # print(autobatch(model))
28 |
29 | # Check device
30 | prefix = colorstr('AutoBatch: ')
31 | LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
32 | device = next(model.parameters()).device # get model device
33 | if device.type == 'cpu':
34 | LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
35 | return batch_size
36 | if torch.backends.cudnn.benchmark:
37 | LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}')
38 | return batch_size
39 |
40 | # Inspect CUDA memory
41 | gb = 1 << 30 # bytes to GiB (1024 ** 3)
42 | d = str(device).upper() # 'CUDA:0'
43 | properties = torch.cuda.get_device_properties(device) # device properties
44 | t = properties.total_memory / gb # GiB total
45 | r = torch.cuda.memory_reserved(device) / gb # GiB reserved
46 | a = torch.cuda.memory_allocated(device) / gb # GiB allocated
47 | f = t - (r + a) # GiB free
48 | LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
49 |
50 | # Profile batch sizes
51 | batch_sizes = [1, 2, 4, 8, 16]
52 | try:
53 | img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
54 | results = profile(img, model, n=3, device=device)
55 | except Exception as e:
56 | LOGGER.warning(f'{prefix}{e}')
57 | return batch_size  # profiling failed, fall back to default batch size
58 | # Fit a solution
59 | y = [x[2] for x in results if x] # memory [2]
60 | p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit
61 | b = int((f * fraction - p[1]) / p[0])  # solve mem(b) = f * fraction for b (optimal batch size)
62 | if None in results: # some sizes failed
63 | i = results.index(None) # first fail index
64 | if b >= batch_sizes[i]: # y intercept above failure point
65 | b = batch_sizes[max(i - 1, 0)] # select prior safe point
66 | if b < 1 or b > 1024: # b outside of safe range
67 | b = batch_size
68 | LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')
69 |
70 | fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted
71 | LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
72 | return b
73 |
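A worked sketch of the fit step above with made-up measurements: memory grows roughly linearly in batch size, so `np.polyfit` yields a slope and intercept, and the target batch size solves `p[0] * b + p[1] = fraction * free`:

    import numpy as np

    batch_sizes = [1, 2, 4, 8, 16]
    mem = [0.5, 0.9, 1.7, 3.3, 6.5]          # GiB measured at each batch size
    p = np.polyfit(batch_sizes, mem, deg=1)  # ~0.4 GiB per image, ~0.1 GiB fixed
    f, fraction = 10.0, 0.8                  # 10 GiB free, target 80% utilization
    b = int((f * fraction - p[1]) / p[0])
    print(b)                                 # 19 -> predicted ~7.7 GiB of the 8 GiB budget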
--------------------------------------------------------------------------------
/yolov3/utils/aws/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenImagingLab/AdaptiveISP/a6775e64d9c3768fc964ffcb00692c5e042111f1/yolov3/utils/aws/__init__.py
--------------------------------------------------------------------------------
/yolov3/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 |
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 |
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 |
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 |
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 |
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 |
--------------------------------------------------------------------------------
/yolov3/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings
2 | # Usage: $ python utils/aws/resume.py
3 |
4 | import os
5 | import sys
6 | from pathlib import Path
7 |
8 | import torch
9 | import yaml
10 |
11 | FILE = Path(__file__).resolve()
12 | ROOT = FILE.parents[2] # YOLOv3 root directory
13 | if str(ROOT) not in sys.path:
14 | sys.path.append(str(ROOT)) # add ROOT to PATH
15 |
16 | port = 0 # --master_port
17 | path = Path('').resolve()
18 | for last in path.rglob('*/**/last.pt'):
19 | ckpt = torch.load(last)
20 | if ckpt['optimizer'] is None:
21 | continue
22 |
23 | # Load opt.yaml
24 | with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
25 | opt = yaml.safe_load(f)
26 |
27 | # Get device count
28 | d = opt['device'].split(',') # devices
29 | nd = len(d) # number of devices
30 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
31 |
32 | if ddp: # multi-GPU
33 | port += 1
34 | cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
35 | else: # single-GPU
36 | cmd = f'python train.py --resume {last}'
37 |
38 |     cmd += ' > /dev/null 2>&1 &'  # redirect output to /dev/null and run in the background
39 | print(cmd)
40 | os.system(cmd)
41 |
--------------------------------------------------------------------------------
/yolov3/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
3 | # This script will run only once on first instance start (for a re-start script see mime.sh)
4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
5 | # Use >300 GB SSD
6 |
7 | cd /home/ubuntu
8 | if [ ! -d yolov5 ]; then
9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker
10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
11 | cd yolov5
12 | bash data/scripts/get_coco.sh && echo "COCO done." &
13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
15 | wait && echo "All tasks done." # finish background tasks
16 | else
17 | echo "Running re-start script." # resume interrupted runs
18 | i=0
19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
20 | while IFS= read -r id; do
21 | ((i++))
22 | echo "restarting container $i: $id"
23 | sudo docker start $id
24 | # sudo docker exec -it $id python train.py --resume # single-GPU
25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
26 | done <<<"$list"
27 | fi
28 |
--------------------------------------------------------------------------------
/yolov3/utils/callbacks.py:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Callback utils
4 | """
5 |
6 | import threading
7 |
8 |
9 | class Callbacks:
10 | """"
11 | Handles all registered callbacks for YOLOv3 Hooks
12 | """
13 |
14 | def __init__(self):
15 | # Define the available callbacks
16 | self._callbacks = {
17 | 'on_pretrain_routine_start': [],
18 | 'on_pretrain_routine_end': [],
19 | 'on_train_start': [],
20 | 'on_train_epoch_start': [],
21 | 'on_train_batch_start': [],
22 | 'optimizer_step': [],
23 | 'on_before_zero_grad': [],
24 | 'on_train_batch_end': [],
25 | 'on_train_epoch_end': [],
26 | 'on_val_start': [],
27 | 'on_val_batch_start': [],
28 | 'on_val_image_end': [],
29 | 'on_val_batch_end': [],
30 | 'on_val_end': [],
31 | 'on_fit_epoch_end': [], # fit = train + val
32 | 'on_model_save': [],
33 | 'on_train_end': [],
34 | 'on_params_update': [],
35 | 'teardown': [], }
36 | self.stop_training = False # set True to interrupt training
37 |
38 | def register_action(self, hook, name='', callback=None):
39 | """
40 | Register a new action to a callback hook
41 |
42 | Args:
43 | hook: The callback hook name to register the action to
44 | name: The name of the action for later reference
45 | callback: The callback to fire
46 | """
47 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
48 | assert callable(callback), f"callback '{callback}' is not callable"
49 | self._callbacks[hook].append({'name': name, 'callback': callback})
50 |
51 | def get_registered_actions(self, hook=None):
52 | """"
53 | Returns all the registered actions by callback hook
54 |
55 | Args:
56 | hook: The name of the hook to check, defaults to all
57 | """
58 | return self._callbacks[hook] if hook else self._callbacks
59 |
60 | def run(self, hook, *args, thread=False, **kwargs):
61 | """
62 |         Loop through the registered actions and fire all callbacks, either on the main thread or in a daemon thread
63 |
64 | Args:
65 |             hook: The name of the hook whose callbacks to fire
66 | args: Arguments to receive from YOLOv3
67 | thread: (boolean) Run callbacks in daemon thread
68 | kwargs: Keyword Arguments to receive from YOLOv3
69 | """
70 |
71 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
72 | for logger in self._callbacks[hook]:
73 | if thread:
74 | threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()
75 | else:
76 | logger['callback'](*args, **kwargs)
77 |
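A short usage sketch of the `Callbacks` class above; the hook name comes from the dictionary in `__init__`, while the action name and arguments are made up.

```python
# Hypothetical usage of the Callbacks class defined above
callbacks = Callbacks()

def log_epoch(epoch, fitness):
    print(f'epoch {epoch} done, fitness={fitness:.3f}')

callbacks.register_action('on_train_epoch_end', name='log_epoch', callback=log_epoch)
callbacks.run('on_train_epoch_end', 5, 0.721)               # fires on the main thread
callbacks.run('on_train_epoch_end', 5, 0.721, thread=True)  # fires in a daemon thread
```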
--------------------------------------------------------------------------------
/yolov3/utils/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov3
3 | # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference
4 |
5 | # Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch
6 | FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime
7 |
8 | # Downloads to user config dir
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10 |
11 | # Install linux packages
12 | ENV DEBIAN_FRONTEND noninteractive
13 | RUN apt update
14 | RUN TZ=Etc/UTC apt install -y tzdata
15 | RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
16 | # RUN alias python=python3
17 |
18 | # Security updates
19 | # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796
20 | RUN apt upgrade --no-install-recommends -y openssl
21 |
22 | # Create working directory
23 | RUN rm -rf /usr/src/app && mkdir -p /usr/src/app
24 | WORKDIR /usr/src/app
25 |
26 | # Copy contents
27 | # COPY . /usr/src/app (has issues, as the build context is not a .git directory)
28 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
29 |
30 | # Install pip packages
31 | COPY requirements.txt .
32 | RUN python3 -m pip install --upgrade pip wheel
33 | RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \
34 | coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0'
35 | # tensorflow tensorflowjs \
36 |
37 | # Set environment variables
38 | ENV OMP_NUM_THREADS=1
39 |
40 | # Cleanup
41 | ENV DEBIAN_FRONTEND teletype
42 |
43 |
44 | # Usage Examples -------------------------------------------------------------------------------------------------------
45 |
46 | # Build and Push
47 | # t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t
48 |
49 | # Pull and Run
50 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
51 |
52 | # Pull and Run with local directory access
53 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
54 |
55 | # Kill all
56 | # sudo docker kill $(sudo docker ps -q)
57 |
58 | # Kill all image-based
59 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
60 |
61 | # DockerHub tag update
62 | # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew
63 |
64 | # Clean up
65 | # sudo docker system prune -a --volumes
66 |
67 | # Update Ubuntu drivers
68 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
69 |
70 | # DDP test
71 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
72 |
73 | # GCP VM from Image
74 | # docker.io/ultralytics/yolov5:latest
75 |
--------------------------------------------------------------------------------
/yolov3/utils/docker/Dockerfile-arm64:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov3
3 | # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi
4 |
5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
6 | FROM arm64v8/ubuntu:22.10
7 |
8 | # Downloads to user config dir
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10 |
11 | # Install linux packages
12 | ENV DEBIAN_FRONTEND noninteractive
13 | RUN apt update
14 | RUN TZ=Etc/UTC apt install -y tzdata
15 | RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev
16 | # RUN alias python=python3
17 |
18 | # Install pip packages
19 | COPY requirements.txt .
20 | RUN python3 -m pip install --upgrade pip wheel
21 | RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
22 | coremltools onnx onnxruntime
23 | # tensorflow-aarch64 tensorflowjs \
24 |
25 | # Create working directory
26 | RUN mkdir -p /usr/src/app
27 | WORKDIR /usr/src/app
28 |
29 | # Copy contents
30 | # COPY . /usr/src/app (has issues, as the build context is not a .git directory)
31 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
32 | ENV DEBIAN_FRONTEND teletype
33 |
34 |
35 | # Usage Examples -------------------------------------------------------------------------------------------------------
36 |
37 | # Build and Push
38 | # t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t
39 |
40 | # Pull and Run
41 | # t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
42 |
--------------------------------------------------------------------------------
/yolov3/utils/docker/Dockerfile-cpu:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov3
3 | # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments
4 |
5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
6 | FROM ubuntu:lunar-20230615
7 |
8 | # Downloads to user config dir
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10 |
11 | # Install linux packages
12 | # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
13 | RUN apt update \
14 | && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
15 | # RUN alias python=python3
16 |
17 | # Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' to avoid the 'externally-managed-environment' Ubuntu nightly error
18 | RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED
19 |
20 | # Install pip packages
21 | COPY requirements.txt .
22 | RUN python3 -m pip install --upgrade pip wheel
23 | RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
24 | coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' \
25 | # tensorflow tensorflowjs \
26 | --extra-index-url https://download.pytorch.org/whl/cpu
27 |
28 | # Create working directory
29 | RUN mkdir -p /usr/src/app
30 | WORKDIR /usr/src/app
31 |
32 | # Copy contents
33 | # COPY . /usr/src/app (has issues, as the build context is not a .git directory)
34 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
35 |
36 |
37 | # Usage Examples -------------------------------------------------------------------------------------------------------
38 |
39 | # Build and Push
40 | # t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t
41 |
42 | # Pull and Run
43 | # t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
44 |
--------------------------------------------------------------------------------
/yolov3/utils/flask_rest_api/README.md:
--------------------------------------------------------------------------------
1 | # Flask REST API
2 |
3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
4 | commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
5 | created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
6 |
7 | ## Requirements
8 |
9 | [Flask](https://palletsprojects.com/p/flask/) is required. Install with:
10 |
11 | ```shell
12 | $ pip install Flask
13 | ```
14 |
15 | ## Run
16 |
17 | After installing Flask, run:
18 |
19 | ```shell
20 | $ python3 restapi.py --port 5000
21 | ```
22 |
23 | Then use [curl](https://curl.se/) to perform a request:
24 |
25 | ```shell
26 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
27 | ```
28 |
29 | The model inference results are returned as a JSON response:
30 |
31 | ```json
32 | [
33 | {
34 | "class": 0,
35 | "confidence": 0.8900438547,
36 | "height": 0.9318675399,
37 | "name": "person",
38 | "width": 0.3264600933,
39 | "xcenter": 0.7438579798,
40 | "ycenter": 0.5207948685
41 | },
42 | {
43 | "class": 0,
44 | "confidence": 0.8440024257,
45 | "height": 0.7155083418,
46 | "name": "person",
47 | "width": 0.6546785235,
48 | "xcenter": 0.427829951,
49 | "ycenter": 0.6334488392
50 | },
51 | {
52 | "class": 27,
53 | "confidence": 0.3771208823,
54 | "height": 0.3902671337,
55 | "name": "tie",
56 | "width": 0.0696444362,
57 | "xcenter": 0.3675483763,
58 | "ycenter": 0.7991207838
59 | },
60 | {
61 | "class": 27,
62 | "confidence": 0.3527112305,
63 | "height": 0.1540903747,
64 | "name": "tie",
65 | "width": 0.0336618312,
66 | "xcenter": 0.7814827561,
67 | "ycenter": 0.5065554976
68 | }
69 | ]
70 | ```
71 |
72 | An example Python script that performs inference using [requests](https://docs.python-requests.org/en/master/) is given
73 | in `example_request.py`.
74 |
--------------------------------------------------------------------------------
/yolov3/utils/flask_rest_api/example_request.py:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Perform test request
4 | """
5 |
6 | import pprint
7 |
8 | import requests
9 |
10 | DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s'
11 | IMAGE = 'zidane.jpg'
12 |
13 | # Read image
14 | with open(IMAGE, 'rb') as f:
15 | image_data = f.read()
16 |
17 | response = requests.post(DETECTION_URL, files={'image': image_data}).json()
18 |
19 | pprint.pprint(response)
20 |
--------------------------------------------------------------------------------
/yolov3/utils/flask_rest_api/restapi.py:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Run a Flask REST API exposing one or more YOLOv5 models
4 | """
5 |
6 | import argparse
7 | import io
8 |
9 | import torch
10 | from flask import Flask, request
11 | from PIL import Image
12 |
13 | app = Flask(__name__)
14 | models = {}
15 |
16 | DETECTION_URL = '/v1/object-detection/<model>'
17 |
18 |
19 | @app.route(DETECTION_URL, methods=['POST'])
20 | def predict(model):
21 | if request.method != 'POST':
22 | return
23 |
24 | if request.files.get('image'):
25 | # Method 1
26 | # with request.files["image"] as f:
27 | # im = Image.open(io.BytesIO(f.read()))
28 |
29 | # Method 2
30 | im_file = request.files['image']
31 | im_bytes = im_file.read()
32 | im = Image.open(io.BytesIO(im_bytes))
33 |
34 | if model in models:
35 | results = models[model](im, size=640) # reduce size=320 for faster inference
36 | return results.pandas().xyxy[0].to_json(orient='records')
37 |
38 |
39 | if __name__ == '__main__':
40 | parser = argparse.ArgumentParser(description='Flask API exposing YOLOv3 model')
41 | parser.add_argument('--port', default=5000, type=int, help='port number')
42 | parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
43 | opt = parser.parse_args()
44 |
45 | for m in opt.model:
46 | models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True)
47 |
48 | app.run(host='0.0.0.0', port=opt.port) # debug=True causes Restarting with stat
49 |
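A client-side sketch for the API above, assuming the server was started with `--model yolov5n yolov5s` (so each model gets its own endpoint) and that `zidane.jpg` exists locally.

```python
# Hypothetical client; assumes restapi.py is serving yolov5n and yolov5s on port 5000
import requests

with open('zidane.jpg', 'rb') as f:
    image_data = f.read()

for m in ('yolov5n', 'yolov5s'):
    url = f'http://localhost:5000/v1/object-detection/{m}'
    print(m, requests.post(url, files={'image': image_data}).json())
```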
--------------------------------------------------------------------------------
/yolov3/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/google-appengine/python
2 |
3 | # Create a virtualenv for dependencies. This isolates these packages from
4 | # system-level packages.
5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2.
6 | RUN virtualenv /env -p python3
7 |
8 | # Setting these environment variables is the same as running
9 | # source /env/bin/activate.
10 | ENV VIRTUAL_ENV /env
11 | ENV PATH /env/bin:$PATH
12 |
13 | RUN apt-get update && apt-get install -y python-opencv
14 |
15 | # Copy the application's requirements.txt and run pip to install all
16 | # dependencies into the virtualenv.
17 | ADD requirements.txt /app/requirements.txt
18 | RUN pip install -r /app/requirements.txt
19 |
20 | # Add the application source code.
21 | ADD . /app
22 |
23 | # Run a WSGI server to serve the application. gunicorn must be declared as
24 | # a dependency in requirements.txt.
25 | CMD gunicorn -b :$PORT main:app
26 |
--------------------------------------------------------------------------------
/yolov3/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
1 | # add these requirements in your app on top of the existing ones
2 | pip==21.1
3 | Flask==2.3.2
4 | gunicorn==19.10.0
5 | werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability
6 |
--------------------------------------------------------------------------------
/yolov3/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 |
4 | service: yolov5app
5 |
6 | liveness_check:
7 | initial_delay_sec: 600
8 |
9 | manual_scaling:
10 | instances: 1
11 | resources:
12 | cpu: 1
13 | memory_gb: 4
14 | disk_size_gb: 20
15 |
--------------------------------------------------------------------------------
/yolov3/utils/loggers/clearml/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenImagingLab/AdaptiveISP/a6775e64d9c3768fc964ffcb00692c5e042111f1/yolov3/utils/loggers/clearml/__init__.py
--------------------------------------------------------------------------------
/yolov3/utils/loggers/clearml/hpo.py:
--------------------------------------------------------------------------------
1 | from clearml import Task
2 | # Connecting ClearML with the current process,
3 | # from here on everything is logged automatically
4 | from clearml.automation import HyperParameterOptimizer, UniformParameterRange
5 | from clearml.automation.optuna import OptimizerOptuna
6 |
7 | task = Task.init(project_name='Hyper-Parameter Optimization',
8 | task_name='YOLOv3',
9 | task_type=Task.TaskTypes.optimizer,
10 | reuse_last_task_id=False)
11 |
12 | # Example use case:
13 | optimizer = HyperParameterOptimizer(
14 | # This is the experiment we want to optimize
15 | base_task_id='',
16 | # here we define the hyper-parameters to optimize
17 |     # Notice: The parameter name should exactly match what you see in the UI: <section_name>/<parameter>
18 |     # For example, here we see in the base experiment a section named "General"
19 |     # under it a parameter named "batch_size"; this becomes "General/batch_size"
20 | # If you have `argparse` for example, then arguments will appear under the "Args" section,
21 | # and you should instead pass "Args/batch_size"
22 | hyper_parameters=[
23 | UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1),
24 | UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0),
25 | UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98),
26 | UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001),
27 | UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0),
28 | UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95),
29 | UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2),
30 | UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2),
31 | UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0),
32 | UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0),
33 | UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0),
34 | UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0),
35 | UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7),
36 | UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0),
37 | UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0),
38 | UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1),
39 | UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9),
40 | UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9),
41 | UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0),
42 | UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9),
43 | UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9),
44 | UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0),
45 | UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001),
46 | UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0),
47 | UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0),
48 | UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0),
49 | UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0),
50 | UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)],
51 | # this is the objective metric we want to maximize/minimize
52 | objective_metric_title='metrics',
53 | objective_metric_series='mAP_0.5',
54 | # now we decide if we want to maximize it or minimize it (accuracy we maximize)
55 | objective_metric_sign='max',
56 | # let us limit the number of concurrent experiments,
57 |     # this in turn will make sure we don't bombard the scheduler with experiments.
58 |     # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
59 | max_number_of_concurrent_tasks=1,
60 | # this is the optimizer class (actually doing the optimization)
61 | # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
62 | optimizer_class=OptimizerOptuna,
63 | # If specified only the top K performing Tasks will be kept, the others will be automatically archived
64 |     save_top_k_tasks_only=5,
65 | compute_time_limit=None,
66 | total_max_jobs=20,
67 | min_iteration_per_job=None,
68 | max_iteration_per_job=None,
69 | )
70 |
71 | # report every 10 seconds, this is way too often, but we are testing here
72 | optimizer.set_report_period(10 / 60)
73 | # You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent
74 | # an_optimizer.start_locally(job_complete_callback=job_complete_callback)
75 | # set the time limit for the optimization process (2 hours)
76 | optimizer.set_time_limit(in_minutes=120.0)
77 | # Start the optimization process in the local environment
78 | optimizer.start_locally()
79 | # wait until process is done (notice we are controlling the optimization process in the background)
80 | optimizer.wait()
81 | # make sure background optimization stopped
82 | optimizer.stop()
83 |
84 | print('We are done, good bye')
85 |
--------------------------------------------------------------------------------
/yolov3/utils/loggers/comet/comet_utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from urllib.parse import urlparse
4 |
5 | try:
6 | import comet_ml
7 | except (ModuleNotFoundError, ImportError):
8 | comet_ml = None
9 |
10 | import yaml
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 | COMET_PREFIX = 'comet://'
15 | COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5')
16 | COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt')
17 |
18 |
19 | def download_model_checkpoint(opt, experiment):
20 | model_dir = f'{opt.project}/{experiment.name}'
21 | os.makedirs(model_dir, exist_ok=True)
22 |
23 | model_name = COMET_MODEL_NAME
24 | model_asset_list = experiment.get_model_asset_list(model_name)
25 |
26 | if len(model_asset_list) == 0:
27 |         logger.error(f'COMET ERROR: No checkpoints found for model name: {model_name}')
28 | return
29 |
30 | model_asset_list = sorted(
31 | model_asset_list,
32 | key=lambda x: x['step'],
33 | reverse=True,
34 | )
35 | logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list}
36 |
37 | resource_url = urlparse(opt.weights)
38 | checkpoint_filename = resource_url.query
39 |
40 | if checkpoint_filename:
41 | asset_id = logged_checkpoint_map.get(checkpoint_filename)
42 | else:
43 | asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
44 | checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME
45 |
46 | if asset_id is None:
47 | logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment')
48 | return
49 |
50 | try:
51 | logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}')
52 | asset_filename = checkpoint_filename
53 |
54 | model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False)
55 | model_download_path = f'{model_dir}/{asset_filename}'
56 | with open(model_download_path, 'wb') as f:
57 | f.write(model_binary)
58 |
59 | opt.weights = model_download_path
60 |
61 | except Exception as e:
62 | logger.warning('COMET WARNING: Unable to download checkpoint from Comet')
63 | logger.exception(e)
64 |
65 |
66 | def set_opt_parameters(opt, experiment):
67 | """Update the opts Namespace with parameters
68 | from Comet's ExistingExperiment when resuming a run
69 |
70 | Args:
71 | opt (argparse.Namespace): Namespace of command line options
72 | experiment (comet_ml.APIExperiment): Comet API Experiment object
73 | """
74 | asset_list = experiment.get_asset_list()
75 | resume_string = opt.resume
76 |
77 | for asset in asset_list:
78 | if asset['fileName'] == 'opt.yaml':
79 | asset_id = asset['assetId']
80 | asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False)
81 | opt_dict = yaml.safe_load(asset_binary)
82 | for key, value in opt_dict.items():
83 | setattr(opt, key, value)
84 | opt.resume = resume_string
85 |
86 | # Save hyperparameters to YAML file
87 | # Necessary to pass checks in training script
88 | save_dir = f'{opt.project}/{experiment.name}'
89 | os.makedirs(save_dir, exist_ok=True)
90 |
91 | hyp_yaml_path = f'{save_dir}/hyp.yaml'
92 | with open(hyp_yaml_path, 'w') as f:
93 | yaml.dump(opt.hyp, f)
94 | opt.hyp = hyp_yaml_path
95 |
96 |
97 | def check_comet_weights(opt):
98 | """Downloads model weights from Comet and updates the
99 | weights path to point to saved weights location
100 |
101 | Args:
102 | opt (argparse.Namespace): Command Line arguments passed
103 | to YOLOv3 training script
104 |
105 | Returns:
106 | None/bool: Return True if weights are successfully downloaded
107 | else return None
108 | """
109 | if comet_ml is None:
110 | return
111 |
112 | if isinstance(opt.weights, str):
113 | if opt.weights.startswith(COMET_PREFIX):
114 | api = comet_ml.API()
115 | resource = urlparse(opt.weights)
116 | experiment_path = f'{resource.netloc}{resource.path}'
117 | experiment = api.get(experiment_path)
118 | download_model_checkpoint(opt, experiment)
119 | return True
120 |
121 | return None
122 |
123 |
124 | def check_comet_resume(opt):
125 | """Restores run parameters to its original state based on the model checkpoint
126 | and logged Experiment parameters.
127 |
128 | Args:
129 | opt (argparse.Namespace): Command Line arguments passed
130 | to YOLOv3 training script
131 |
132 | Returns:
133 | None/bool: Return True if the run is restored successfully
134 | else return None
135 | """
136 | if comet_ml is None:
137 | return
138 |
139 | if isinstance(opt.resume, str):
140 | if opt.resume.startswith(COMET_PREFIX):
141 | api = comet_ml.API()
142 | resource = urlparse(opt.resume)
143 | experiment_path = f'{resource.netloc}{resource.path}'
144 | experiment = api.get(experiment_path)
145 | set_opt_parameters(opt, experiment)
146 | download_model_checkpoint(opt, experiment)
147 |
148 | return True
149 |
150 | return None
151 |
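To illustrate how `check_comet_resume` and `download_model_checkpoint` split a `comet://` resource string, here is the `urlparse` step in isolation (the URI below is made up):

```python
# Illustration only: how the comet:// resource string is split in the functions above
from urllib.parse import urlparse

resource = urlparse('comet://workspace/project/experiment_id?best.pt')  # hypothetical URI
experiment_path = f'{resource.netloc}{resource.path}'  # 'workspace/project/experiment_id'
checkpoint_filename = resource.query                   # 'best.pt'
print(experiment_path, checkpoint_filename)
```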
--------------------------------------------------------------------------------
/yolov3/utils/loggers/comet/optimizer_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "algorithm": "random",
3 | "parameters": {
4 | "anchor_t": {
5 | "type": "discrete",
6 | "values": [
7 | 2,
8 | 8
9 | ]
10 | },
11 | "batch_size": {
12 | "type": "discrete",
13 | "values": [
14 | 16,
15 | 32,
16 | 64
17 | ]
18 | },
19 | "box": {
20 | "type": "discrete",
21 | "values": [
22 | 0.02,
23 | 0.2
24 | ]
25 | },
26 | "cls": {
27 | "type": "discrete",
28 | "values": [
29 | 0.2
30 | ]
31 | },
32 | "cls_pw": {
33 | "type": "discrete",
34 | "values": [
35 | 0.5
36 | ]
37 | },
38 | "copy_paste": {
39 | "type": "discrete",
40 | "values": [
41 | 1
42 | ]
43 | },
44 | "degrees": {
45 | "type": "discrete",
46 | "values": [
47 | 0,
48 | 45
49 | ]
50 | },
51 | "epochs": {
52 | "type": "discrete",
53 | "values": [
54 | 5
55 | ]
56 | },
57 | "fl_gamma": {
58 | "type": "discrete",
59 | "values": [
60 | 0
61 | ]
62 | },
63 | "fliplr": {
64 | "type": "discrete",
65 | "values": [
66 | 0
67 | ]
68 | },
69 | "flipud": {
70 | "type": "discrete",
71 | "values": [
72 | 0
73 | ]
74 | },
75 | "hsv_h": {
76 | "type": "discrete",
77 | "values": [
78 | 0
79 | ]
80 | },
81 | "hsv_s": {
82 | "type": "discrete",
83 | "values": [
84 | 0
85 | ]
86 | },
87 | "hsv_v": {
88 | "type": "discrete",
89 | "values": [
90 | 0
91 | ]
92 | },
93 | "iou_t": {
94 | "type": "discrete",
95 | "values": [
96 | 0.7
97 | ]
98 | },
99 | "lr0": {
100 | "type": "discrete",
101 | "values": [
102 | 1e-05,
103 | 0.1
104 | ]
105 | },
106 | "lrf": {
107 | "type": "discrete",
108 | "values": [
109 | 0.01,
110 | 1
111 | ]
112 | },
113 | "mixup": {
114 | "type": "discrete",
115 | "values": [
116 | 1
117 | ]
118 | },
119 | "momentum": {
120 | "type": "discrete",
121 | "values": [
122 | 0.6
123 | ]
124 | },
125 | "mosaic": {
126 | "type": "discrete",
127 | "values": [
128 | 0
129 | ]
130 | },
131 | "obj": {
132 | "type": "discrete",
133 | "values": [
134 | 0.2
135 | ]
136 | },
137 | "obj_pw": {
138 | "type": "discrete",
139 | "values": [
140 | 0.5
141 | ]
142 | },
143 | "optimizer": {
144 | "type": "categorical",
145 | "values": [
146 | "SGD",
147 | "Adam",
148 | "AdamW"
149 | ]
150 | },
151 | "perspective": {
152 | "type": "discrete",
153 | "values": [
154 | 0
155 | ]
156 | },
157 | "scale": {
158 | "type": "discrete",
159 | "values": [
160 | 0
161 | ]
162 | },
163 | "shear": {
164 | "type": "discrete",
165 | "values": [
166 | 0
167 | ]
168 | },
169 | "translate": {
170 | "type": "discrete",
171 | "values": [
172 | 0
173 | ]
174 | },
175 | "warmup_bias_lr": {
176 | "type": "discrete",
177 | "values": [
178 | 0,
179 | 0.2
180 | ]
181 | },
182 | "warmup_epochs": {
183 | "type": "discrete",
184 | "values": [
185 | 5
186 | ]
187 | },
188 | "warmup_momentum": {
189 | "type": "discrete",
190 | "values": [
191 | 0,
192 | 0.95
193 | ]
194 | },
195 | "weight_decay": {
196 | "type": "discrete",
197 | "values": [
198 | 0,
199 | 0.001
200 | ]
201 | }
202 | },
203 | "spec": {
204 | "maxCombo": 0,
205 | "metric": "metrics/mAP_0.5",
206 | "objective": "maximize"
207 | },
208 | "trials": 1
209 | }
210 |
--------------------------------------------------------------------------------
/yolov3/utils/loggers/wandb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenImagingLab/AdaptiveISP/a6775e64d9c3768fc964ffcb00692c5e042111f1/yolov3/utils/loggers/wandb/__init__.py
--------------------------------------------------------------------------------
/yolov3/utils/segment/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenImagingLab/AdaptiveISP/a6775e64d9c3768fc964ffcb00692c5e042111f1/yolov3/utils/segment/__init__.py
--------------------------------------------------------------------------------
/yolov3/utils/segment/augmentations.py:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Image augmentation functions
4 | """
5 |
6 | import math
7 | import random
8 |
9 | import cv2
10 | import numpy as np
11 |
12 | from ..augmentations import box_candidates
13 | from ..general import resample_segments, segment2box
14 |
15 |
16 | def mixup(im, labels, segments, im2, labels2, segments2):
17 | # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
18 | r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
19 | im = (im * r + im2 * (1 - r)).astype(np.uint8)
20 | labels = np.concatenate((labels, labels2), 0)
21 | segments = np.concatenate((segments, segments2), 0)
22 | return im, labels, segments
23 |
24 |
25 | def random_perspective(im,
26 | targets=(),
27 | segments=(),
28 | degrees=10,
29 | translate=.1,
30 | scale=.1,
31 | shear=10,
32 | perspective=0.0,
33 | border=(0, 0)):
34 | # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
35 | # targets = [cls, xyxy]
36 |
37 | height = im.shape[0] + border[0] * 2 # shape(h,w,c)
38 | width = im.shape[1] + border[1] * 2
39 |
40 | # Center
41 | C = np.eye(3)
42 | C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
43 | C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
44 |
45 | # Perspective
46 | P = np.eye(3)
47 | P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
48 | P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
49 |
50 | # Rotation and Scale
51 | R = np.eye(3)
52 | a = random.uniform(-degrees, degrees)
53 | # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
54 | s = random.uniform(1 - scale, 1 + scale)
55 | # s = 2 ** random.uniform(-scale, scale)
56 | R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
57 |
58 | # Shear
59 | S = np.eye(3)
60 | S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
61 | S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
62 |
63 | # Translation
64 | T = np.eye(3)
65 | T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels)
66 | T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels)
67 |
68 | # Combined rotation matrix
69 | M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
70 | if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
71 | if perspective:
72 | im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
73 | else: # affine
74 | im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
75 |
76 | # Visualize
77 | # import matplotlib.pyplot as plt
78 | # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
79 | # ax[0].imshow(im[:, :, ::-1]) # base
80 | # ax[1].imshow(im2[:, :, ::-1]) # warped
81 |
82 | # Transform label coordinates
83 | n = len(targets)
84 | new_segments = []
85 | if n:
86 | new = np.zeros((n, 4))
87 | segments = resample_segments(segments) # upsample
88 | for i, segment in enumerate(segments):
89 | xy = np.ones((len(segment), 3))
90 | xy[:, :2] = segment
91 | xy = xy @ M.T # transform
92 | xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine
93 |
94 | # clip
95 | new[i] = segment2box(xy, width, height)
96 | new_segments.append(xy)
97 |
98 | # filter candidates
99 | i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
100 | targets = targets[i]
101 | targets[:, 1:5] = new[i]
102 | new_segments = np.array(new_segments)[i]
103 |
104 | return im, targets, new_segments
105 |
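A tiny sketch of the homogeneous-coordinate step inside `random_perspective` above: each segment point is lifted to `(x, y, 1)`, multiplied by `M.T`, and divided by the third coordinate when a perspective component is present. The matrix and points below are made up.

```python
# Minimal sketch of the point transform above; M and the segment are illustrative.
import numpy as np

M = np.array([[1.0, 0.0, 10.0],
              [0.0, 1.0, 5.0],
              [0.0, 0.0, 1.0]])  # pure translation, so the perspective divide is a no-op
segment = np.array([[0., 0.], [10., 0.], [10., 10.], [0., 10.]])

xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T                # transform homogeneous points
xy = xy[:, :2] / xy[:, 2:3]  # perspective rescale (divide by w)
print(xy)                    # every point shifted by (10, 5)
```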
--------------------------------------------------------------------------------
/yolov3/utils/triton.py:
--------------------------------------------------------------------------------
1 | # YOLOv3 🚀 by Ultralytics, AGPL-3.0 license
2 | """ Utils to interact with the Triton Inference Server
3 | """
4 |
5 | import typing
6 | from urllib.parse import urlparse
7 |
8 | import torch
9 |
10 |
11 | class TritonRemoteModel:
12 | """ A wrapper over a model served by the Triton Inference Server. It can
13 | be configured to communicate over GRPC or HTTP. It accepts Torch Tensors
14 | as input and returns them as outputs.
15 | """
16 |
17 | def __init__(self, url: str):
18 | """
19 | Keyword arguments:
20 |         url: Fully qualified address of the Triton server, e.g. grpc://localhost:8000
21 | """
22 |
23 | parsed_url = urlparse(url)
24 | if parsed_url.scheme == 'grpc':
25 | from tritonclient.grpc import InferenceServerClient, InferInput
26 |
27 | self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client
28 | model_repository = self.client.get_model_repository_index()
29 | self.model_name = model_repository.models[0].name
30 | self.metadata = self.client.get_model_metadata(self.model_name, as_json=True)
31 |
32 | def create_input_placeholders() -> typing.List[InferInput]:
33 | return [
34 | InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']]
35 |
36 | else:
37 | from tritonclient.http import InferenceServerClient, InferInput
38 |
39 | self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client
40 | model_repository = self.client.get_model_repository_index()
41 | self.model_name = model_repository[0]['name']
42 | self.metadata = self.client.get_model_metadata(self.model_name)
43 |
44 | def create_input_placeholders() -> typing.List[InferInput]:
45 | return [
46 | InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']]
47 |
48 | self._create_input_placeholders_fn = create_input_placeholders
49 |
50 | @property
51 | def runtime(self):
52 | """Returns the model runtime"""
53 | return self.metadata.get('backend', self.metadata.get('platform'))
54 |
55 | def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]:
56 | """ Invokes the model. Parameters can be provided via args or kwargs.
57 | args, if provided, are assumed to match the order of inputs of the model.
58 | kwargs are matched with the model input names.
59 | """
60 | inputs = self._create_inputs(*args, **kwargs)
61 | response = self.client.infer(model_name=self.model_name, inputs=inputs)
62 | result = []
63 | for output in self.metadata['outputs']:
64 | tensor = torch.as_tensor(response.as_numpy(output['name']))
65 | result.append(tensor)
66 | return result[0] if len(result) == 1 else result
67 |
68 | def _create_inputs(self, *args, **kwargs):
69 | args_len, kwargs_len = len(args), len(kwargs)
70 | if not args_len and not kwargs_len:
71 | raise RuntimeError('No inputs provided.')
72 | if args_len and kwargs_len:
73 | raise RuntimeError('Cannot specify args and kwargs at the same time')
74 |
75 | placeholders = self._create_input_placeholders_fn()
76 | if args_len:
77 | if args_len != len(placeholders):
78 | raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.')
79 | for input, value in zip(placeholders, args):
80 | input.set_data_from_numpy(value.cpu().numpy())
81 | else:
82 | for input in placeholders:
83 | value = kwargs[input.name]
84 | input.set_data_from_numpy(value.cpu().numpy())
85 | return placeholders
86 |
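A hedged usage sketch for `TritonRemoteModel`, assuming a Triton server is reachable at the address below and serves a single image model with a matching input shape.

```python
# Hypothetical usage of TritonRemoteModel; the server address and input shape are assumptions.
import torch

model = TritonRemoteModel('grpc://localhost:8001')  # or 'http://localhost:8000' for HTTP
x = torch.zeros(1, 3, 640, 640)                     # dummy input matching the served model
y = model(x)                                        # a tensor, or a tuple for multi-output models
print(model.runtime)
```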
--------------------------------------------------------------------------------