├── .dockerignore
├── .github
└── workflows
│ └── main.yaml
├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── app.py
├── data
├── .gitkeep
└── inputImage.jpg
├── docs
└── .gitkeep
├── flowcharts
├── Data Ingetions.png
├── Data validation.png
├── Model Pusher.png
├── Model trainer.png
└── deployment.jpeg
├── requirements.txt
├── setup.py
├── signLanguage
├── __init__.py
├── components
│ ├── __init__.py
│ ├── data_ingestion.py
│ ├── data_validation.py
│ ├── model_pusher.py
│ └── model_trainer.py
├── configuration
│ ├── __init__.py
│ └── s3_operations.py
├── constant
│ ├── __init__.py
│ ├── application.py
│ └── training_pipeline
│ │ └── __init__.py
├── entity
│ ├── __init__.py
│ ├── artifact_entity.py
│ └── config_entity.py
├── exception
│ └── __init__.py
├── logger
│ └── __init__.py
├── pipeline
│ ├── __init__.py
│ └── training_pipeline.py
└── utils
│ ├── __init__.py
│ └── main_utils.py
├── template.py
├── templates
└── index.html
└── yolov5
├── .dockerignore
├── .gitattributes
├── .github
├── ISSUE_TEMPLATE
│ ├── bug-report.yml
│ ├── config.yml
│ ├── feature-request.yml
│ └── question.yml
├── PULL_REQUEST_TEMPLATE.md
├── dependabot.yml
└── workflows
│ ├── ci-testing.yml
│ ├── codeql-analysis.yml
│ ├── docker.yml
│ ├── greetings.yml
│ ├── stale.yml
│ └── translate-readme.yml
├── .pre-commit-config.yaml
├── CITATION.cff
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── README.zh-CN.md
├── benchmarks.py
├── best.pt
├── classify
├── predict.py
├── train.py
├── tutorial.ipynb
└── val.py
├── data
├── Argoverse.yaml
├── GlobalWheat2020.yaml
├── ImageNet.yaml
├── Objects365.yaml
├── SKU-110K.yaml
├── VOC.yaml
├── VisDrone.yaml
├── coco.yaml
├── coco128-seg.yaml
├── coco128.yaml
├── hyps
│ ├── hyp.Objects365.yaml
│ ├── hyp.VOC.yaml
│ ├── hyp.no-augmentation.yaml
│ ├── hyp.scratch-high.yaml
│ ├── hyp.scratch-low.yaml
│ └── hyp.scratch-med.yaml
├── images
│ ├── bus.jpg
│ └── zidane.jpg
├── scripts
│ ├── download_weights.sh
│ ├── get_coco.sh
│ ├── get_coco128.sh
│ └── get_imagenet.sh
└── xView.yaml
├── detect.py
├── export.py
├── hubconf.py
├── models
├── __init__.py
├── common.py
├── custom_yolov5s.yaml
├── experimental.py
├── hub
│ ├── anchors.yaml
│ ├── yolov3-spp.yaml
│ ├── yolov3-tiny.yaml
│ ├── yolov3.yaml
│ ├── yolov5-bifpn.yaml
│ ├── yolov5-fpn.yaml
│ ├── yolov5-p2.yaml
│ ├── yolov5-p34.yaml
│ ├── yolov5-p6.yaml
│ ├── yolov5-p7.yaml
│ ├── yolov5-panet.yaml
│ ├── yolov5l6.yaml
│ ├── yolov5m6.yaml
│ ├── yolov5n6.yaml
│ ├── yolov5s-LeakyReLU.yaml
│ ├── yolov5s-ghost.yaml
│ ├── yolov5s-transformer.yaml
│ ├── yolov5s6.yaml
│ └── yolov5x6.yaml
├── segment
│ ├── yolov5l-seg.yaml
│ ├── yolov5m-seg.yaml
│ ├── yolov5n-seg.yaml
│ ├── yolov5s-seg.yaml
│ └── yolov5x-seg.yaml
├── tf.py
├── yolo.py
├── yolov5l.yaml
├── yolov5m.yaml
├── yolov5n.yaml
├── yolov5s.yaml
└── yolov5x.yaml
├── my_model.pt
├── requirements.txt
├── segment
├── predict.py
├── train.py
├── tutorial.ipynb
└── val.py
├── setup.cfg
├── train.py
├── tutorial.ipynb
├── utils
├── __init__.py
├── activations.py
├── augmentations.py
├── autoanchor.py
├── autobatch.py
├── aws
│ ├── __init__.py
│ ├── mime.sh
│ ├── resume.py
│ └── userdata.sh
├── callbacks.py
├── dataloaders.py
├── docker
│ ├── Dockerfile
│ ├── Dockerfile-arm64
│ └── Dockerfile-cpu
├── downloads.py
├── flask_rest_api
│ ├── README.md
│ ├── example_request.py
│ └── restapi.py
├── general.py
├── google_app_engine
│ ├── Dockerfile
│ ├── additional_requirements.txt
│ └── app.yaml
├── loggers
│ ├── __init__.py
│ ├── clearml
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── clearml_utils.py
│ │ └── hpo.py
│ ├── comet
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── comet_utils.py
│ │ ├── hpo.py
│ │ └── optimizer_config.json
│ └── wandb
│ │ ├── __init__.py
│ │ └── wandb_utils.py
├── loss.py
├── metrics.py
├── plots.py
├── segment
│ ├── __init__.py
│ ├── augmentations.py
│ ├── dataloaders.py
│ ├── general.py
│ ├── loss.py
│ ├── metrics.py
│ └── plots.py
├── torch_utils.py
└── triton.py
├── val.py
└── yolov5s.pt
/.dockerignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/.dockerignore
--------------------------------------------------------------------------------
/.github/workflows/main.yaml:
--------------------------------------------------------------------------------
name: workflow

on:
  push:
    branches:
      - main
    paths-ignore:
      - 'README.md'

permissions:
  id-token: write
  contents: read

jobs:
  integration:
    name: Continuous Integration
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3

      - name: Lint code
        run: echo "Linting repository"

      - name: Run unit tests
        run: echo "Running unit tests"

  build-and-push-ecr-image:
    name: Continuous Delivery
    needs: integration
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3

      - name: Install Utilities
        run: |
          sudo apt-get update
          sudo apt-get install -y jq unzip
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1

      - name: Build, tag, and push image to Amazon ECR
        id: build-image
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY_NAME }}
          IMAGE_TAG: latest
        run: |
          # Build a docker container and
          # push it to ECR so that it can
          # be deployed to ECS.
          docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
          # The `::set-output` workflow command is deprecated and disabled by
          # GitHub; step outputs must be written to the $GITHUB_OUTPUT file.
          echo "image=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" >> "$GITHUB_OUTPUT"


  Continuous-Deployment:
    needs: build-and-push-ecr-image
    runs-on: self-hosted
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1


      - name: Pull latest images
        run: |
          docker pull ${{secrets.AWS_ECR_LOGIN_URI}}/${{ secrets.ECR_REPOSITORY_NAME }}:latest

      # - name: Stop and remove container if running
      #   run: |
      #     docker ps -q --filter "name=sign" | grep -q . && docker stop sign && docker rm -fv sign

      - name: Run Docker Image to serve users
        run: |
          docker run -d -p 8080:8080 --ipc="host" --name=sign -e 'AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }}' -e 'AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }}' -e 'AWS_REGION=${{ secrets.AWS_REGION }}' ${{secrets.AWS_ECR_LOGIN_URI}}/${{ secrets.ECR_REPOSITORY_NAME }}:latest
      - name: Clean previous images and containers
        run: |
          docker system prune -f
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 | artifacts/*
131 | Sign_language_data.zip
132 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.7-slim-buster

WORKDIR /app
COPY . /app

# One layer for all dependencies: awscli (S3 model sync), ffmpeg/libsm6/libxext6
# (OpenCV runtime libs), unzip (dataset archive), then the Python requirements.
# The original ran `apt update` and `apt-get update` in two separate layers and
# never removed the apt cache; doing everything once keeps the image smaller.
RUN apt-get update \
    && apt-get install -y --no-install-recommends awscli ffmpeg libsm6 libxext6 unzip \
    && rm -rf /var/lib/apt/lists/* \
    && pip install --no-cache-dir -r requirements.txt

CMD ["python3", "app.py"]
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 BAPPY AHMED
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # End-to-end-Sign-Language-Detection
2 |
3 | 1. constants
4 | 2. entity
5 | 3. components
6 | 4. pipelines
7 | 5. app.py
8 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import os
import sys
import shutil
import subprocess

from signLanguage.pipeline.training_pipeline import TrainPipeline
from signLanguage.exception import SignException
from signLanguage.utils.main_utils import decodeImage, encodeImageIntoBase64
from flask import Flask, request, jsonify, render_template, Response
from flask_cors import CORS, cross_origin
from signLanguage.constant.application import APP_HOST, APP_PORT


app = Flask(__name__)
CORS(app)


class ClientApp:
    """Per-application state shared by the request handlers."""

    def __init__(self):
        # File name detect.py reads from ../data/ (see predictRoute).
        self.filename = "inputImage.jpg"


# Created at import time so the routes work under any WSGI server.  The
# original only instantiated this under `__main__`, leaving `clApp`
# undefined when served by e.g. gunicorn.
clApp = ClientApp()


@app.route("/train")
def trainRoute():
    """Run the full training pipeline synchronously, then report completion."""
    obj = TrainPipeline()
    obj.run_pipeline()
    return "Training Successful!!"


@app.route("/")
def home():
    """Serve the single-page UI."""
    return render_template("index.html")


@app.route("/predict", methods=['POST', 'GET'])
@cross_origin()
def predictRoute():
    """Decode a base64 image from the request JSON, run YOLOv5 detection on
    it, and return the annotated image re-encoded as base64.

    Returns a JSON body {"image": <base64>} on success, or a plain-text
    Response describing the input problem on failure.
    """
    try:
        image = request.json['image']
        decodeImage(image, clApp.filename)

        # Run detection from inside yolov5/ so its relative paths resolve.
        # An argument list (shell=False) avoids the quoting pitfalls of the
        # previous os.system("cd yolov5/ && ...") shell string.
        subprocess.run(
            ["python", "detect.py", "--weights", "my_model.pt",
             "--img", "416", "--conf", "0.5",
             "--source", "../data/inputImage.jpg"],
            cwd="yolov5",
        )

        opencodedbase64 = encodeImageIntoBase64("yolov5/runs/detect/exp/inputImage.jpg")
        result = {"image": opencodedbase64.decode('utf-8')}
        # Remove the run directory so the next request writes to exp/ again.
        shutil.rmtree("yolov5/runs", ignore_errors=True)

    except ValueError as val:
        print(val)
        return Response("Value not found inside json data")
    except KeyError:
        return Response("Key value error incorrect key passed")
    except Exception as e:
        print(e)
        result = "Invalid input"

    return jsonify(result)


@app.route("/live", methods=['GET'])
@cross_origin()
def predictLive():
    """Start YOLOv5 detection on the default webcam (source 0).

    Blocks until the detection process exits, then cleans up its output.
    """
    try:
        subprocess.run(
            ["python", "detect.py", "--weights", "my_model.pt",
             "--img", "416", "--conf", "0.5", "--source", "0"],
            cwd="yolov5",
        )
        shutil.rmtree("yolov5/runs", ignore_errors=True)
        return "Camera starting!!"

    except ValueError as val:
        print(val)
        return Response("Value not found inside json data")


if __name__ == "__main__":
    app.run(host=APP_HOST, port=APP_PORT)
--------------------------------------------------------------------------------
/data/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/data/.gitkeep
--------------------------------------------------------------------------------
/data/inputImage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/data/inputImage.jpg
--------------------------------------------------------------------------------
/docs/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/docs/.gitkeep
--------------------------------------------------------------------------------
/flowcharts/Data Ingetions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/flowcharts/Data Ingetions.png
--------------------------------------------------------------------------------
/flowcharts/Data validation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/flowcharts/Data validation.png
--------------------------------------------------------------------------------
/flowcharts/Model Pusher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/flowcharts/Model Pusher.png
--------------------------------------------------------------------------------
/flowcharts/Model trainer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/flowcharts/Model trainer.png
--------------------------------------------------------------------------------
/flowcharts/deployment.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/flowcharts/deployment.jpeg
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | dill==0.3.5.1
2 | from-root==1.0.2
3 | notebook==7.0.0a7
4 | boto3
5 | mypy-boto3-s3
6 | flask-cors
7 | flask
8 |
9 | # YOLOv5 requirements
10 | # Usage: pip install -r requirements.txt
11 |
12 | # Base ----------------------------------------
13 | matplotlib>=3.2.2
14 | numpy>=1.18.5
15 | opencv-python>=4.1.1
16 | Pillow>=7.1.2
17 | PyYAML>=5.3.1
18 | requests>=2.23.0
19 | scipy>=1.4.1
20 | torch>=1.7.0 # see https://pytorch.org/get-started/locally/ (recommended)
21 | torchvision>=0.8.1
22 | tqdm>=4.64.0
23 | # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012
24 |
25 | # Logging -------------------------------------
26 | tensorboard>=2.4.1
27 | # clearml>=1.2.0
28 | # comet
29 |
30 | # Plotting ------------------------------------
31 | pandas>=1.1.4
32 | seaborn>=0.11.0
33 |
34 | # Export --------------------------------------
35 | # coremltools>=6.0 # CoreML export
36 | # onnx>=1.9.0 # ONNX export
37 | # onnx-simplifier>=0.4.1 # ONNX simplifier
38 | # nvidia-pyindex # TensorRT export
39 | # nvidia-tensorrt # TensorRT export
40 | # scikit-learn<=1.1.2 # CoreML quantization
41 | # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos)
42 | # tensorflowjs>=3.9.0 # TF.js export
43 | # openvino-dev # OpenVINO export
44 |
45 | # Deploy --------------------------------------
46 | # tritonclient[all]~=2.24.0
47 |
48 | # Extras --------------------------------------
49 | ipython # interactive notebook
50 | psutil # system utilization
51 | thop>=0.1.1 # FLOPs computation
52 | # mss # screenshots
53 | # albumentations>=1.0.3
54 | # pycocotools>=2.0 # COCO mAP
55 | # roboflow
56 |
57 |
58 | -e .
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import find_packages, setup

# Minimal package metadata; runtime dependencies are managed through
# requirements.txt, which installs this package editable via `-e .`.
setup(
    name="signLanguages",
    version="0.0.0",
    author="iNeuron",
    author_email="boktiar@ineuron.ai",
    packages=find_packages(),
    install_requires=[],
)
--------------------------------------------------------------------------------
/signLanguage/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/signLanguage/__init__.py
--------------------------------------------------------------------------------
/signLanguage/components/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/signLanguage/components/__init__.py
--------------------------------------------------------------------------------
/signLanguage/components/data_ingestion.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from six.moves import urllib
4 | import zipfile
5 | from signLanguage.logger import logging
6 | from signLanguage.exception import SignException
7 | from signLanguage.entity.config_entity import DataIngestionConfig
8 | from signLanguage.entity.artifact_entity import DataIngestionArtifact
9 |
10 |
11 |
class DataIngestion:
    """Downloads the raw dataset zip and unpacks it into the feature store."""

    def __init__(self, data_ingestion_config: DataIngestionConfig = DataIngestionConfig()):
        try:
            self.data_ingestion_config = data_ingestion_config
        except Exception as e:
            raise SignException(e, sys)

    def download_data(self) -> str:
        """Fetch the dataset archive from the configured URL.

        Returns the local path of the downloaded zip file.
        """
        try:
            dataset_url = self.data_ingestion_config.data_download_url
            zip_download_dir = self.data_ingestion_config.data_ingestion_dir
            os.makedirs(zip_download_dir, exist_ok=True)

            # Keep the remote file's own name for the local copy.
            zip_file_path = os.path.join(zip_download_dir, os.path.basename(dataset_url))
            logging.info(f"Downloading data from {dataset_url} into file {zip_file_path}")
            urllib.request.urlretrieve(dataset_url, zip_file_path)
            logging.info(f"Downloaded data from {dataset_url} into file {zip_file_path}")
            return zip_file_path
        except Exception as e:
            raise SignException(e, sys)

    def extract_zip_file(self, zip_file_path: str) -> str:
        """Extract *zip_file_path* into the feature store directory.

        Returns the feature store directory path.
        """
        try:
            feature_store_path = self.data_ingestion_config.feature_store_file_path
            os.makedirs(feature_store_path, exist_ok=True)
            with zipfile.ZipFile(zip_file_path, 'r') as archive:
                archive.extractall(feature_store_path)
            logging.info(f"Extracting zip file: {zip_file_path} into dir: {feature_store_path}")
            return feature_store_path
        except Exception as e:
            raise SignException(e, sys)

    def initiate_data_ingestion(self) -> DataIngestionArtifact:
        """Download then extract the dataset and return the resulting artifact."""
        logging.info("Entered initiate_data_ingestion method of Data_Ingestion class")
        try:
            zip_file_path = self.download_data()
            feature_store_path = self.extract_zip_file(zip_file_path)

            data_ingestion_artifact = DataIngestionArtifact(
                data_zip_file_path=zip_file_path,
                feature_store_path=feature_store_path,
            )
            logging.info("Exited initiate_data_ingestion method of Data_Ingestion class")
            logging.info(f"Data ingestion artifact: {data_ingestion_artifact}")
            return data_ingestion_artifact
        except Exception as e:
            raise SignException(e, sys)
80 |
81 |
--------------------------------------------------------------------------------
/signLanguage/components/data_validation.py:
--------------------------------------------------------------------------------
1 | import os,sys
2 | import shutil
3 | from signLanguage.logger import logging
4 | from signLanguage.exception import SignException
5 | from signLanguage.entity.config_entity import DataValidationConfig
6 | from signLanguage.entity.artifact_entity import (DataIngestionArtifact,
7 | DataValidationArtifact)
8 |
9 |
10 |
11 |
class DataValidation:
    """Checks that the ingested feature store contains every required file."""

    def __init__(
        self,
        data_ingestion_artifact: DataIngestionArtifact,
        data_validation_config: DataValidationConfig,
    ):
        try:
            self.data_ingestion_artifact = data_ingestion_artifact
            self.data_validation_config = data_validation_config

        except Exception as e:
            raise SignException(e, sys)

    def validate_all_files_exist(self) -> bool:
        """Return True iff every required file is present in the feature store.

        The final status is also written once to the configured status file.
        BUG FIX: the original loop overwrote `validation_status` for every
        directory entry, so the result depended only on the *last* file
        listed, the status file was rewritten per entry, and an empty
        directory returned None.  The status is now computed once over the
        whole listing: valid exactly when every entry of
        `required_file_list` exists (extra files are tolerated).
        """
        try:
            present_files = os.listdir(self.data_ingestion_artifact.feature_store_path)

            validation_status = all(
                required in present_files
                for required in self.data_validation_config.required_file_list
            )

            os.makedirs(self.data_validation_config.data_validation_dir, exist_ok=True)
            with open(self.data_validation_config.valid_status_file_dir, 'w') as f:
                f.write(f"Validation status: {validation_status}")

            return validation_status

        except Exception as e:
            raise SignException(e, sys)

    def initiate_data_validation(self) -> DataValidationArtifact:
        """Run validation, persist its status, and return the artifact.

        When validation succeeds, the original dataset zip is copied into the
        current working directory for the model trainer to unzip.
        """
        logging.info("Entered initiate_data_validation method of DataValidation class")
        try:
            status = self.validate_all_files_exist()
            data_validation_artifact = DataValidationArtifact(
                validation_status=status)

            logging.info("Exited initiate_data_validation method of DataValidation class")
            logging.info(f"Data validation artifact: {data_validation_artifact}")

            if status:
                shutil.copy(self.data_ingestion_artifact.data_zip_file_path, os.getcwd())

            return data_validation_artifact

        except Exception as e:
            raise SignException(e, sys)
70 |
71 |
--------------------------------------------------------------------------------
/signLanguage/components/model_pusher.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from signLanguage.configuration.s3_operations import S3Operation
3 | from signLanguage.entity.artifact_entity import (
4 | ModelPusherArtifacts,
5 | ModelTrainerArtifact
6 | )
7 | from signLanguage.entity.config_entity import ModelPusherConfig
8 | from signLanguage.exception import SignException
9 | from signLanguage.logger import logging
10 |
11 |
12 |
class ModelPusher:
    """Uploads the trained model weights to the configured S3 bucket."""

    def __init__(self, model_pusher_config: ModelPusherConfig, model_trainer_artifact: ModelTrainerArtifact, s3: S3Operation):
        self.model_pusher_config = model_pusher_config
        self.model_trainer_artifacts = model_trainer_artifact
        self.s3 = s3

    def initiate_model_pusher(self) -> ModelPusherArtifacts:
        """
        Method Name : initiate_model_pusher

        Description : This method initiates model pusher.

        Output : Model pusher artifact
        """
        logging.info("Entered initiate_model_pusher method of Modelpusher class")
        try:
            # Push the trained weights; keep the local copy (remove=False).
            self.s3.upload_file(
                self.model_trainer_artifacts.trained_model_file_path,
                self.model_pusher_config.S3_MODEL_KEY_PATH,
                self.model_pusher_config.BUCKET_NAME,
                remove=False,
            )
            logging.info("Uploaded best model to s3 bucket")
            logging.info("Exited initiate_model_pusher method of ModelTrainer class")

            # Record where the model ended up so downstream steps can find it.
            return ModelPusherArtifacts(
                bucket_name=self.model_pusher_config.BUCKET_NAME,
                s3_model_path=self.model_pusher_config.S3_MODEL_KEY_PATH,
            )

        except Exception as e:
            raise SignException(e, sys) from e
53 |
54 |
--------------------------------------------------------------------------------
/signLanguage/components/model_trainer.py:
--------------------------------------------------------------------------------
1 | import os,sys
2 | import yaml
3 | from signLanguage.utils.main_utils import read_yaml_file
4 | from signLanguage.logger import logging
5 | from signLanguage.exception import SignException
6 | from signLanguage.entity.config_entity import ModelTrainerConfig
7 | from signLanguage.entity.artifact_entity import ModelTrainerArtifact
8 |
9 |
class ModelTrainer:
    # Trains a custom YOLOv5 model on the sign-language dataset via the
    # yolov5 command-line scripts (shell tools required: unzip, rm, cp).
    def __init__(
        self,
        model_trainer_config: ModelTrainerConfig,
    ):
        self.model_trainer_config = model_trainer_config



    def initiate_model_trainer(self,) -> ModelTrainerArtifact:
        """Unzip the dataset, patch the YOLOv5 model config with the dataset's
        class count, launch training, and return an artifact pointing at the
        copied best.pt weights.

        NOTE(review): assumes `Sign_language_data.zip` is in the current
        working directory (copied there by data validation) and that it
        unpacks to train/, test/ and data.yaml — POSIX shell only.
        """
        logging.info("Entered initiate_model_trainer method of ModelTrainer class")

        try:
            logging.info("Unzipping data")
            os.system("unzip Sign_language_data.zip")
            os.system("rm Sign_language_data.zip")

            # data.yaml (shipped with the dataset) declares the class count.
            with open("data.yaml", 'r') as stream:
                num_classes = str(yaml.safe_load(stream)['nc'])

            # e.g. "yolov5s.pt" -> "yolov5s": picks the matching model yaml.
            model_config_file_name = self.model_trainer_config.weight_name.split(".")[0]
            print(model_config_file_name)

            config = read_yaml_file(f"yolov5/models/{model_config_file_name}.yaml")

            # Override the architecture's class count to match this dataset.
            config['nc'] = int(num_classes)


            # Derived config; the train.py command below reads custom_yolov5s.yaml.
            with open(f'yolov5/models/custom_{model_config_file_name}.yaml', 'w') as f:
                yaml.dump(config, f)

            os.system(f"cd yolov5/ && python train.py --img 416 --batch {self.model_trainer_config.batch_size} --epochs {self.model_trainer_config.no_epochs} --data ../data.yaml --cfg ./models/custom_yolov5s.yaml --weights {self.model_trainer_config.weight_name} --name yolov5s_results --cache")
            # Copy best.pt both beside yolov5/ (path used in the artifact
            # below and by app.py) and into the trainer's artifact directory.
            os.system("cp yolov5/runs/train/yolov5s_results/weights/best.pt yolov5/")
            os.makedirs(self.model_trainer_config.model_trainer_dir, exist_ok=True)
            os.system(f"cp yolov5/runs/train/yolov5s_results/weights/best.pt {self.model_trainer_config.model_trainer_dir}/")

            # Clean up training outputs and the unpacked dataset.
            os.system("rm -rf yolov5/runs")
            os.system("rm -rf train")
            os.system("rm -rf test")
            os.system("rm -rf data.yaml")

            model_trainer_artifact = ModelTrainerArtifact(
                trained_model_file_path="yolov5/best.pt",
            )

            logging.info("Exited initiate_model_trainer method of ModelTrainer class")
            logging.info(f"Model trainer artifact: {model_trainer_artifact}")

            return model_trainer_artifact


        except Exception as e:
            raise SignException(e, sys)
63 |
64 |
65 |
66 |
67 |
68 |
69 |
--------------------------------------------------------------------------------
/signLanguage/configuration/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/signLanguage/configuration/__init__.py
--------------------------------------------------------------------------------
/signLanguage/constant/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/signLanguage/constant/__init__.py
--------------------------------------------------------------------------------
/signLanguage/constant/application.py:
--------------------------------------------------------------------------------
# Network interface and port the Flask app (app.py) binds to.
APP_HOST: str = "0.0.0.0"  # all interfaces — required inside a container
APP_PORT: int = 8080  # must match the port published by the deployment workflow
--------------------------------------------------------------------------------
/signLanguage/constant/training_pipeline/__init__.py:
--------------------------------------------------------------------------------
import os

# Root directory for all pipeline outputs; a timestamped subdirectory is
# appended per run in config_entity.py.
ARTIFACTS_DIR: str = "artifacts"


"""
Data Ingestion related constants start with DATA_INGESTION VAR NAME
"""
DATA_INGESTION_DIR_NAME: str = "data_ingestion"

DATA_INGESTION_FEATURE_STORE_DIR: str = "feature_store"

# Zipped dataset (train/, test/, data.yaml) downloaded by DataIngestion.
DATA_DOWNLOAD_URL: str = "https://github.com/entbappy/Branching-tutorial/raw/master/Sign_language_data.zip"



"""
Data Validation related constants start with DATA_VALIDATION VAR NAME
"""

DATA_VALIDATION_DIR_NAME: str = "data_validation"

DATA_VALIDATION_STATUS_FILE: str = 'status.txt'

# Entries that must exist in the feature store for validation to pass.
DATA_VALIDATION_ALL_REQUIRED_FILES: list = ["train", "test", "data.yaml"]



"""
MODEL TRAINER related constants start with MODEL_TRAINER var name
"""
MODEL_TRAINER_DIR_NAME: str = "model_trainer"

MODEL_TRAINER_PRETRAINED_WEIGHT_NAME: str = "yolov5s.pt"

MODEL_TRAINER_NO_EPOCHS: int = 1

MODEL_TRAINER_BATCH_SIZE: int = 16



"""
MODEL PUSHER related constants start with MODEL_PUSHER var name
"""
# S3 bucket and object name the trained weights are pushed to.
BUCKET_NAME: str = "sign-lang-23"
S3_MODEL_NAME: str = "best.pt"
47 |
--------------------------------------------------------------------------------
/signLanguage/entity/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/signLanguage/entity/__init__.py
--------------------------------------------------------------------------------
/signLanguage/entity/artifact_entity.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 |
@dataclass
class DataIngestionArtifact:
    """Artifact produced by the data-ingestion component."""

    # path of the downloaded dataset zip archive
    data_zip_file_path: str
    # directory where the archive was extracted
    feature_store_path: str
7 |
8 |
@dataclass
class DataValidationArtifact:
    """Artifact produced by the data-validation component."""

    # True when every required file was found in the ingested dataset
    validation_status: bool
12 |
13 |
14 |
@dataclass
class ModelTrainerArtifact:
    """Artifact produced by the model-trainer component."""

    # filesystem path of the trained model weights (best.pt)
    trained_model_file_path: str
18 |
19 |
20 |
21 |
@dataclass
class ModelPusherArtifacts:
    """Artifact produced by the model-pusher component."""

    # S3 bucket the model was uploaded to
    bucket_name: str
    # key of the uploaded model object inside the bucket
    s3_model_path: str
26 |
--------------------------------------------------------------------------------
/signLanguage/entity/config_entity.py:
--------------------------------------------------------------------------------
1 | import os
2 | from dataclasses import dataclass
3 | from datetime import datetime
4 | from signLanguage.constant.training_pipeline import *
5 |
# single timestamp computed at import time so that every component of one
# pipeline run writes into the same artifacts sub-directory
TIMESTAMP: str = datetime.now().strftime("%m_%d_%Y_%H_%M_%S")

@dataclass
class TrainingPipelineConfig:
    """Top-level configuration shared by all pipeline components."""

    # e.g. "artifacts/05_31_2023_10_15_00"
    artifacts_dir: str = os.path.join(ARTIFACTS_DIR,TIMESTAMP)



# module-level singleton used as the root for every per-component config below
training_pipeline_config:TrainingPipelineConfig = TrainingPipelineConfig()
15 |
16 |
17 |
@dataclass
class DataIngestionConfig:
    """Configuration for the data-ingestion component."""

    # directory (under the run's artifacts dir) for ingestion outputs
    data_ingestion_dir: str = os.path.join(
        training_pipeline_config.artifacts_dir, DATA_INGESTION_DIR_NAME
    )

    # directory where the downloaded archive is unpacked
    feature_store_file_path: str = os.path.join(
        data_ingestion_dir, DATA_INGESTION_FEATURE_STORE_DIR
    )

    # URL of the dataset zip archive to download
    data_download_url: str = DATA_DOWNLOAD_URL
29 |
30 |
31 |
@dataclass
class DataValidationConfig:
    """Configuration for the data-validation component."""

    # directory (under the run's artifacts dir) for validation outputs
    data_validation_dir: str = os.path.join(
        training_pipeline_config.artifacts_dir, DATA_VALIDATION_DIR_NAME
    )

    # NOTE(review): despite the '_dir' suffix this is the status FILE path
    valid_status_file_dir: str = os.path.join(data_validation_dir, DATA_VALIDATION_STATUS_FILE)

    # unannotated on purpose here: @dataclass treats it as a class attribute
    # shared by all instances, not an __init__ field
    required_file_list = DATA_VALIDATION_ALL_REQUIRED_FILES
41 |
42 |
43 |
44 |
@dataclass
class ModelTrainerConfig:
    """Configuration for the model-trainer component."""

    # directory (under the run's artifacts dir) for trainer outputs
    model_trainer_dir: str = os.path.join(
        training_pipeline_config.artifacts_dir, MODEL_TRAINER_DIR_NAME
    )

    # fix: these three attributes were unannotated, so @dataclass treated
    # them as plain class attributes and excluded them from __init__/repr/eq;
    # annotating them makes them real fields that can be overridden per
    # instance while keeping the exact same defaults for existing callers
    weight_name: str = MODEL_TRAINER_PRETRAINED_WEIGHT_NAME

    no_epochs: int = MODEL_TRAINER_NO_EPOCHS

    batch_size: int = MODEL_TRAINER_BATCH_SIZE
56 |
57 |
58 |
@dataclass
class ModelPusherConfig:
    """Configuration for the model-pusher (S3 upload) component."""

    # uppercase field names intentionally mirror the module-level constants
    # they are initialised from (defaults are bound at class-creation time)
    BUCKET_NAME: str = BUCKET_NAME
    S3_MODEL_KEY_PATH: str = S3_MODEL_NAME
--------------------------------------------------------------------------------
/signLanguage/exception/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 |
def error_message_detail(error, error_detail):
    """Build a detailed error string naming the failing script and line.

    :param error: the exception (or message) being reported
    :param error_detail: the ``sys`` module, used to fetch the active traceback
    :return: formatted message with file name, line number and error text
    """
    _, _, exc_tb = error_detail.exc_info()

    # fix: exc_info() returns (None, None, None) when no exception is being
    # handled; the original crashed with AttributeError in that case
    if exc_tb is None:
        return "Error occurred: [{0}]".format(str(error))

    file_name = exc_tb.tb_frame.f_code.co_filename

    error_message = "Error occurred python script name [{0}] line number [{1}] error message [{2}]".format(
        file_name, exc_tb.tb_lineno, str(error)
    )

    return error_message
14 |
15 |
class SignException(Exception):
    """Project-wide exception that enriches the message with file/line info."""

    def __init__(self, error_message, error_detail):
        """
        :param error_message: error message in string format
        :param error_detail: the ``sys`` module, used to locate the traceback
        """
        super().__init__(error_message)
        # replace the plain message with one carrying script name and line
        self.error_message = error_message_detail(error_message, error_detail=error_detail)

    def __str__(self):
        # printing/logging the exception shows the enriched message
        return self.error_message
29 |
30 |
31 |
--------------------------------------------------------------------------------
/signLanguage/logger/__init__.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from datetime import datetime
4 | from from_root import from_root
5 |
6 |
# one log file per process, named after the start timestamp
LOG_FILE = f"{datetime.now().strftime('%m_%d_%Y_%H_%M_%S')}.log"

# fix: create only the 'log' DIRECTORY. The original passed the full file
# path to os.makedirs, which created a directory named '<timestamp>.log'
# and then nested the actual log file inside it.
log_path = os.path.join(from_root(), 'log')

os.makedirs(log_path, exist_ok=True)

# name kept as-is (lowercase 'l' typo) for backward compatibility with any importer
lOG_FILE_PATH = os.path.join(log_path, LOG_FILE)

logging.basicConfig(
    filename=lOG_FILE_PATH,
    format= "[ %(asctime)s ] %(name)s - %(levelname)s - %(message)s",
    level= logging.INFO
)
--------------------------------------------------------------------------------
/signLanguage/pipeline/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/signLanguage/pipeline/__init__.py
--------------------------------------------------------------------------------
/signLanguage/pipeline/training_pipeline.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | from signLanguage.logger import logging
3 | from signLanguage.exception import SignException
4 | from signLanguage.components.data_ingestion import DataIngestion
5 | from signLanguage.components.data_validation import DataValidation
6 | from signLanguage.components.model_trainer import ModelTrainer
7 | from signLanguage.components.model_pusher import ModelPusher
8 | from signLanguage.configuration.s3_operations import S3Operation
9 |
10 | from signLanguage.entity.config_entity import (DataIngestionConfig,
11 | DataValidationConfig,
12 | ModelTrainerConfig,
13 | ModelPusherConfig)
14 |
15 | from signLanguage.entity.artifact_entity import (DataIngestionArtifact,
16 | DataValidationArtifact,
17 | ModelTrainerArtifact,
18 | ModelPusherArtifacts)
19 |
20 |
class TrainPipeline:
    """Orchestrates the training pipeline: ingestion -> validation -> training."""

    def __init__(self):
        # per-component configurations, all rooted at the same timestamped
        # artifacts directory (see config_entity.training_pipeline_config)
        self.data_ingestion_config = DataIngestionConfig()
        self.data_validation_config = DataValidationConfig()
        self.model_trainer_config = ModelTrainerConfig()
        self.model_pusher_config = ModelPusherConfig()
        self.s3_operations = S3Operation()

    def start_data_ingestion(self) -> DataIngestionArtifact:
        """Download and unpack the dataset; return its ingestion artifact."""
        try:
            logging.info(
                "Entered the start_data_ingestion method of TrainPipeline class"
            )
            logging.info("Getting the data from URL")

            data_ingestion = DataIngestion(
                data_ingestion_config=self.data_ingestion_config
            )

            data_ingestion_artifact = data_ingestion.initiate_data_ingestion()
            logging.info("Got the data from URL")
            logging.info(
                "Exited the start_data_ingestion method of TrainPipeline class"
            )

            return data_ingestion_artifact

        except Exception as e:
            # fix: chain the original exception for a complete traceback
            # (consistent with start_data_validation)
            raise SignException(e, sys) from e

    def start_data_validation(
        self, data_ingestion_artifact: DataIngestionArtifact
    ) -> DataValidationArtifact:
        """Check that the ingested data contains every required file."""
        logging.info("Entered the start_data_validation method of TrainPipeline class")

        try:
            data_validation = DataValidation(
                data_ingestion_artifact=data_ingestion_artifact,
                data_validation_config=self.data_validation_config,
            )

            data_validation_artifact = data_validation.initiate_data_validation()

            logging.info("Performed the data validation operation")
            logging.info(
                "Exited the start_data_validation method of TrainPipeline class"
            )

            return data_validation_artifact

        except Exception as e:
            raise SignException(e, sys) from e

    def start_model_trainer(self) -> ModelTrainerArtifact:
        """Train the YOLOv5 model and return the trained-model artifact."""
        try:
            model_trainer = ModelTrainer(
                model_trainer_config=self.model_trainer_config,
            )
            model_trainer_artifact = model_trainer.initiate_model_trainer()
            return model_trainer_artifact

        except Exception as e:
            raise SignException(e, sys) from e

    def start_model_pusher(self, model_trainer_artifact: ModelTrainerArtifact, s3: S3Operation):
        """Upload the trained model to S3 and return the pusher artifact."""
        try:
            model_pusher = ModelPusher(
                model_pusher_config=self.model_pusher_config,
                model_trainer_artifact=model_trainer_artifact,
                s3=s3,
            )
            model_pusher_artifact = model_pusher.initiate_model_pusher()
            return model_pusher_artifact
        except Exception as e:
            raise SignException(e, sys) from e

    def run_pipeline(self) -> None:
        """Run ingestion, validation and (on valid data) training end to end.

        :raises SignException: wrapping any failure, including invalid data.
        """
        try:
            data_ingestion_artifact = self.start_data_ingestion()
            data_validation_artifact = self.start_data_validation(
                data_ingestion_artifact=data_ingestion_artifact
            )
            # fix: rely on truthiness rather than comparing '== True'
            if data_validation_artifact.validation_status:
                model_trainer_artifact = self.start_model_trainer()
                # model pushing is currently disabled; re-enable once the S3
                # bucket is available:
                # self.start_model_pusher(model_trainer_artifact=model_trainer_artifact, s3=self.s3_operations)
            else:
                raise Exception("Your data is not in correct format")

        except Exception as e:
            raise SignException(e, sys) from e
--------------------------------------------------------------------------------
/signLanguage/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/signLanguage/utils/__init__.py
--------------------------------------------------------------------------------
/signLanguage/utils/main_utils.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | import sys
3 | import yaml
4 | import base64
5 |
6 | from signLanguage.exception import SignException
7 | from signLanguage.logger import logging
8 |
9 |
def read_yaml_file(file_path: str) -> dict:
    """Load a YAML file and return its contents as a dict.

    :param file_path: path of the YAML file to read
    :raises SignException: wrapping any I/O or parsing error
    """
    try:
        with open(file_path, "rb") as yaml_file:
            content = yaml.safe_load(yaml_file)
        # fix: log only after the file has actually been parsed; the
        # original logged success before reading anything
        logging.info("Read yaml file successfully")
        return content

    except Exception as e:
        raise SignException(e, sys) from e
18 |
19 |
def write_yaml_file(file_path: str, content: object, replace: bool = False) -> None:
    """Serialize ``content`` as YAML to ``file_path``.

    :param file_path: destination path; parent directories are created
    :param content: any YAML-serializable object
    :param replace: when True, delete an existing file first
    :raises SignException: wrapping any I/O or serialization error
    """
    try:
        if replace:
            if os.path.exists(file_path):
                os.remove(file_path)

        # fix: os.makedirs("") raises FileNotFoundError, so only create a
        # directory when the path actually contains one
        dir_name = os.path.dirname(file_path)
        if dir_name:
            os.makedirs(dir_name, exist_ok=True)

        with open(file_path, "w") as file:
            yaml.dump(content, file)
        logging.info("Successfully write_yaml_file")

    except Exception as e:
        # fix: chain the cause, consistent with read_yaml_file
        raise SignException(e, sys) from e
34 |
35 |
36 |
37 |
def decodeImage(imgstring, fileName):
    """Decode a base64 payload and write it to ./data/<fileName>.

    :param imgstring: base64-encoded image data (str or bytes)
    :param fileName: target file name inside the ./data directory
    """
    imgdata = base64.b64decode(imgstring)
    # fix: drop the redundant f.close() — 'with' already closes the handle;
    # build the path with os.path.join instead of string concatenation
    with open(os.path.join("./data", fileName), 'wb') as f:
        f.write(imgdata)
43 |
44 |
def encodeImageIntoBase64(croppedImagePath):
    """Read a file from disk and return its contents base64-encoded (bytes)."""
    with open(croppedImagePath, "rb") as image_file:
        raw = image_file.read()
    return base64.b64encode(raw)
48 |
--------------------------------------------------------------------------------
/template.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | import logging
4 |
logging.basicConfig(level=logging.INFO, format='[%(asctime)s]: %(message)s:')


project_name = "signLanguage"


# every file the project skeleton needs; parent directories are derived
# from the paths below
list_of_files = [
    ".github/workflows/.gitkeep",
    "data/.gitkeep",
    "docs/.gitkeep",
    f"{project_name}/__init__.py",
    f"{project_name}/components/__init__.py",
    f"{project_name}/components/data_ingestion.py",
    f"{project_name}/components/data_validation.py",
    f"{project_name}/components/model_trainer.py",
    f"{project_name}/components/model_pusher.py",
    f"{project_name}/configuration/__init__.py",
    f"{project_name}/configuration/s3_operations.py",
    f"{project_name}/constant/__init__.py",
    f"{project_name}/constant/training_pipeline/__init__.py",
    f"{project_name}/constant/application.py",
    f"{project_name}/entity/__init__.py",
    # fix: was "artifacts_entity.py" — the module actually used by the
    # package is entity/artifact_entity.py
    f"{project_name}/entity/artifact_entity.py",
    f"{project_name}/entity/config_entity.py",
    f"{project_name}/exception/__init__.py",
    f"{project_name}/logger/__init__.py",
    f"{project_name}/pipeline/__init__.py",
    f"{project_name}/pipeline/training_pipeline.py",
    f"{project_name}/utils/__init__.py",
    f"{project_name}/utils/main_utils.py",
    # fix: was "template/index.html" — Flask serves from templates/
    "templates/index.html",
    ".dockerignore",
    "app.py",
    "Dockerfile",
    "requirements.txt",
    "setup.py"
]


for filepath in list_of_files:
    filepath = Path(filepath)

    filedir, filename = os.path.split(filepath)

    if filedir != "":
        os.makedirs(filedir, exist_ok=True)
        logging.info(f"Creating directory: {filedir} for the file {filename}")

    # fix: test the FULL path. The original checked the bare file name,
    # which never exists relative to the cwd for nested files, so every
    # run re-opened (and truncated) files that already had content.
    if (not os.path.exists(filepath)) or (os.path.getsize(filepath) == 0):
        with open(filepath, 'w') as f:
            pass
        logging.info(f"Creating empty file: {filepath}")

    else:
        logging.info(f"{filepath} is already created")
--------------------------------------------------------------------------------
/yolov5/.dockerignore:
--------------------------------------------------------------------------------
1 | # Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
2 | .git
3 | .cache
4 | .idea
5 | runs
6 | output
7 | coco
8 | storage.googleapis.com
9 |
10 | data/samples/*
11 | **/results*.csv
12 | *.jpg
13 |
14 | # Neural Network weights -----------------------------------------------------------------------------------------------
15 | **/*.pt
16 | **/*.pth
17 | **/*.onnx
18 | **/*.engine
19 | **/*.mlmodel
20 | **/*.torchscript
21 | **/*.torchscript.pt
22 | **/*.tflite
23 | **/*.h5
24 | **/*.pb
25 | *_saved_model/
26 | *_web_model/
27 | *_openvino_model/
28 |
29 | # Below Copied From .gitignore -----------------------------------------------------------------------------------------
30 | # Below Copied From .gitignore -----------------------------------------------------------------------------------------
31 |
32 |
33 | # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
34 | # Byte-compiled / optimized / DLL files
35 | __pycache__/
36 | *.py[cod]
37 | *$py.class
38 |
39 | # C extensions
40 | *.so
41 |
42 | # Distribution / packaging
43 | .Python
44 | env/
45 | build/
46 | develop-eggs/
47 | dist/
48 | downloads/
49 | eggs/
50 | .eggs/
51 | lib/
52 | lib64/
53 | parts/
54 | sdist/
55 | var/
56 | wheels/
57 | *.egg-info/
58 | wandb/
59 | .installed.cfg
60 | *.egg
61 |
62 | # PyInstaller
63 | # Usually these files are written by a python script from a template
64 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
65 | *.manifest
66 | *.spec
67 |
68 | # Installer logs
69 | pip-log.txt
70 | pip-delete-this-directory.txt
71 |
72 | # Unit test / coverage reports
73 | htmlcov/
74 | .tox/
75 | .coverage
76 | .coverage.*
77 | .cache
78 | nosetests.xml
79 | coverage.xml
80 | *.cover
81 | .hypothesis/
82 |
83 | # Translations
84 | *.mo
85 | *.pot
86 |
87 | # Django stuff:
88 | *.log
89 | local_settings.py
90 |
91 | # Flask stuff:
92 | instance/
93 | .webassets-cache
94 |
95 | # Scrapy stuff:
96 | .scrapy
97 |
98 | # Sphinx documentation
99 | docs/_build/
100 |
101 | # PyBuilder
102 | target/
103 |
104 | # Jupyter Notebook
105 | .ipynb_checkpoints
106 |
107 | # pyenv
108 | .python-version
109 |
110 | # celery beat schedule file
111 | celerybeat-schedule
112 |
113 | # SageMath parsed files
114 | *.sage.py
115 |
116 | # dotenv
117 | .env
118 |
119 | # virtualenv
120 | .venv*
121 | venv*/
122 | ENV*/
123 |
124 | # Spyder project settings
125 | .spyderproject
126 | .spyproject
127 |
128 | # Rope project settings
129 | .ropeproject
130 |
131 | # mkdocs documentation
132 | /site
133 |
134 | # mypy
135 | .mypy_cache/
136 |
137 |
138 | # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
139 |
140 | # General
141 | .DS_Store
142 | .AppleDouble
143 | .LSOverride
144 |
145 | # Icon must end with two \r
146 | Icon
147 | Icon?
148 |
149 | # Thumbnails
150 | ._*
151 |
152 | # Files that might appear in the root of a volume
153 | .DocumentRevisions-V100
154 | .fseventsd
155 | .Spotlight-V100
156 | .TemporaryItems
157 | .Trashes
158 | .VolumeIcon.icns
159 | .com.apple.timemachine.donotpresent
160 |
161 | # Directories potentially created on remote AFP share
162 | .AppleDB
163 | .AppleDesktop
164 | Network Trash Folder
165 | Temporary Items
166 | .apdisk
167 |
168 |
169 | # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
170 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
171 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
172 |
173 | # User-specific stuff:
174 | .idea/*
175 | .idea/**/workspace.xml
176 | .idea/**/tasks.xml
177 | .idea/dictionaries
178 | .html # Bokeh Plots
179 | .pg # TensorFlow Frozen Graphs
180 | .avi # videos
181 |
182 | # Sensitive or high-churn files:
183 | .idea/**/dataSources/
184 | .idea/**/dataSources.ids
185 | .idea/**/dataSources.local.xml
186 | .idea/**/sqlDataSources.xml
187 | .idea/**/dynamic.xml
188 | .idea/**/uiDesigner.xml
189 |
190 | # Gradle:
191 | .idea/**/gradle.xml
192 | .idea/**/libraries
193 |
194 | # CMake
195 | cmake-build-debug/
196 | cmake-build-release/
197 |
198 | # Mongo Explorer plugin:
199 | .idea/**/mongoSettings.xml
200 |
201 | ## File-based project format:
202 | *.iws
203 |
204 | ## Plugin-specific files:
205 |
206 | # IntelliJ
207 | out/
208 |
209 | # mpeltonen/sbt-idea plugin
210 | .idea_modules/
211 |
212 | # JIRA plugin
213 | atlassian-ide-plugin.xml
214 |
215 | # Cursive Clojure plugin
216 | .idea/replstate.xml
217 |
218 | # Crashlytics plugin (for Android Studio and IntelliJ)
219 | com_crashlytics_export_strings.xml
220 | crashlytics.properties
221 | crashlytics-build.properties
222 | fabric.properties
223 |
--------------------------------------------------------------------------------
/yolov5/.gitattributes:
--------------------------------------------------------------------------------
# this drops notebooks from GitHub language stats
2 | *.ipynb linguist-vendored
3 |
--------------------------------------------------------------------------------
/yolov5/.github/ISSUE_TEMPLATE/bug-report.yml:
--------------------------------------------------------------------------------
1 | name: 🐛 Bug Report
2 | # title: " "
3 | description: Problems with YOLOv5
4 | labels: [bug, triage]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thank you for submitting a YOLOv5 🐛 Bug Report!
10 |
11 | - type: checkboxes
12 | attributes:
13 | label: Search before asking
14 | description: >
15 | Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar bug report already exists.
16 | options:
17 | - label: >
18 | I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report.
19 | required: true
20 |
21 | - type: dropdown
22 | attributes:
23 | label: YOLOv5 Component
24 | description: |
25 | Please select the part of YOLOv5 where you found the bug.
26 | multiple: true
27 | options:
28 | - "Training"
29 | - "Validation"
30 | - "Detection"
31 | - "Export"
32 | - "PyTorch Hub"
33 | - "Multi-GPU"
34 | - "Evolution"
35 | - "Integrations"
36 | - "Other"
37 | validations:
38 | required: false
39 |
40 | - type: textarea
41 | attributes:
42 | label: Bug
43 | description: Provide console output with error messages and/or screenshots of the bug.
44 | placeholder: |
45 | 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
46 | validations:
47 | required: true
48 |
49 | - type: textarea
50 | attributes:
51 | label: Environment
52 | description: Please specify the software and hardware you used to produce the bug.
53 | placeholder: |
54 | - YOLO: YOLOv5 🚀 v6.0-67-g60e42e1 torch 1.9.0+cu111 CUDA:0 (A100-SXM4-40GB, 40536MiB)
55 | - OS: Ubuntu 20.04
56 | - Python: 3.9.0
57 | validations:
58 | required: false
59 |
60 | - type: textarea
61 | attributes:
62 | label: Minimal Reproducible Example
63 | description: >
64 | When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem.
65 | This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
66 | placeholder: |
67 | ```
68 | # Code to reproduce your issue here
69 | ```
70 | validations:
71 | required: false
72 |
73 | - type: textarea
74 | attributes:
75 | label: Additional
76 | description: Anything else you would like to share?
77 |
78 | - type: checkboxes
79 | attributes:
80 | label: Are you willing to submit a PR?
81 | description: >
82 | (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature.
83 | See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started.
84 | options:
85 | - label: Yes I'd like to help by submitting a PR!
86 |
--------------------------------------------------------------------------------
/yolov5/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 | - name: 📄 Docs
4 | url: https://docs.ultralytics.com/yolov5
5 | about: View Ultralytics YOLOv5 Docs
6 | - name: 💬 Forum
7 | url: https://community.ultralytics.com/
8 | about: Ask on Ultralytics Community Forum
9 | - name: 🎧 Discord
10 | url: https://discord.gg/n6cFeSPZdD
11 | about: Ask on Ultralytics Discord
12 |
--------------------------------------------------------------------------------
/yolov5/.github/ISSUE_TEMPLATE/feature-request.yml:
--------------------------------------------------------------------------------
1 | name: 🚀 Feature Request
2 | description: Suggest a YOLOv5 idea
3 | # title: " "
4 | labels: [enhancement]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thank you for submitting a YOLOv5 🚀 Feature Request!
10 |
11 | - type: checkboxes
12 | attributes:
13 | label: Search before asking
14 | description: >
15 | Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar feature request already exists.
16 | options:
17 | - label: >
18 | I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar feature requests.
19 | required: true
20 |
21 | - type: textarea
22 | attributes:
23 | label: Description
24 | description: A short description of your feature.
25 | placeholder: |
26 | What new feature would you like to see in YOLOv5?
27 | validations:
28 | required: true
29 |
30 | - type: textarea
31 | attributes:
32 | label: Use case
33 | description: |
34 | Describe the use case of your feature request. It will help us understand and prioritize the feature request.
35 | placeholder: |
36 | How would this feature be used, and who would use it?
37 |
38 | - type: textarea
39 | attributes:
40 | label: Additional
41 | description: Anything else you would like to share?
42 |
43 | - type: checkboxes
44 | attributes:
45 | label: Are you willing to submit a PR?
46 | description: >
47 | (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature.
48 | See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started.
49 | options:
50 | - label: Yes I'd like to help by submitting a PR!
51 |
--------------------------------------------------------------------------------
/yolov5/.github/ISSUE_TEMPLATE/question.yml:
--------------------------------------------------------------------------------
1 | name: ❓ Question
2 | description: Ask a YOLOv5 question
3 | # title: " "
4 | labels: [question]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thank you for asking a YOLOv5 ❓ Question!
10 |
11 | - type: checkboxes
12 | attributes:
13 | label: Search before asking
14 | description: >
15 | Please search the [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) to see if a similar question already exists.
16 | options:
17 | - label: >
18 | I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) and found no similar questions.
19 | required: true
20 |
21 | - type: textarea
22 | attributes:
23 | label: Question
24 | description: What is your question?
25 | placeholder: |
26 | 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
27 | validations:
28 | required: true
29 |
30 | - type: textarea
31 | attributes:
32 | label: Additional
33 | description: Anything else you would like to share?
34 |
--------------------------------------------------------------------------------
/yolov5/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
12 |
13 | copilot:all
14 |
--------------------------------------------------------------------------------
/yolov5/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: pip
4 | directory: "/"
5 | schedule:
6 | interval: weekly
7 | time: "04:00"
8 | open-pull-requests-limit: 10
9 | reviewers:
10 | - glenn-jocher
11 | labels:
12 | - dependencies
13 |
14 | - package-ecosystem: github-actions
15 | directory: "/"
16 | schedule:
17 | interval: weekly
18 | time: "04:00"
19 | open-pull-requests-limit: 5
20 | reviewers:
21 | - glenn-jocher
22 | labels:
23 | - dependencies
24 |
--------------------------------------------------------------------------------
/yolov5/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
2 | # https://github.com/github/codeql-action
3 |
4 | name: "CodeQL"
5 |
6 | on:
7 | schedule:
8 | - cron: '0 0 1 * *' # Runs at 00:00 UTC on the 1st of every month
9 |
10 | jobs:
11 | analyze:
12 | name: Analyze
13 | runs-on: ubuntu-latest
14 |
15 | strategy:
16 | fail-fast: false
17 | matrix:
18 | language: ['python']
19 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
20 | # Learn more:
21 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
22 |
23 | steps:
24 | - name: Checkout repository
25 | uses: actions/checkout@v3
26 |
27 | # Initializes the CodeQL tools for scanning.
28 | - name: Initialize CodeQL
29 | uses: github/codeql-action/init@v2
30 | with:
31 | languages: ${{ matrix.language }}
32 | # If you wish to specify custom queries, you can do so here or in a config file.
33 | # By default, queries listed here will override any specified in a config file.
34 | # Prefix the list here with "+" to use these queries and those in the config file.
35 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
36 |
37 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
38 | # If this step fails, then you should remove it and run the build manually (see below)
39 | - name: Autobuild
40 | uses: github/codeql-action/autobuild@v2
41 |
42 | # ℹ️ Command-line programs to run using the OS shell.
43 | # 📚 https://git.io/JvXDl
44 |
45 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
46 | # and modify them (or add more) to build your code if your project
47 | # uses a compiled language
48 |
49 | #- run: |
50 | # make bootstrap
51 | # make release
52 |
53 | - name: Perform CodeQL Analysis
54 | uses: github/codeql-action/analyze@v2
55 |
--------------------------------------------------------------------------------
/yolov5/.github/workflows/docker.yml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5
3 |
4 | name: Publish Docker Images
5 |
6 | on:
7 | push:
8 | branches: [ master ]
9 |
10 | jobs:
11 | docker:
12 | if: github.repository == 'ultralytics/yolov5'
13 | name: Push Docker image to Docker Hub
14 | runs-on: ubuntu-latest
15 | steps:
16 | - name: Checkout repo
17 | uses: actions/checkout@v3
18 |
19 | - name: Set up QEMU
20 | uses: docker/setup-qemu-action@v2
21 |
22 | - name: Set up Docker Buildx
23 | uses: docker/setup-buildx-action@v2
24 |
25 | - name: Login to Docker Hub
26 | uses: docker/login-action@v2
27 | with:
28 | username: ${{ secrets.DOCKERHUB_USERNAME }}
29 | password: ${{ secrets.DOCKERHUB_TOKEN }}
30 |
31 | - name: Build and push arm64 image
32 | uses: docker/build-push-action@v4
33 | continue-on-error: true
34 | with:
35 | context: .
36 | platforms: linux/arm64
37 | file: utils/docker/Dockerfile-arm64
38 | push: true
39 | tags: ultralytics/yolov5:latest-arm64
40 |
41 | - name: Build and push CPU image
42 | uses: docker/build-push-action@v4
43 | continue-on-error: true
44 | with:
45 | context: .
46 | file: utils/docker/Dockerfile-cpu
47 | push: true
48 | tags: ultralytics/yolov5:latest-cpu
49 |
50 | - name: Build and push GPU image
51 | uses: docker/build-push-action@v4
52 | continue-on-error: true
53 | with:
54 | context: .
55 | file: utils/docker/Dockerfile
56 | push: true
57 | tags: ultralytics/yolov5:latest
58 |
--------------------------------------------------------------------------------
/yolov5/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 |
3 | name: Close stale issues
4 | on:
5 | schedule:
6 | - cron: '0 0 * * *' # Runs at 00:00 UTC every day
7 |
8 | jobs:
9 | stale:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/stale@v8
13 | with:
14 | repo-token: ${{ secrets.GITHUB_TOKEN }}
15 | stale-issue-message: |
16 | 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs.
17 |
18 | Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources:
19 | - **Wiki** – https://github.com/ultralytics/yolov5/wiki
20 | - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials
21 | - **Docs** – https://docs.ultralytics.com
22 |
23 | Access additional [Ultralytics](https://ultralytics.com) ⚡ resources:
24 | - **Ultralytics HUB** – https://ultralytics.com/hub
25 | - **Vision API** – https://ultralytics.com/yolov5
26 | - **About Us** – https://ultralytics.com/about
27 | - **Join Our Team** – https://ultralytics.com/work
28 | - **Contact Us** – https://ultralytics.com/contact
29 |
30 | Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed!
31 |
32 | Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐!
33 |
34 | stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.'
35 | days-before-issue-stale: 30
36 | days-before-issue-close: 10
37 | days-before-pr-stale: 90
38 | days-before-pr-close: 30
39 | exempt-issue-labels: 'documentation,tutorial,TODO'
40 | operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting.
41 |
--------------------------------------------------------------------------------
/yolov5/.github/workflows/translate-readme.yml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md
3 |
4 | name: Translate README
5 |
6 | on:
7 | push:
8 | branches:
9 | - translate_readme # replace with 'master' to enable action
10 | paths:
11 | - README.md
12 |
13 | jobs:
14 | Translate:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - uses: actions/checkout@v3
18 | - name: Setup Node.js
19 | uses: actions/setup-node@v3
20 | with:
21 | node-version: 16
22 | # ISO Language Codes: https://cloud.google.com/translate/docs/languages
23 | - name: Adding README - Chinese Simplified
24 | uses: dephraiim/translate-readme@main
25 | with:
26 | LANG: zh-CN
27 |
--------------------------------------------------------------------------------
/yolov5/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, AGPL-3.0 license
2 | # Pre-commit hooks. For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md
3 |
4 | exclude: 'docs/'
5 | # Define bot property if installed via https://github.com/marketplace/pre-commit-ci
6 | ci:
7 | autofix_prs: true
8 | autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
9 | autoupdate_schedule: monthly
10 | # submodules: true
11 |
12 | repos:
13 | - repo: https://github.com/pre-commit/pre-commit-hooks
14 | rev: v4.4.0
15 | hooks:
16 | - id: end-of-file-fixer
17 | - id: trailing-whitespace
18 | - id: check-case-conflict
19 | - id: check-yaml
20 | - id: check-docstring-first
21 | - id: double-quote-string-fixer
22 | - id: detect-private-key
23 |
24 | - repo: https://github.com/asottile/pyupgrade
25 | rev: v3.3.1
26 | hooks:
27 | - id: pyupgrade
28 | name: Upgrade code
29 | args: [--py37-plus]
30 |
31 | - repo: https://github.com/PyCQA/isort
32 | rev: 5.12.0
33 | hooks:
34 | - id: isort
35 | name: Sort imports
36 |
37 | - repo: https://github.com/google/yapf
38 | rev: v0.32.0
39 | hooks:
40 | - id: yapf
41 | name: YAPF formatting
42 |
43 | - repo: https://github.com/executablebooks/mdformat
44 | rev: 0.7.16
45 | hooks:
46 | - id: mdformat
47 | name: MD formatting
48 | additional_dependencies:
49 | - mdformat-gfm
50 | - mdformat-black
51 | # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md"
52 |
53 | - repo: https://github.com/PyCQA/flake8
54 | rev: 6.0.0
55 | hooks:
56 | - id: flake8
57 | name: PEP8
58 |
59 | - repo: https://github.com/codespell-project/codespell
60 | rev: v2.2.4
61 | hooks:
62 | - id: codespell
63 | args:
64 | - --ignore-words-list=crate,nd,strack,dota
65 |
66 | #- repo: https://github.com/asottile/yesqa
67 | # rev: v1.4.0
68 | # hooks:
69 | # - id: yesqa
70 |
--------------------------------------------------------------------------------
/yolov5/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | preferred-citation:
3 | type: software
4 | message: If you use YOLOv5, please cite it as below.
5 | authors:
6 | - family-names: Jocher
7 | given-names: Glenn
8 | orcid: "https://orcid.org/0000-0001-5950-6979"
9 | title: "YOLOv5 by Ultralytics"
10 | version: 7.0
11 | doi: 10.5281/zenodo.3908559
12 | date-released: 2020-05-29
13 | license: AGPL-3.0
14 | url: "https://github.com/ultralytics/yolov5"
15 |
--------------------------------------------------------------------------------
/yolov5/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contributing to YOLOv5 🚀
2 |
3 | We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:
4 |
5 | - Reporting a bug
6 | - Discussing the current state of the code
7 | - Submitting a fix
8 | - Proposing a new feature
9 | - Becoming a maintainer
10 |
11 | YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be
12 | helping push the frontiers of what's possible in AI 😃!
13 |
14 | ## Submitting a Pull Request (PR) 🛠️
15 |
16 | Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
17 |
18 | ### 1. Select File to Update
19 |
20 | Select `requirements.txt` to update by clicking on it in GitHub.
21 |
22 | 
23 |
24 | ### 2. Click 'Edit this file'
25 |
26 | The button is in the top-right corner.
27 |
28 | 
29 |
30 | ### 3. Make Changes
31 |
32 | Change the `matplotlib` version from `3.2.2` to `3.3`.
33 |
34 | 
35 |
36 | ### 4. Preview Changes and Submit PR
37 |
38 | Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
39 | for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
40 | changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!
41 |
42 | 
43 |
44 | ### PR recommendations
45 |
46 | To allow your work to be integrated as seamlessly as possible, we advise you to:
47 |
48 | - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update
49 | your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.
50 |
51 | 
52 |
53 | - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**.
54 |
55 | 
56 |
57 | - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
58 | but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
59 |
60 | ## Submitting a Bug Report 🐛
61 |
62 | If you spot a problem with YOLOv5 please submit a Bug Report!
63 |
64 | For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
65 | short guidelines below to help users provide what we need to get started.
66 |
67 | When asking a question, people will be better able to provide help if you provide **code** that they can easily
68 | understand and use to **reproduce** the problem. This is referred to by community members as creating
69 | a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
70 | the problem should be:
71 |
72 | - ✅ **Minimal** – Use as little code as possible that still produces the same problem
73 | - ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
74 | - ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
75 |
76 | In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
77 | should be:
78 |
79 | - ✅ **Current** – Verify that your code is up-to-date with the current
80 | GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
81 | copy to ensure your problem has not already been resolved by previous commits.
82 | - ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
83 | repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
84 |
85 | If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛
86 | **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide
87 | a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
88 | understand and diagnose your problem.
89 |
90 | ## License
91 |
92 | By contributing, you agree that your contributions will be licensed under
93 | the [AGPL-3.0 license](https://choosealicense.com/licenses/agpl-3.0/)
94 |
--------------------------------------------------------------------------------
/yolov5/best.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/yolov5/best.pt
--------------------------------------------------------------------------------
/yolov5/data/Argoverse.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
3 | # Example usage: python train.py --data Argoverse.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── Argoverse ← downloads here (31.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/Argoverse # dataset root dir
12 | train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
13 | val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
14 | test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: bus
23 | 5: truck
24 | 6: traffic_light
25 | 7: stop_sign
26 |
27 |
28 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
29 | download: |
30 | import json
31 |
32 | from tqdm import tqdm
33 | from utils.general import download, Path
34 |
35 |
36 | def argoverse2yolo(set):
37 | labels = {}
38 | a = json.load(open(set, "rb"))
39 | for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
40 | img_id = annot['image_id']
41 | img_name = a['images'][img_id]['name']
42 | img_label_name = f'{img_name[:-3]}txt'
43 |
44 | cls = annot['category_id'] # instance class id
45 | x_center, y_center, width, height = annot['bbox']
46 | x_center = (x_center + width / 2) / 1920.0 # offset and scale
47 | y_center = (y_center + height / 2) / 1200.0 # offset and scale
48 | width /= 1920.0 # scale
49 | height /= 1200.0 # scale
50 |
51 | img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
52 | if not img_dir.exists():
53 | img_dir.mkdir(parents=True, exist_ok=True)
54 |
55 | k = str(img_dir / img_label_name)
56 | if k not in labels:
57 | labels[k] = []
58 | labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
59 |
60 | for k in labels:
61 | with open(k, "w") as f:
62 | f.writelines(labels[k])
63 |
64 |
65 | # Download
66 | dir = Path(yaml['path']) # dataset root dir
67 | urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
68 | download(urls, dir=dir, delete=False)
69 |
70 | # Convert
71 | annotations_dir = 'Argoverse-HD/annotations/'
72 | (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
73 | for d in "train.json", "val.json":
74 | argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels
75 |
--------------------------------------------------------------------------------
/yolov5/data/GlobalWheat2020.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
3 | # Example usage: python train.py --data GlobalWheat2020.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── GlobalWheat2020 ← downloads here (7.0 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/GlobalWheat2020 # dataset root dir
12 | train: # train images (relative to 'path') 3422 images
13 | - images/arvalis_1
14 | - images/arvalis_2
15 | - images/arvalis_3
16 | - images/ethz_1
17 | - images/rres_1
18 | - images/inrae_1
19 | - images/usask_1
20 | val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
21 | - images/ethz_1
22 | test: # test images (optional) 1276 images
23 | - images/utokyo_1
24 | - images/utokyo_2
25 | - images/nau_1
26 | - images/uq_1
27 |
28 | # Classes
29 | names:
30 | 0: wheat_head
31 |
32 |
33 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
34 | download: |
35 | from utils.general import download, Path
36 |
37 |
38 | # Download
39 | dir = Path(yaml['path']) # dataset root dir
40 | urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
41 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
42 | download(urls, dir=dir)
43 |
44 | # Make Directories
45 | for p in 'annotations', 'images', 'labels':
46 | (dir / p).mkdir(parents=True, exist_ok=True)
47 |
48 | # Move
49 | for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
50 | 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
51 | (dir / p).rename(dir / 'images' / p) # move to /images
52 | f = (dir / p).with_suffix('.json') # json file
53 | if f.exists():
54 | f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
55 |
--------------------------------------------------------------------------------
/yolov5/data/SKU-110K.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
3 | # Example usage: python train.py --data SKU-110K.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── SKU-110K ← downloads here (13.6 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/SKU-110K # dataset root dir
12 | train: train.txt # train images (relative to 'path') 8219 images
13 | val: val.txt # val images (relative to 'path') 588 images
14 | test: test.txt # test images (optional) 2936 images
15 |
16 | # Classes
17 | names:
18 | 0: object
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | import shutil
24 | from tqdm import tqdm
25 | from utils.general import np, pd, Path, download, xyxy2xywh
26 |
27 |
28 | # Download
29 | dir = Path(yaml['path']) # dataset root dir
30 | parent = Path(dir.parent) # download dir
31 | urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
32 | download(urls, dir=parent, delete=False)
33 |
34 | # Rename directories
35 | if dir.exists():
36 | shutil.rmtree(dir)
37 | (parent / 'SKU110K_fixed').rename(dir) # rename dir
38 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
39 |
40 | # Convert labels
41 | names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
42 | for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
43 | x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
44 | images, unique_images = x[:, 0], np.unique(x[:, 0])
45 | with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
46 | f.writelines(f'./images/{s}\n' for s in unique_images)
47 | for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
48 | cls = 0 # single-class dataset
49 | with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
50 | for r in x[images == im]:
51 | w, h = r[6], r[7] # image width, height
52 | xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
53 | f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
54 |
--------------------------------------------------------------------------------
/yolov5/data/VOC.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
3 | # Example usage: python train.py --data VOC.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VOC ← downloads here (2.8 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VOC
12 | train: # train images (relative to 'path') 16551 images
13 | - images/train2012
14 | - images/train2007
15 | - images/val2012
16 | - images/val2007
17 | val: # val images (relative to 'path') 4952 images
18 | - images/test2007
19 | test: # test images (optional)
20 | - images/test2007
21 |
22 | # Classes
23 | names:
24 | 0: aeroplane
25 | 1: bicycle
26 | 2: bird
27 | 3: boat
28 | 4: bottle
29 | 5: bus
30 | 6: car
31 | 7: cat
32 | 8: chair
33 | 9: cow
34 | 10: diningtable
35 | 11: dog
36 | 12: horse
37 | 13: motorbike
38 | 14: person
39 | 15: pottedplant
40 | 16: sheep
41 | 17: sofa
42 | 18: train
43 | 19: tvmonitor
44 |
45 |
46 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
47 | download: |
48 | import xml.etree.ElementTree as ET
49 |
50 | from tqdm import tqdm
51 | from utils.general import download, Path
52 |
53 |
54 | def convert_label(path, lb_path, year, image_id):
55 | def convert_box(size, box):
56 | dw, dh = 1. / size[0], 1. / size[1]
57 | x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
58 | return x * dw, y * dh, w * dw, h * dh
59 |
60 | in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
61 | out_file = open(lb_path, 'w')
62 | tree = ET.parse(in_file)
63 | root = tree.getroot()
64 | size = root.find('size')
65 | w = int(size.find('width').text)
66 | h = int(size.find('height').text)
67 |
68 | names = list(yaml['names'].values()) # names list
69 | for obj in root.iter('object'):
70 | cls = obj.find('name').text
71 | if cls in names and int(obj.find('difficult').text) != 1:
72 | xmlbox = obj.find('bndbox')
73 | bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
74 | cls_id = names.index(cls) # class id
75 | out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
76 |
77 |
78 | # Download
79 | dir = Path(yaml['path']) # dataset root dir
80 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
81 | urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images
82 | f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images
83 | f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images
84 | download(urls, dir=dir / 'images', delete=False, curl=True, threads=3)
85 |
86 | # Convert
87 | path = dir / 'images/VOCdevkit'
88 | for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
89 | imgs_path = dir / 'images' / f'{image_set}{year}'
90 | lbs_path = dir / 'labels' / f'{image_set}{year}'
91 | imgs_path.mkdir(exist_ok=True, parents=True)
92 | lbs_path.mkdir(exist_ok=True, parents=True)
93 |
94 | with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
95 | image_ids = f.read().strip().split()
96 | for id in tqdm(image_ids, desc=f'{image_set}{year}'):
97 | f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path
98 | lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path
99 | f.rename(imgs_path / f.name) # move image
100 | convert_label(path, lb_path, year, id) # convert labels to YOLO format
101 |
--------------------------------------------------------------------------------
/yolov5/data/VisDrone.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
3 | # Example usage: python train.py --data VisDrone.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VisDrone ← downloads here (2.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VisDrone # dataset root dir
12 | train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
13 | val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
14 | test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
15 |
16 | # Classes
17 | names:
18 | 0: pedestrian
19 | 1: people
20 | 2: bicycle
21 | 3: car
22 | 4: van
23 | 5: truck
24 | 6: tricycle
25 | 7: awning-tricycle
26 | 8: bus
27 | 9: motor
28 |
29 |
30 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
31 | download: |
32 | from utils.general import download, os, Path
33 |
34 | def visdrone2yolo(dir):
35 | from PIL import Image
36 | from tqdm import tqdm
37 |
38 | def convert_box(size, box):
39 | # Convert VisDrone box to YOLO xywh box
40 | dw = 1. / size[0]
41 | dh = 1. / size[1]
42 | return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
43 |
44 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
45 | pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
46 | for f in pbar:
47 | img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
48 | lines = []
49 | with open(f, 'r') as file: # read annotation.txt
50 | for row in [x.split(',') for x in file.read().strip().splitlines()]:
51 | if row[4] == '0': # VisDrone 'ignored regions' class 0
52 | continue
53 | cls = int(row[5]) - 1
54 | box = convert_box(img_size, tuple(map(int, row[:4])))
55 | lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
56 | with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
57 | fl.writelines(lines) # write label.txt
58 |
59 |
60 | # Download
61 | dir = Path(yaml['path']) # dataset root dir
62 | urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
63 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
64 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
65 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
66 | download(urls, dir=dir, curl=True, threads=4)
67 |
68 | # Convert
69 | for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
70 | visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
71 |
--------------------------------------------------------------------------------
/yolov5/data/coco.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: |
102 | from utils.general import download, Path
103 |
104 |
105 | # Download labels
106 | segments = False # segment or box labels
107 | dir = Path(yaml['path']) # dataset root dir
108 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | download(urls, dir=dir.parent)
111 |
112 | # Download data
113 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
114 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
115 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
116 | download(urls, dir=dir / 'images', threads=3)
117 |
--------------------------------------------------------------------------------
/yolov5/data/coco128-seg.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128-seg ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128-seg # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128-seg.zip
102 |
--------------------------------------------------------------------------------
/yolov5/data/coco128.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128 ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128 # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128.zip
102 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.Objects365.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for Objects365 training
3 | # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.00258
7 | lrf: 0.17
8 | momentum: 0.779
9 | weight_decay: 0.00058
10 | warmup_epochs: 1.33
11 | warmup_momentum: 0.86
12 | warmup_bias_lr: 0.0711
13 | box: 0.0539
14 | cls: 0.299
15 | cls_pw: 0.825
16 | obj: 0.632
17 | obj_pw: 1.0
18 | iou_t: 0.2
19 | anchor_t: 3.44
20 | anchors: 3.2
21 | fl_gamma: 0.0
22 | hsv_h: 0.0188
23 | hsv_s: 0.704
24 | hsv_v: 0.36
25 | degrees: 0.0
26 | translate: 0.0902
27 | scale: 0.491
28 | shear: 0.0
29 | perspective: 0.0
30 | flipud: 0.0
31 | fliplr: 0.5
32 | mosaic: 1.0
33 | mixup: 0.0
34 | copy_paste: 0.0
35 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.VOC.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for VOC training
3 | # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | # YOLOv5 Hyperparameter Evolution Results
7 | # Best generation: 467
8 | # Last generation: 996
9 | # metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss
10 | # 0.87729, 0.85125, 0.91286, 0.72664, 0.0076739, 0.0042529, 0.0013865
11 |
12 | lr0: 0.00334
13 | lrf: 0.15135
14 | momentum: 0.74832
15 | weight_decay: 0.00025
16 | warmup_epochs: 3.3835
17 | warmup_momentum: 0.59462
18 | warmup_bias_lr: 0.18657
19 | box: 0.02
20 | cls: 0.21638
21 | cls_pw: 0.5
22 | obj: 0.51728
23 | obj_pw: 0.67198
24 | iou_t: 0.2
25 | anchor_t: 3.3744
26 | fl_gamma: 0.0
27 | hsv_h: 0.01041
28 | hsv_s: 0.54703
29 | hsv_v: 0.27739
30 | degrees: 0.0
31 | translate: 0.04591
32 | scale: 0.75544
33 | shear: 0.0
34 | perspective: 0.0
35 | flipud: 0.0
36 | fliplr: 0.5
37 | mosaic: 0.85834
38 | mixup: 0.04266
39 | copy_paste: 0.0
40 | anchors: 3.412
41 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.no-augmentation.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters when using Albumentations frameworks
3 | # python train.py --hyp hyp.no-augmentation.yaml
4 | # See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | # these parameters are all zero because we want to use the Albumentations framework for augmentation
22 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
23 | hsv_h: 0 # image HSV-Hue augmentation (fraction)
24 | hsv_s: 0 # image HSV-Saturation augmentation (fraction)
25 | hsv_v: 0 # image HSV-Value augmentation (fraction)
26 | degrees: 0.0 # image rotation (+/- deg)
27 | translate: 0 # image translation (+/- fraction)
28 | scale: 0 # image scale (+/- gain)
29 | shear: 0 # image shear (+/- deg)
30 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
31 | flipud: 0.0 # image flip up-down (probability)
32 | fliplr: 0.0 # image flip left-right (probability)
33 | mosaic: 0.0 # image mosaic (probability)
34 | mixup: 0.0 # image mixup (probability)
35 | copy_paste: 0.0 # segment copy-paste (probability)
36 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.scratch-high.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for high-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.1 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.scratch-low.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for low-augmentation COCO training from scratch
3 | # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.5 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 1.0 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.5 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.0 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.scratch-med.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | # Hyperparameters for medium-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov5/data/images/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/yolov5/data/images/bus.jpg
--------------------------------------------------------------------------------
/yolov5/data/images/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/yolov5/data/images/zidane.jpg
--------------------------------------------------------------------------------
/yolov5/data/scripts/download_weights.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
3 | # Download latest models from https://github.com/ultralytics/yolov5/releases
4 | # Example usage: bash data/scripts/download_weights.sh
5 | # parent
6 | # └── yolov5
7 | # ├── yolov5s.pt ← downloads here
8 | # ├── yolov5m.pt
9 | # └── ...
10 |
11 | python - <<EOF
12 | from utils.downloads import attempt_download
13 |
14 | p5 = list('nsmlx')  # P5 models
15 | p6 = [f'{x}6' for x in p5]  # P6 models
16 | cls = [f'{x}-cls' for x in p5]  # classification models
17 | seg = [f'{x}-seg' for x in p5]  # segmentation models
18 |
19 | for x in p5 + p6 + cls + seg:
20 |     attempt_download(f'weights/yolov5{x}.pt')
21 | EOF
22 |
--------------------------------------------------------------------------------
/yolov5/requirements.txt:
--------------------------------------------------------------------------------
1 | # YOLOv5 requirements
2 | # Usage: pip install -r requirements.txt
3 |
4 | # Base ------------------------------------------------------------------------
5 | gitpython>=3.1.30
6 | matplotlib>=3.3
7 | numpy>=1.18.5
8 | opencv-python>=4.1.1
9 | Pillow>=7.1.2
10 | psutil # system resources
11 | PyYAML>=5.3.1
12 | requests>=2.23.0
13 | scipy>=1.4.1
14 | thop>=0.1.1 # FLOPs computation
15 | torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended)
16 | torchvision>=0.8.1
17 | tqdm>=4.64.0
18 | # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012
19 |
20 | # Logging ---------------------------------------------------------------------
21 | # tensorboard>=2.4.1
22 | # clearml>=1.2.0
23 | # comet
24 |
25 | # Plotting --------------------------------------------------------------------
26 | pandas>=1.1.4
27 | seaborn>=0.11.0
28 |
29 | # Export ----------------------------------------------------------------------
30 | # coremltools>=6.0 # CoreML export
31 | # onnx>=1.12.0 # ONNX export
32 | # onnx-simplifier>=0.4.1 # ONNX simplifier
33 | # nvidia-pyindex # TensorRT export
34 | # nvidia-tensorrt # TensorRT export
35 | # scikit-learn<=1.1.2 # CoreML quantization
36 | # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos)
37 | # tensorflowjs>=3.9.0 # TF.js export
38 | # openvino-dev # OpenVINO export
39 |
40 | # Deploy ----------------------------------------------------------------------
41 | setuptools>=65.5.1 # Snyk vulnerability fix
42 | # tritonclient[all]~=2.24.0
43 |
44 | # Extras ----------------------------------------------------------------------
45 | # ipython # interactive notebook
46 | # mss # screenshots
47 | # albumentations>=1.0.3
48 | # pycocotools>=2.0.6 # COCO mAP
49 | # ultralytics # HUB https://hub.ultralytics.com
50 |
--------------------------------------------------------------------------------
/yolov5/setup.cfg:
--------------------------------------------------------------------------------
1 | # Project-wide configuration file, can be used for package metadata and other tool configurations
2 | # Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments
3 | # Local usage: pip install pre-commit, pre-commit run --all-files
4 |
5 | [metadata]
6 | license_file = LICENSE
7 | description_file = README.md
8 |
9 | [tool:pytest]
10 | norecursedirs =
11 | .git
12 | dist
13 | build
14 | addopts =
15 | --doctest-modules
16 | --durations=25
17 | --color=yes
18 |
19 | [flake8]
20 | max-line-length = 120
21 | exclude = .tox,*.egg,build,temp
22 | select = E,W,F
23 | doctests = True
24 | verbose = 2
25 | # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
26 | format = pylint
27 | # see: https://www.flake8rules.com/
28 | ignore = E731,F405,E402,F401,W504,E127,E231,E501,F403
29 | # E731: Do not assign a lambda expression, use a def
30 | # F405: name may be undefined, or defined from star imports: module
31 | # E402: module level import not at top of file
32 | # F401: module imported but unused
33 | # W504: line break after binary operator
34 | # E127: continuation line over-indented for visual indent
35 | # E231: missing whitespace after ‘,’, ‘;’, or ‘:’
36 | # E501: line too long
37 | # F403: ‘from module import *’ used; unable to detect undefined names
38 |
39 | [isort]
40 | # https://pycqa.github.io/isort/docs/configuration/options.html
41 | line_length = 120
42 | # see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
43 | multi_line_output = 0
44 |
45 | [yapf]
46 | based_on_style = pep8
47 | spaces_before_comment = 2
48 | COLUMN_LIMIT = 120
49 | COALESCE_BRACKETS = True
50 | SPACES_AROUND_POWER_OPERATOR = True
51 | SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False
52 | SPLIT_BEFORE_CLOSING_BRACKET = False
53 | SPLIT_BEFORE_FIRST_ARGUMENT = False
54 | # EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
55 |
--------------------------------------------------------------------------------
/yolov5/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | utils/initialization
4 | """
5 |
6 | import contextlib
7 | import platform
8 | import threading
9 |
10 |
def emojis(str=''):
    """Return an emoji-safe version of a string: on Windows, strip non-ASCII characters the console may not render."""
    if platform.system() == 'Windows':
        return str.encode().decode('ascii', 'ignore')
    return str
14 |
15 |
class TryExcept(contextlib.ContextDecorator):
    """Swallow exceptions, printing them instead of raising.

    Usage: as a decorator `@TryExcept()` or as a context manager `with TryExcept():`.
    """

    def __init__(self, msg=''):
        self.msg = msg  # optional prefix for the printed message

    def __enter__(self):
        pass

    def __exit__(self, exc_type, value, traceback):
        # Returning True suppresses any exception raised inside the block
        if value:
            prefix = f"{self.msg}: " if self.msg else ''
            print(emojis(f'{prefix}{value}'))
        return True
28 |
29 |
def threaded(func):
    """Decorate `func` to run in a daemon thread; calling the wrapper starts the thread and returns it.

    Usage: @threaded decorator. The wrapped call returns the started threading.Thread,
    so callers may .join() it if they need the result of the side effects.
    """
    from functools import wraps  # local import: keeps the module import block untouched

    @wraps(func)  # preserve func's __name__/__doc__ on the wrapper (was lost before)
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
        thread.start()
        return thread

    return wrapper
38 |
39 |
def join_threads(verbose=False):
    """Block until every thread except the main one has finished.

    Intended for shutdown hooks, e.g. atexit.register(lambda: join_threads()).
    """
    current = threading.current_thread()
    for worker in threading.enumerate():
        if worker is current:
            continue
        if verbose:
            print(f'Joining thread {worker.name}')
        worker.join()
48 |
49 |
def notebook_init(verbose=True):
    """Check notebook software/hardware setup and return the IPython `display` module (or None).

    Prints a one-line summary of CPUs, RAM and disk usage when `verbose`, clears any
    existing IPython output, and selects the compute device. Returns the IPython.display
    module when IPython is installed and `verbose` is True, else None.
    """
    # Check system software and hardware
    print('Checking setup...')

    import os
    import shutil

    # NOTE(review): check_requirements is imported but never called here — confirm intent
    from utils.general import check_font, check_requirements, is_colab
    from utils.torch_utils import select_device  # imports

    check_font()

    import psutil

    if is_colab():
        shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory

    # System info
    display = None  # stays None unless IPython is importable below
    if verbose:
        gb = 1 << 30  # bytes to GiB (1024 ** 3)
        ram = psutil.virtual_memory().total
        total, used, free = shutil.disk_usage('/')
        with contextlib.suppress(Exception):  # clear display if ipython is installed
            # rebinds `display` from None to the IPython.display module (also the return value)
            from IPython import display
            display.clear_output()
        s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
    else:
        s = ''

    select_device(newline=False)
    print(emojis(f'Setup complete ✅ {s}'))
    return display
83 |
--------------------------------------------------------------------------------
/yolov5/utils/activations.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Activation functions
4 | """
5 |
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 |
10 |
class SiLU(nn.Module):
    """Export-friendly SiLU (Swish) activation: x * sigmoid(x). https://arxiv.org/pdf/1606.08415.pdf"""

    @staticmethod
    def forward(x):
        gate = torch.sigmoid(x)
        return x * gate
16 |
17 |
class Hardswish(nn.Module):
    """Export-friendly Hard-SiLU activation; avoids F.hardsigmoid for TorchScript/CoreML/ONNX compatibility."""

    @staticmethod
    def forward(x):
        # hardtanh(x + 3, 0, 6) equals 6 * hardsigmoid(x) but exports cleanly
        hs = F.hardtanh(x + 3, 0.0, 6.0)
        return x * hs / 6.0
24 |
25 |
class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x)). https://github.com/digantamisra98/Mish"""

    @staticmethod
    def forward(x):
        sp = F.softplus(x)
        return x * sp.tanh()
31 |
32 |
class MemoryEfficientMish(nn.Module):
    # Mish activation memory-efficient
    # Saves memory versus naive Mish by recomputing intermediates in backward
    # instead of retaining the autograd graph of the forward expression.
    class F(torch.autograd.Function):
        # NOTE: this inner class is named `F`, which shadows the module-level
        # `torch.nn.functional as F` at CLASS scope only; inside the static
        # methods below, `F` still resolves to torch.nn.functional because a
        # method's name lookup skips the enclosing class scope.

        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)  # keep only the input for gradient recomputation
            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            # analytic derivative: d/dx [x*tanh(softplus(x))] = fx + x*sigmoid(x)*(1 - fx^2)
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        return self.F.apply(x)
51 |
52 |
class FReLU(nn.Module):
    """FReLU activation: max(x, BN(depthwise_conv(x))). https://arxiv.org/abs/2007.11824"""

    def __init__(self, c1, k=3):  # ch_in, kernel
        super().__init__()
        # depthwise conv (groups=c1) producing the spatial "funnel" condition
        # NOTE(review): padding is fixed at 1, which only preserves spatial size for k=3 — confirm for other k
        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
        self.bn = nn.BatchNorm2d(c1)

    def forward(self, x):
        funnel = self.bn(self.conv(x))
        return torch.max(x, funnel)
62 |
63 |
class AconC(nn.Module):
    r"""ACON-C activation (activate or not).

    AconC: (p1*x - p2*x) * sigmoid(beta*(p1*x - p2*x)) + p2*x, where beta is a learnable
    per-channel parameter, per "Activate or Not: Learning Customized Activation".
    """

    def __init__(self, c1):
        super().__init__()
        shape = (1, c1, 1, 1)  # per-channel parameters, broadcast over batch and space
        self.p1 = nn.Parameter(torch.randn(*shape))
        self.p2 = nn.Parameter(torch.randn(*shape))
        self.beta = nn.Parameter(torch.ones(*shape))

    def forward(self, x):
        delta = (self.p1 - self.p2) * x
        return delta * torch.sigmoid(self.beta * delta) + self.p2 * x
79 |
80 |
class MetaAconC(nn.Module):
    r"""Meta-ACON activation (activate or not).

    MetaAconC: (p1*x - p2*x) * sigmoid(beta*(p1*x - p2*x)) + p2*x, where beta is generated
    by a small per-channel network, per "Activate or Not: Learning Customized Activation".
    """

    def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
        super().__init__()
        bottleneck = max(r, c1 // r)  # width of the beta-generating sub-network
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, bottleneck, k, s, bias=True)
        self.fc2 = nn.Conv2d(bottleneck, c1, k, s, bias=True)
        # BN layers intentionally removed: batch-size-1 instabilities
        # https://github.com/ultralytics/yolov5/issues/2891

    def forward(self, x):
        # global average pool via two sequential means, keeping dims for the 1x1 convs
        pooled = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
        beta = torch.sigmoid(self.fc2(self.fc1(pooled)))  # learned per-channel switch
        delta = (self.p1 - self.p2) * x
        return delta * torch.sigmoid(beta * delta) + self.p2 * x
104 |
--------------------------------------------------------------------------------
/yolov5/utils/autobatch.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Auto-batch utils
4 | """
5 |
6 | from copy import deepcopy
7 |
8 | import numpy as np
9 | import torch
10 |
11 | from utils.general import LOGGER, colorstr
12 | from utils.torch_utils import profile
13 |
14 |
def check_train_batch_size(model, imgsz=640, amp=True):
    """Return an auto-estimated YOLOv5 training batch size for `model` at image size `imgsz`.

    Profiling runs inside torch.cuda.amp.autocast(amp) so memory use matches AMP training;
    a deep copy of the model in train mode is profiled, leaving `model` untouched.
    """
    with torch.cuda.amp.autocast(amp):
        candidate = deepcopy(model).train()  # throwaway copy in train mode for profiling
        return autobatch(candidate, imgsz)  # compute optimal batch size
19 |
20 |
def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
    """Automatically estimate the best YOLOv5 batch size to use `fraction` of available CUDA memory.

    Args:
        model: YOLOv5 model already placed on the target device.
        imgsz: training image size used when profiling memory.
        fraction: target fraction of total CUDA memory to occupy.
        batch_size: fallback batch size (CPU, cudnn.benchmark, or profiling failure).

    Returns:
        int: recommended batch size.

    Usage:
        import torch
        from utils.autobatch import autobatch
        model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
        print(autobatch(model))
    """
    # Check device
    prefix = colorstr('AutoBatch: ')
    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
    device = next(model.parameters()).device  # get model device
    if device.type == 'cpu':
        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
        return batch_size
    if torch.backends.cudnn.benchmark:
        # benchmark mode caches per-shape kernels, which distorts memory profiling
        LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}')
        return batch_size

    # Inspect CUDA memory
    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    d = str(device).upper()  # 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / gb  # GiB total
    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
    f = t - (r + a)  # GiB free
    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')

    # Profile memory at several batch sizes
    batch_sizes = [1, 2, 4, 8, 16]
    try:
        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
        results = profile(img, model, n=3, device=device)
    except Exception as e:
        # BUGFIX: previously only warned and fell through with `results` unbound,
        # raising NameError below; now fall back to the default batch size.
        LOGGER.warning(f'{prefix}{e}')
        return batch_size

    # Fit a first-degree polynomial (memory vs batch size) and solve for the target fraction
    y = [x[2] for x in results if x]  # memory [2]
    p = np.polyfit(batch_sizes[:len(y)], y, deg=1)  # first degree polynomial fit
    b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)
    if None in results:  # some sizes failed
        i = results.index(None)  # first fail index
        if b >= batch_sizes[i]:  # y intercept above failure point
            b = batch_sizes[max(i - 1, 0)]  # select prior safe point
    if b < 1 or b > 1024:  # b outside of safe range
        b = batch_size
        LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')

    fraction = (np.polyval(p, b) + r + a) / t  # actual fraction predicted
    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
    return b
73 |
--------------------------------------------------------------------------------
/yolov5/utils/aws/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/yolov5/utils/aws/__init__.py
--------------------------------------------------------------------------------
/yolov5/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 |
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 |
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 |
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 |
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 |
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 |
--------------------------------------------------------------------------------
/yolov5/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings
2 | # Usage: $ python utils/aws/resume.py
3 |
4 | import os
5 | import sys
6 | from pathlib import Path
7 |
8 | import torch
9 | import yaml
10 |
# Top-level script body: finds every interrupted run's last.pt under the current
# directory and relaunches training (single-GPU or DDP) in the background.
FILE = Path(__file__).resolve()
ROOT = FILE.parents[2]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH

port = 0  # --master_port
path = Path('').resolve()
for last in path.rglob('*/**/last.pt'):
    # NOTE(review): torch.load without map_location restores tensors to their original
    # (possibly CUDA) device — confirm this host has matching GPUs available.
    ckpt = torch.load(last)
    if ckpt['optimizer'] is None:
        continue  # no optimizer state saved: nothing to resume for this checkpoint

    # Load opt.yaml
    with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
        opt = yaml.safe_load(f)

    # Get device count
    d = opt['device'].split(',')  # devices
    nd = len(d)  # number of devices
    # NOTE(review): str.split(',') always yields at least one element, so the
    # `nd == 0` branch looks unreachable — confirm intended semantics.
    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel

    if ddp:  # multi-GPU
        port += 1  # use a distinct master port per relaunched DDP run
        cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
    else:  # single-GPU
        cmd = f'python train.py --resume {last}'

    cmd += ' > /dev/null 2>&1 &'  # redirect output to dev/null and run in daemon thread
    print(cmd)
    os.system(cmd)
41 |
--------------------------------------------------------------------------------
/yolov5/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
3 | # This script will run only once on first instance start (for a re-start script see mime.sh)
4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
5 | # Use >300 GB SSD
6 |
7 | cd home/ubuntu
8 | if [ ! -d yolov5 ]; then
9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker
10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
11 | cd yolov5
12 | bash data/scripts/get_coco.sh && echo "COCO done." &
13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
15 | wait && echo "All tasks done." # finish background tasks
16 | else
17 | echo "Running re-start script." # resume interrupted runs
18 | i=0
19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
20 | while IFS= read -r id; do
21 | ((i++))
22 | echo "restarting container $i: $id"
23 | sudo docker start $id
24 | # sudo docker exec -it $id python train.py --resume # single-GPU
25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
26 | done <<<"$list"
27 | fi
28 |
--------------------------------------------------------------------------------
/yolov5/utils/callbacks.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Callback utils
4 | """
5 |
6 | import threading
7 |
8 |
class Callbacks:
    """Handles all registered callbacks for YOLOv5 Hooks."""
    # BUGFIX: the original docstrings opened with `""""` (four quotes), leaving a
    # stray leading `"` character inside each __doc__ string.

    def __init__(self):
        # Define the available callbacks; each hook maps to a list of
        # {'name': str, 'callback': callable} action dicts.
        self._callbacks = {
            'on_pretrain_routine_start': [],
            'on_pretrain_routine_end': [],
            'on_train_start': [],
            'on_train_epoch_start': [],
            'on_train_batch_start': [],
            'optimizer_step': [],
            'on_before_zero_grad': [],
            'on_train_batch_end': [],
            'on_train_epoch_end': [],
            'on_val_start': [],
            'on_val_batch_start': [],
            'on_val_image_end': [],
            'on_val_batch_end': [],
            'on_val_end': [],
            'on_fit_epoch_end': [],  # fit = train + val
            'on_model_save': [],
            'on_train_end': [],
            'on_params_update': [],
            'teardown': [],
        }
        self.stop_training = False  # set True to interrupt training

    def register_action(self, hook, name='', callback=None):
        """
        Register a new action to a callback hook.

        Args:
            hook: The callback hook name to register the action to
            name: The name of the action for later reference
            callback: The callback to fire

        Raises:
            AssertionError: if `hook` is not a known hook or `callback` is not callable
        """
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        assert callable(callback), f"callback '{callback}' is not callable"
        self._callbacks[hook].append({'name': name, 'callback': callback})

    def get_registered_actions(self, hook=None):
        """
        Return all the registered actions by callback hook.

        Args:
            hook: The name of the hook to check, defaults to all
        """
        return self._callbacks[hook] if hook else self._callbacks

    def run(self, hook, *args, thread=False, **kwargs):
        """
        Loop through the registered actions and fire all callbacks for `hook`.

        Args:
            hook: The name of the hook whose callbacks should fire
            args: Arguments to receive from YOLOv5
            thread: (boolean) Run callbacks in a daemon thread instead of the main thread
            kwargs: Keyword Arguments to receive from YOLOv5
        """
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        for logger in self._callbacks[hook]:
            if thread:
                threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()
            else:
                logger['callback'](*args, **kwargs)
77 |
--------------------------------------------------------------------------------
/yolov5/utils/docker/Dockerfile:
--------------------------------------------------------------------------------
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference

# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch
FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime

# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

# Install linux packages
# ENV key=value is the documented form; the space-separated 'ENV key value' is legacy syntax
ENV DEBIAN_FRONTEND=noninteractive
# Run update and installs in a single layer so a cached 'apt update' can never
# go stale relative to the install steps below
RUN apt update && \
    TZ=Etc/UTC apt install -y tzdata && \
    apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
# RUN alias python=python3

# Security updates
# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796
RUN apt upgrade --no-install-recommends -y openssl

# Create working directory
RUN rm -rf /usr/src/app && mkdir -p /usr/src/app
WORKDIR /usr/src/app

# Copy contents
# COPY . /usr/src/app (issues as not a .git directory)
RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app

# Install pip packages
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel
RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \
    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3'
    # tensorflow tensorflowjs \

# Set environment variables
ENV OMP_NUM_THREADS=1

# Cleanup
ENV DEBIAN_FRONTEND=teletype


# Usage Examples -------------------------------------------------------------------------------------------------------

# Build and Push
# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t

# Pull and Run
# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t

# Pull and Run with local directory access
# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t

# Kill all
# sudo docker kill $(sudo docker ps -q)

# Kill all image-based
# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)

# DockerHub tag update
# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew

# Clean up
# sudo docker system prune -a --volumes

# Update Ubuntu drivers
# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/

# DDP test
# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3

# GCP VM from Image
# docker.io/ultralytics/yolov5:latest
--------------------------------------------------------------------------------
/yolov5/utils/docker/Dockerfile-arm64:
--------------------------------------------------------------------------------
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi

# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
FROM arm64v8/ubuntu:rolling

# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

# Install linux packages
# NOTE(review): 'ENV key value' is legacy syntax; 'ENV DEBIAN_FRONTEND=noninteractive' is the documented form
ENV DEBIAN_FRONTEND noninteractive
# NOTE(review): 'apt update' in its own layer can go stale in the build cache relative to the installs below
RUN apt update
RUN TZ=Etc/UTC apt install -y tzdata
RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev
# RUN alias python=python3

# Install pip packages
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel
# tensorflow-aarch64 / tensorflowjs are deliberately left commented out below
RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
    coremltools onnx onnxruntime
    # tensorflow-aarch64 tensorflowjs \

# Create working directory
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app

# Copy contents
# COPY . /usr/src/app (issues as not a .git directory)
RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
# Restore an interactive-capable debconf frontend after the package installs
ENV DEBIAN_FRONTEND teletype


# Usage Examples -------------------------------------------------------------------------------------------------------

# Build and Push
# t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t

# Pull and Run
# t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
--------------------------------------------------------------------------------
/yolov5/utils/docker/Dockerfile-cpu:
--------------------------------------------------------------------------------
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments

# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
FROM ubuntu:rolling

# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

# Install linux packages
# NOTE(review): 'ENV key value' is legacy syntax; 'ENV DEBIAN_FRONTEND=noninteractive' is the documented form
ENV DEBIAN_FRONTEND noninteractive
# NOTE(review): 'apt update' in its own layer can go stale in the build cache relative to the installs below
RUN apt update
RUN TZ=Etc/UTC apt install -y tzdata
RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
# RUN alias python=python3

# Install pip packages
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel
# The CPU-only PyTorch wheel index is used; tensorflow lines are left commented out
RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
    coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \
    # tensorflow tensorflowjs \
    --extra-index-url https://download.pytorch.org/whl/cpu

# Create working directory
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app

# Copy contents
# COPY . /usr/src/app (issues as not a .git directory)
RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
# Restore an interactive-capable debconf frontend after the package installs
ENV DEBIAN_FRONTEND teletype


# Usage Examples -------------------------------------------------------------------------------------------------------

# Build and Push
# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t

# Pull and Run
# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
--------------------------------------------------------------------------------
/yolov5/utils/flask_rest_api/README.md:
--------------------------------------------------------------------------------
1 | # Flask REST API
2 |
3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
4 | commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
5 | created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
6 |
7 | ## Requirements
8 |
9 | [Flask](https://palletsprojects.com/p/flask/) is required. Install with:
10 |
11 | ```shell
12 | $ pip install Flask
13 | ```
14 |
15 | ## Run
16 |
17 | After Flask installation run:
18 |
19 | ```shell
20 | $ python3 restapi.py --port 5000
21 | ```
22 |
23 | Then use [curl](https://curl.se/) to perform a request:
24 |
25 | ```shell
26 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
27 | ```
28 |
29 | The model inference results are returned as a JSON response:
30 |
31 | ```json
32 | [
33 | {
34 | "class": 0,
35 | "confidence": 0.8900438547,
36 | "height": 0.9318675399,
37 | "name": "person",
38 | "width": 0.3264600933,
39 | "xcenter": 0.7438579798,
40 | "ycenter": 0.5207948685
41 | },
42 | {
43 | "class": 0,
44 | "confidence": 0.8440024257,
45 | "height": 0.7155083418,
46 | "name": "person",
47 | "width": 0.6546785235,
48 | "xcenter": 0.427829951,
49 | "ycenter": 0.6334488392
50 | },
51 | {
52 | "class": 27,
53 | "confidence": 0.3771208823,
54 | "height": 0.3902671337,
55 | "name": "tie",
56 | "width": 0.0696444362,
57 | "xcenter": 0.3675483763,
58 | "ycenter": 0.7991207838
59 | },
60 | {
61 | "class": 27,
62 | "confidence": 0.3527112305,
63 | "height": 0.1540903747,
64 | "name": "tie",
65 | "width": 0.0336618312,
66 | "xcenter": 0.7814827561,
67 | "ycenter": 0.5065554976
68 | }
69 | ]
70 | ```
71 |
72 | An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given
73 | in `example_request.py`.
74 |
--------------------------------------------------------------------------------
/yolov5/utils/flask_rest_api/example_request.py:
--------------------------------------------------------------------------------
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Perform test request
"""

import pprint

import requests

DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s'
IMAGE = 'zidane.jpg'

# Load the test image from disk as raw bytes
with open(IMAGE, 'rb') as image_file:
    payload = image_file.read()

# POST the image to the REST API and decode the JSON detection list
result = requests.post(DETECTION_URL, files={'image': payload}).json()

pprint.pprint(result)
20 |
--------------------------------------------------------------------------------
/yolov5/utils/flask_rest_api/restapi.py:
--------------------------------------------------------------------------------
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run a Flask REST API exposing one or more YOLOv5s models
"""

import argparse
import io

import torch
from flask import Flask, request
from PIL import Image

app = Flask(__name__)
models = {}

# BUG FIX: the route must declare the <model> converter so Flask extracts the
# URL segment and passes it to predict(model); without it Flask raised a
# TypeError on every request because the view expects a 'model' argument.
DETECTION_URL = '/v1/object-detection/<model>'


@app.route(DETECTION_URL, methods=['POST'])
def predict(model):
    """Run inference with the named model on the uploaded 'image' file and
    return the detections as JSON records."""
    if request.method != 'POST':
        return

    if request.files.get('image'):
        # Read the uploaded file fully into memory, then decode with PIL
        im_file = request.files['image']
        im_bytes = im_file.read()
        im = Image.open(io.BytesIO(im_bytes))

        if model in models:
            results = models[model](im, size=640)  # reduce size=320 for faster inference
            return results.pandas().xyxy[0].to_json(orient='records')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model')
    parser.add_argument('--port', default=5000, type=int, help='port number')
    parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
    opt = parser.parse_args()

    # Pre-load each requested model from PyTorch Hub before serving requests
    for m in opt.model:
        models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True)

    app.run(host='0.0.0.0', port=opt.port)  # debug=True causes Restarting with stat
49 |
--------------------------------------------------------------------------------
/yolov5/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
FROM gcr.io/google-appengine/python
# NOTE(review): this base image targets the legacy App Engine flexible Python runtime — confirm it is still published

# Create a virtualenv for dependencies. This isolates these packages from
# system-level packages.
# Use -p python3 or -p python3.7 to select python version. Default is version 2.
RUN virtualenv /env -p python3

# Setting these environment variables are the same as running
# source /env/bin/activate.
ENV VIRTUAL_ENV /env
ENV PATH /env/bin:$PATH

RUN apt-get update && apt-get install -y python-opencv

# Copy the application's requirements.txt and run pip to install all
# dependencies into the virtualenv.
ADD requirements.txt /app/requirements.txt
RUN pip install -r /app/requirements.txt

# Add the application source code.
ADD . /app

# Run a WSGI server to serve the application. gunicorn must be declared as
# a dependency in requirements.txt.
CMD gunicorn -b :$PORT main:app
--------------------------------------------------------------------------------
/yolov5/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
1 | # add these requirements in your app on top of the existing ones
2 | pip==21.1
3 | Flask==1.0.2
4 | gunicorn==19.10.0
5 | werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability
6 |
--------------------------------------------------------------------------------
/yolov5/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 |
4 | service: yolov5app
5 |
6 | liveness_check:
7 | initial_delay_sec: 600
8 |
9 | manual_scaling:
10 | instances: 1
11 | resources:
12 | cpu: 1
13 | memory_gb: 4
14 | disk_size_gb: 20
15 |
--------------------------------------------------------------------------------
/yolov5/utils/loggers/clearml/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/yolov5/utils/loggers/clearml/__init__.py
--------------------------------------------------------------------------------
/yolov5/utils/loggers/comet/comet_utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from urllib.parse import urlparse
4 |
5 | try:
6 | import comet_ml
7 | except (ModuleNotFoundError, ImportError):
8 | comet_ml = None
9 |
10 | import yaml
11 |
logger = logging.getLogger(__name__)  # module-level logger for Comet download/resume messages

COMET_PREFIX = 'comet://'  # URI scheme marking weights/resume values stored in Comet
COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5')  # logged model name, overridable via env var
COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt')  # fallback checkpoint file
17 |
18 |
def download_model_checkpoint(opt, experiment):
    """Download a model checkpoint logged to a Comet Experiment and point
    ``opt.weights`` at the downloaded file.

    Args:
        opt (argparse.Namespace): Command line options; a specific checkpoint
            may be named via the query string of ``opt.weights``
        experiment (comet_ml.APIExperiment): Experiment to pull assets from
    """
    model_dir = f'{opt.project}/{experiment.name}'
    os.makedirs(model_dir, exist_ok=True)

    model_name = COMET_MODEL_NAME
    model_asset_list = experiment.get_model_asset_list(model_name)

    if len(model_asset_list) == 0:
        logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}')
        return

    # Sort newest (highest 'step') first before building the filename map
    model_asset_list = sorted(
        model_asset_list,
        key=lambda x: x['step'],
        reverse=True,
    )
    logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list}

    # A checkpoint filename may ride in the query string of the weights URI,
    # e.g. comet://workspace/project/experiment?best.pt
    resource_url = urlparse(opt.weights)
    checkpoint_filename = resource_url.query

    if checkpoint_filename:
        asset_id = logged_checkpoint_map.get(checkpoint_filename)
    else:
        # No explicit filename: fall back to the default checkpoint (last.pt)
        asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
        checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME

    if asset_id is None:
        logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment')
        return

    try:
        logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}')
        asset_filename = checkpoint_filename

        # Fetch the raw checkpoint bytes and write them under the run's save dir
        model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False)
        model_download_path = f'{model_dir}/{asset_filename}'
        with open(model_download_path, 'wb') as f:
            f.write(model_binary)

        # Point the training run at the locally downloaded weights
        opt.weights = model_download_path

    except Exception as e:
        # Best-effort download: log and continue with the original weights path
        logger.warning('COMET WARNING: Unable to download checkpoint from Comet')
        logger.exception(e)
64 |
65 |
def set_opt_parameters(opt, experiment):
    """Update the opts Namespace with parameters
    from Comet's ExistingExperiment when resuming a run

    Args:
        opt (argparse.Namespace): Namespace of command line options
        experiment (comet_ml.APIExperiment): Comet API Experiment object
    """
    asset_list = experiment.get_asset_list()
    resume_string = opt.resume  # preserved: restoring opt.yaml below would overwrite it

    for asset in asset_list:
        if asset['fileName'] == 'opt.yaml':
            asset_id = asset['assetId']
            asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False)
            opt_dict = yaml.safe_load(asset_binary)
            # Copy every logged option onto the live Namespace
            for key, value in opt_dict.items():
                setattr(opt, key, value)
            opt.resume = resume_string  # re-assert the caller's resume flag

    # Save hyperparameters to YAML file
    # Necessary to pass checks in training script
    save_dir = f'{opt.project}/{experiment.name}'
    os.makedirs(save_dir, exist_ok=True)

    hyp_yaml_path = f'{save_dir}/hyp.yaml'
    with open(hyp_yaml_path, 'w') as f:
        yaml.dump(opt.hyp, f)
    opt.hyp = hyp_yaml_path  # training expects a path to a hyp file, not a dict
95 |
96 |
def check_comet_weights(opt):
    """Downloads model weights from Comet and updates the
    weights path to point to saved weights location

    Args:
        opt (argparse.Namespace): Command Line arguments passed
            to YOLOv5 training script

    Returns:
        None/bool: Return True if weights are successfully downloaded
            else return None
    """
    if comet_ml is None:
        return

    # Only act on string weights that use the Comet URI scheme
    weights = opt.weights
    if not (isinstance(weights, str) and weights.startswith(COMET_PREFIX)):
        return None

    api = comet_ml.API()
    resource = urlparse(weights)
    experiment = api.get(f'{resource.netloc}{resource.path}')
    download_model_checkpoint(opt, experiment)
    return True
122 |
123 |
def check_comet_resume(opt):
    """Restores run parameters to its original state based on the model checkpoint
    and logged Experiment parameters.

    Args:
        opt (argparse.Namespace): Command Line arguments passed
            to YOLOv5 training script

    Returns:
        None/bool: Return True if the run is restored successfully
            else return None
    """
    if comet_ml is None:
        return

    # Only act on string resume values that use the Comet URI scheme
    resume = opt.resume
    if not (isinstance(resume, str) and resume.startswith(COMET_PREFIX)):
        return None

    api = comet_ml.API()
    resource = urlparse(resume)
    experiment = api.get(f'{resource.netloc}{resource.path}')
    set_opt_parameters(opt, experiment)
    download_model_checkpoint(opt, experiment)
    return True
151 |
--------------------------------------------------------------------------------
/yolov5/utils/loggers/comet/optimizer_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "algorithm": "random",
3 | "parameters": {
4 | "anchor_t": {
5 | "type": "discrete",
6 | "values": [
7 | 2,
8 | 8
9 | ]
10 | },
11 | "batch_size": {
12 | "type": "discrete",
13 | "values": [
14 | 16,
15 | 32,
16 | 64
17 | ]
18 | },
19 | "box": {
20 | "type": "discrete",
21 | "values": [
22 | 0.02,
23 | 0.2
24 | ]
25 | },
26 | "cls": {
27 | "type": "discrete",
28 | "values": [
29 | 0.2
30 | ]
31 | },
32 | "cls_pw": {
33 | "type": "discrete",
34 | "values": [
35 | 0.5
36 | ]
37 | },
38 | "copy_paste": {
39 | "type": "discrete",
40 | "values": [
41 | 1
42 | ]
43 | },
44 | "degrees": {
45 | "type": "discrete",
46 | "values": [
47 | 0,
48 | 45
49 | ]
50 | },
51 | "epochs": {
52 | "type": "discrete",
53 | "values": [
54 | 5
55 | ]
56 | },
57 | "fl_gamma": {
58 | "type": "discrete",
59 | "values": [
60 | 0
61 | ]
62 | },
63 | "fliplr": {
64 | "type": "discrete",
65 | "values": [
66 | 0
67 | ]
68 | },
69 | "flipud": {
70 | "type": "discrete",
71 | "values": [
72 | 0
73 | ]
74 | },
75 | "hsv_h": {
76 | "type": "discrete",
77 | "values": [
78 | 0
79 | ]
80 | },
81 | "hsv_s": {
82 | "type": "discrete",
83 | "values": [
84 | 0
85 | ]
86 | },
87 | "hsv_v": {
88 | "type": "discrete",
89 | "values": [
90 | 0
91 | ]
92 | },
93 | "iou_t": {
94 | "type": "discrete",
95 | "values": [
96 | 0.7
97 | ]
98 | },
99 | "lr0": {
100 | "type": "discrete",
101 | "values": [
102 | 1e-05,
103 | 0.1
104 | ]
105 | },
106 | "lrf": {
107 | "type": "discrete",
108 | "values": [
109 | 0.01,
110 | 1
111 | ]
112 | },
113 | "mixup": {
114 | "type": "discrete",
115 | "values": [
116 | 1
117 | ]
118 | },
119 | "momentum": {
120 | "type": "discrete",
121 | "values": [
122 | 0.6
123 | ]
124 | },
125 | "mosaic": {
126 | "type": "discrete",
127 | "values": [
128 | 0
129 | ]
130 | },
131 | "obj": {
132 | "type": "discrete",
133 | "values": [
134 | 0.2
135 | ]
136 | },
137 | "obj_pw": {
138 | "type": "discrete",
139 | "values": [
140 | 0.5
141 | ]
142 | },
143 | "optimizer": {
144 | "type": "categorical",
145 | "values": [
146 | "SGD",
147 | "Adam",
148 | "AdamW"
149 | ]
150 | },
151 | "perspective": {
152 | "type": "discrete",
153 | "values": [
154 | 0
155 | ]
156 | },
157 | "scale": {
158 | "type": "discrete",
159 | "values": [
160 | 0
161 | ]
162 | },
163 | "shear": {
164 | "type": "discrete",
165 | "values": [
166 | 0
167 | ]
168 | },
169 | "translate": {
170 | "type": "discrete",
171 | "values": [
172 | 0
173 | ]
174 | },
175 | "warmup_bias_lr": {
176 | "type": "discrete",
177 | "values": [
178 | 0,
179 | 0.2
180 | ]
181 | },
182 | "warmup_epochs": {
183 | "type": "discrete",
184 | "values": [
185 | 5
186 | ]
187 | },
188 | "warmup_momentum": {
189 | "type": "discrete",
190 | "values": [
191 | 0,
192 | 0.95
193 | ]
194 | },
195 | "weight_decay": {
196 | "type": "discrete",
197 | "values": [
198 | 0,
199 | 0.001
200 | ]
201 | }
202 | },
203 | "spec": {
204 | "maxCombo": 0,
205 | "metric": "metrics/mAP_0.5",
206 | "objective": "maximize"
207 | },
208 | "trials": 1
209 | }
210 |
--------------------------------------------------------------------------------
/yolov5/utils/loggers/wandb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/yolov5/utils/loggers/wandb/__init__.py
--------------------------------------------------------------------------------
/yolov5/utils/segment/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/yolov5/utils/segment/__init__.py
--------------------------------------------------------------------------------
/yolov5/utils/segment/augmentations.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Image augmentation functions
4 | """
5 |
6 | import math
7 | import random
8 |
9 | import cv2
10 | import numpy as np
11 |
12 | from ..augmentations import box_candidates
13 | from ..general import resample_segments, segment2box
14 |
15 |
def mixup(im, labels, segments, im2, labels2, segments2):
    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
    # Blend both images with a Beta(32, 32)-distributed ratio, then pool labels/segments
    ratio = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
    blended = (im * ratio + im2 * (1 - ratio)).astype(np.uint8)
    merged_labels = np.concatenate((labels, labels2), 0)
    merged_segments = np.concatenate((segments, segments2), 0)
    return blended, merged_labels, merged_segments
23 |
24 |
def random_perspective(im,
                       targets=(),
                       segments=(),
                       degrees=10,
                       translate=.1,
                       scale=.1,
                       shear=10,
                       perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]
    """Apply a random perspective/affine warp to an image and its segment labels.

    Composes center, perspective, rotation+scale, shear and translation
    matrices (sampled within the given ranges), warps the image, transforms
    each segment's points with the same matrix, and rebuilds target boxes
    from the warped segments.

    Returns:
        (im, targets, new_segments): warped image, surviving targets with
        updated xyxy boxes, and their transformed segment point arrays.
    """

    height = im.shape[0] + border[0] * 2  # shape(h,w,c)
    width = im.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width)  # x translation (pixels)
    T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height)  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    # Skip the warp entirely when M is identity and no border padding was requested
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(im[:, :, ::-1])  # base
    # ax[1].imshow(im2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    new_segments = []
    if n:
        new = np.zeros((n, 4))
        segments = resample_segments(segments)  # upsample
        for i, segment in enumerate(segments):
            # Homogeneous coordinates: append a ones column so M can translate
            xy = np.ones((len(segment), 3))
            xy[:, :2] = segment
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2])  # perspective rescale or affine

            # clip
            new[i] = segment2box(xy, width, height)
            new_segments.append(xy)

        # filter candidates
        # box1 is scaled by s so the area comparison is in warped-image units
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
        targets = targets[i]
        targets[:, 1:5] = new[i]
        new_segments = np.array(new_segments)[i]

    return im, targets, new_segments
105 |
--------------------------------------------------------------------------------
/yolov5/utils/triton.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | """ Utils to interact with the Triton Inference Server
3 | """
4 |
5 | import typing
6 | from urllib.parse import urlparse
7 |
8 | import torch
9 |
10 |
class TritonRemoteModel:
    """ A wrapper over a model served by the Triton Inference Server. It can
    be configured to communicate over GRPC or HTTP. It accepts Torch Tensors
    as input and returns them as outputs.
    """

    def __init__(self, url: str):
        """
        Keyword arguments:
        url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000
        """

        parsed_url = urlparse(url)
        if parsed_url.scheme == 'grpc':
            from tritonclient.grpc import InferenceServerClient, InferInput

            self.client = InferenceServerClient(parsed_url.netloc)  # Triton GRPC client
            model_repository = self.client.get_model_repository_index()
            # GRPC returns a protobuf message: models are attributes
            self.model_name = model_repository.models[0].name
            self.metadata = self.client.get_model_metadata(self.model_name, as_json=True)
        else:
            from tritonclient.http import InferenceServerClient, InferInput

            self.client = InferenceServerClient(parsed_url.netloc)  # Triton HTTP client
            model_repository = self.client.get_model_repository_index()
            # HTTP returns plain JSON: models are dicts
            self.model_name = model_repository[0]['name']
            self.metadata = self.client.get_model_metadata(self.model_name)

        # Deduplicated: both protocol branches built identical placeholder
        # factories, so define it once here, closing over whichever InferInput
        # class was imported above.
        def create_input_placeholders() -> typing.List[InferInput]:
            return [
                InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']]

        self._create_input_placeholders_fn = create_input_placeholders

    @property
    def runtime(self):
        """Returns the model runtime ('backend' key, falling back to 'platform')."""
        return self.metadata.get('backend', self.metadata.get('platform'))

    def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]:
        """ Invokes the model. Parameters can be provided via args or kwargs.
        args, if provided, are assumed to match the order of inputs of the model.
        kwargs are matched with the model input names.
        """
        inputs = self._create_inputs(*args, **kwargs)
        response = self.client.infer(model_name=self.model_name, inputs=inputs)
        result = []
        for output in self.metadata['outputs']:
            tensor = torch.as_tensor(response.as_numpy(output['name']))
            result.append(tensor)
        # Unwrap single-output models for caller convenience
        return result[0] if len(result) == 1 else result

    def _create_inputs(self, *args, **kwargs):
        """Fill input placeholders from positional args (by order) or kwargs (by name).

        Raises:
            RuntimeError: if no inputs are given, if both args and kwargs are
                given, or if the positional count mismatches the model inputs.
        """
        args_len, kwargs_len = len(args), len(kwargs)
        if not args_len and not kwargs_len:
            raise RuntimeError('No inputs provided.')
        if args_len and kwargs_len:
            raise RuntimeError('Cannot specify args and kwargs at the same time')

        placeholders = self._create_input_placeholders_fn()
        if args_len:
            if args_len != len(placeholders):
                raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.')
            for placeholder, value in zip(placeholders, args):
                placeholder.set_data_from_numpy(value.cpu().numpy())
        else:
            for placeholder in placeholders:
                value = kwargs[placeholder.name]
                placeholder.set_data_from_numpy(value.cpu().numpy())
        return placeholders
86 |
--------------------------------------------------------------------------------
/yolov5/yolov5s.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/entbappy/End-to-end-Sign-Language-Detection/0bbe4e9fb7fccec9869dad566fec7c83c0497da9/yolov5/yolov5s.pt
--------------------------------------------------------------------------------