├── .flaskenv ├── .gitignore ├── LICENSE ├── README.md ├── app.py ├── classes-pest.txt ├── detect.py ├── errors.py ├── export.py ├── forms.py ├── initsql.py ├── model.py ├── models ├── SimAM.py ├── __init__.py ├── common.py ├── experimental.py ├── hub │ ├── anchors.yaml │ ├── yolov3-spp.yaml │ ├── yolov3-tiny.yaml │ ├── yolov3.yaml │ ├── yolov5-bifpn.yaml │ ├── yolov5-fpn.yaml │ ├── yolov5-p2.yaml │ ├── yolov5-p34.yaml │ ├── yolov5-p6.yaml │ ├── yolov5-p7.yaml │ ├── yolov5-panet.yaml │ ├── yolov5l6.yaml │ ├── yolov5m6.yaml │ ├── yolov5n6.yaml │ ├── yolov5s-LeakyReLU.yaml │ ├── yolov5s-ghost.yaml │ ├── yolov5s-transformer.yaml │ ├── yolov5s6.yaml │ └── yolov5x6.yaml ├── segment │ ├── yolov5l-seg.yaml │ ├── yolov5m-seg.yaml │ ├── yolov5n-seg.yaml │ ├── yolov5s-seg.yaml │ └── yolov5x-seg.yaml ├── tf.py ├── yolo.py ├── yolov5l.yaml ├── yolov5m.yaml ├── yolov5n.yaml ├── yolov5s.yaml └── yolov5x.yaml ├── shibie.py ├── static ├── Chart.js ├── amaze.css ├── bootstrap.min.css ├── detection_image.css ├── image │ └── loading01.png ├── jquery-3.1.1.min.js ├── jquery.table2excel.js ├── js │ ├── Trash_disposal.js │ ├── load_file.js │ └── recycling_up.js ├── style.css └── weight │ ├── best.pt │ └── laji.yaml ├── templates ├── 400.html ├── 404.html ├── 500.html ├── Trash_detect.html ├── Trash_disposal.html ├── base.html ├── blacklist.html ├── commodity.html ├── commodity_add.html ├── commodity_change.html ├── community.html ├── community_add.html ├── community_change.html ├── detect_result.html ├── disposal.html ├── disposal_trash.html ├── index.html ├── logging.html ├── login.html ├── popular_science.html ├── popular_science_add.html ├── popular_science_change.html ├── popular_science_instance.html ├── recycling.html ├── recycling_add.html └── user_role.html ├── test ├── 1.jpg ├── 2.jpg ├── 20210000115.jpg ├── 20210000116.jpg ├── 20210000131.jpg ├── 20210000160.jpg ├── 20210000169.jpg ├── 20210000192.jpg ├── 20210000232.jpg ├── 20210000242.jpg ├── 20210000273.jpg ├── 20210000310.jpg ├── 20210000319.jpg ├── 20210000321.jpg ├── 20210000365.jpg ├── 20210000401.jpg ├── 20210001074.jpg ├── 20210001588.jpg ├── 20210001757.jpg ├── 20210003389.jpg ├── 20210003444.jpg ├── 20210004790.jpg ├── 20210007463.jpg ├── 20210007469.jpg ├── 20210007481.jpg ├── 20210008626.jpg ├── 20210008674.jpg ├── 20210009108.jpg ├── 20210009665.jpg ├── 20210011644.jpg ├── 20210012487.jpg └── 3.jpg ├── utils ├── __init__.py ├── activations.py ├── augmentations.py ├── autoanchor.py ├── autobatch.py ├── aws │ ├── __init__.py │ ├── mime.sh │ ├── resume.py │ └── userdata.sh ├── callbacks.py ├── dataloaders.py ├── docker │ ├── Dockerfile │ ├── Dockerfile-arm64 │ └── Dockerfile-cpu ├── downloads.py ├── flask_rest_api │ ├── README.md │ ├── example_request.py │ └── restapi.py ├── general.py ├── google_app_engine │ ├── Dockerfile │ ├── additional_requirements.txt │ └── app.yaml ├── loggers │ ├── __init__.py │ ├── clearml │ │ ├── README.md │ │ ├── __init__.py │ │ ├── clearml_utils.py │ │ └── hpo.py │ ├── comet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── comet_utils.py │ │ ├── hpo.py │ │ └── optimizer_config.json │ └── wandb │ │ ├── __init__.py │ │ └── wandb_utils.py ├── loss.py ├── metrics.py ├── plots.py ├── segment │ ├── __init__.py │ ├── augmentations.py │ ├── dataloaders.py │ ├── general.py │ ├── loss.py │ ├── metrics.py │ └── plots.py ├── torch_utils.py └── triton.py ├── views.py └── 演示视频.mp4 /.flaskenv: -------------------------------------------------------------------------------- 1 | FLASK_ENV=development 
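
The tree above mixes the Flask web application (app.py, views.py, model.py, templates/, static/) with a vendored copy of the Ultralytics YOLOv5 code (models/, utils/) and a trained weight file at static/weight/best.pt. As a rough illustration of how the two halves connect, here is a minimal sketch — not part of the repository — of loading that weight through the bundled loader in models/experimental.py; it assumes PyTorch is installed and that the working directory is the project root shown above:

```python
# Hypothetical snippet (not in the repo): load the shipped detector weights with the
# bundled YOLOv5 helper. Assumes torch is installed and the paths match the tree above.
import torch
from models.experimental import attempt_load  # defined later in this dump

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = attempt_load("static/weight/best.pt", device=device)  # fused FP32 model in eval mode
print(model.names)  # class names, i.e. the garbage categories listed in classes-pest.txt
```
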
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Garbage-Identification-System 2 | 本项目基于YOLOv8目标检测技术开发了一套智慧垃圾管理平台,集成了垃圾分类识别、积分奖励、旧物回收、社区管理等功能。通过用户权限管理体系,平台能够自动识别用户投放的垃圾种类,记录垃圾分类行为并根据表现给予相应的积分或处罚。回收员与管理员可管理社区垃圾处理和旧物回收流程,监督垃圾分类情况并维护积分商城,实现垃圾分类智能化、旧物回收便捷化,提升社区环保意识和管理效率。 3 | 4 | 5 | # 功能 6 | ## 用户管理 7 | - 登录 8 | - 注册 9 | - 用户: 10 | - 用户名 11 | - 密码 12 | - 积分 13 | - 不文明次数 14 | - 是否拉黑 15 | 16 | ## 权限管理 17 | - 权限 18 | - 用户 回收员 超级管理员 19 | 20 | ## 不文明管理 21 | - 超级管理员 22 | - 拉黑 23 | 24 | ## 首页 25 | 26 | ## 垃圾识别 27 | - 上传图片识别 28 | - 返回识别结果显示详细信息 29 | 30 | ## 垃圾投放 31 | - 上传图片识别 32 | - 返回识别后结果,并自动记录积分和不文明行为 33 | - 不文明行为:垃圾混投放 34 | - 垃圾: 35 | - 用户名 36 | - 社区 37 | - 可回收数量 38 | - 不可回收数量 39 | - 有害数量 40 | - 厨余垃圾数量 41 | 42 | ## 积分商城 43 | - 商城列表 44 | - 用户 45 | - 兑换商品-减少积分 46 | - 回收员、管理员 47 | - 上传商品 48 | - 商品: 49 | - 商品名称 50 | - 商品数量 51 | - 商品积分 52 | - 修改商品 53 | - 删除商品 54 | 55 | ## 垃圾科普 56 | - 科普列表 57 | - 用户 58 | - 详情 59 | - 回收员、管理员 60 | - 添加科普 61 | - 科普: 62 | - 用户名 63 | - 大类 64 | - 垃圾名称 65 | - 垃圾介绍 66 | - 删除 67 | - 修改 68 | 69 | ## 旧物回收 70 | - 旧物列表 71 | - 用户 72 | - 添加 73 | - 删除 74 | - 旧物: 75 | - 用户名 76 | - 旧物名称 77 | - 旧物类别 78 | - 旧物照片 79 | - 取件码 80 | - 是否已回收 81 | - 回收员、管理员 82 | - 回收 83 | - 弹框输入取件码 84 | 85 | ## 社区 86 | - 社区列表 87 | - 社区名 88 | - 可回收剩余数量 89 | - 不可回收剩余数量 90 | - 有害剩余数量 91 | - 厨余剩余数量 92 | - 回收员、管理员 93 | - 添加 94 | - 修改 95 | - 删除 96 | - 回收 97 | - 社区: 98 | - 社区名 99 | - 可回收剩余数量 100 | - 不可回收剩余数量 101 | - 有害剩余数量 102 | - 厨余剩余数量 103 | 104 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from flask_sqlalchemy import SQLAlchemy 3 | from flask_login import LoginManager 4 | 5 | # 实例初始化环境信息文件 6 | app = Flask(__name__) 7 | app.config['SECRET_KEY'] = 'dev' # 等同于 app.secret_key = 'dev' 设置签名所需的钥匙 8 | login_manager = LoginManager(app) # 实例化扩展类 9 | 10 | app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:qwe123@192.168.3.25/laji' 11 | 12 | app.jinja_env.auto_reload = True 13 | app.config['TEMPLATES_AUTO_RELOAD'] = True 14 | # 热更新HTML模板文件 15 | 16 | db = SQLAlchemy(app) 17 | # 在扩展类实例化前加载配置 18 | login_manager.login_view = 'login' # 定义错误提示信息 19 | 20 | 21 | @login_manager.user_loader 22 | def 
load_user(user_id): # 创建用户加载回调函数,接受用户ID作为参数 23 | from model import User 24 | user = User.query.get(int(user_id)) # 用ID作为user模型的主键查询对应的用户 25 | return user # 返回用户对象 26 | 27 | 28 | 29 | import views, errors, forms 30 | 31 | if __name__ == '__main__': 32 | app.run(debug=True) 33 | -------------------------------------------------------------------------------- /classes-pest.txt: -------------------------------------------------------------------------------- 1 | 可回收:'书籍纸张', '金属厨具', '易拉罐', '饮料瓶', '食用油桶', '快递纸袋', '金属食品罐', '酒瓶', '调料瓶, '包', '塑料衣架','旧衣服', '锅', '金属器皿', '纸盒纸箱','毛绒玩具', '玻璃器皿' , '塑料器皿', '插头电线', '塑料玩具', '鞋', '垃圾桶', '枕头' 2 | 3 | 不可回收:'污损塑料', '一次性快餐盒', '花盆, '牙签','污损用纸', '筷子', '陶瓷器皿', '洗护用品', '软膏' 4 | 5 | 有害垃圾:'烟蒂', '过期药物', '干电池', '充电宝' 6 | 7 | 厨余垃圾:'大骨头', '鱼骨', '蛋壳', '菜帮菜叶', '剩饭剩菜', '茶叶渣', '果皮果肉', '砧板' 8 | 9 | 10 | 11 | classes = ['书籍纸张', '金属厨具', '砧板', '污损塑料', '筷子', '陶瓷器皿', '插头电线', '洗护用品', '塑料玩具', '鞋', '果皮果肉', '玻璃器皿', '毛绒玩具', '污损用纸', '塑料器皿', '纸盒纸箱', '花盆', '包', '金属器皿', '干电池', '调料瓶', '菜帮菜叶', '锅', '食用油桶', '饮料瓶', '充电宝', '易拉罐', '牙签', '剩饭剩菜', '大骨头', '鱼骨', '垃圾桶', '酒瓶', '金属食品罐', '一次性快餐盒', '烟蒂', '旧衣服', '塑料衣架', '枕头', '过期药物', '茶叶渣', '软膏', '蛋壳', '快递纸袋'] -------------------------------------------------------------------------------- /errors.py: -------------------------------------------------------------------------------- 1 | from app import app 2 | from flask import render_template 3 | 4 | # 错误状态处理文件 5 | @app.errorhandler(404) # 传入要处理的错误代码 6 | def page_not_found(e): # 接受异常对象作为参数 7 | return render_template('404.html'), 404 # 返回模板和状态码 8 | 9 | 10 | @app.errorhandler(400) # 传入要处理的错误代码 11 | def page_not_found(e): # 接受异常对象作为参数 12 | return render_template('400.html'), 400 # 返回模板和状态码 13 | 14 | 15 | @app.errorhandler(500) # 传入要处理的错误代码 16 | def page_not_found(e): # 接受异常对象作为参数 17 | return render_template('500.html'), 500 # 返回模板和状态码 18 | -------------------------------------------------------------------------------- /forms.py: -------------------------------------------------------------------------------- 1 | from flask_wtf import FlaskForm # FlaskForm 为表单基类 2 | from wtforms import StringField, PasswordField, SubmitField # 导入字符串字段,密码字段,提交字段 3 | from wtforms.validators import DataRequired, ValidationError 4 | 5 | 6 | class LoginForm(FlaskForm): 7 | account = StringField( 8 | # 标签 9 | label="账号", 10 | # 验证器 11 | validators=[ 12 | DataRequired('请输入用户名') 13 | ], 14 | description="账号", 15 | # 附加选项,会自动在前端判别 16 | render_kw={ 17 | "class": "form-control", 18 | "placeholder": "请输入账号!", 19 | "required": 'required' # 表示输入框不能为空,并有提示信息 20 | } 21 | ) 22 | 23 | pwd = PasswordField( 24 | # 标签 25 | label="密码", 26 | # 验证器 27 | validators=[ 28 | DataRequired('请输入密码') 29 | ], 30 | description="密码", 31 | 32 | # 附加选项(主要是前端样式),会自动在前端判别 33 | render_kw={ 34 | "class": "form-control", 35 | "placeholder": "请输入密码!", 36 | "required": 'required' # 表示输入框不能为空 37 | } 38 | ) 39 | 40 | submit = SubmitField( 41 | label="登录", 42 | render_kw={ 43 | "class": "btn btn-primary btn-block btn-flat", 44 | } 45 | ) 46 | -------------------------------------------------------------------------------- /initsql.py: -------------------------------------------------------------------------------- 1 | from app import db, app 2 | from views import user_datastore 3 | from werkzeug.security import generate_password_hash,check_password_hash 4 | 5 | # 权限初始化文件 6 | # 初始化超级管理员账号 7 | with app.app_context(): 8 | admin = user_datastore.create_user(username='admin', password='123456') 9 | # 生成普通用户角色和admin用户角色 10 | user_role = user_datastore.create_role(name='User', 
description='Generic user role') 11 | admin_role = user_datastore.create_role(name='Admin', description='Admin user role') 12 | Root_role = user_datastore.create_role(name='Root', description='Root user role') 13 | # 为admin添加Admin角色 14 | user_datastore.add_role_to_user(admin, Root_role) 15 | 16 | # 创建应用程序上下文 17 | 18 | # 提交数据库会话 19 | db.session.commit() 20 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | from app import db, app 2 | from flask_security import RoleMixin, UserMixin 3 | 4 | 5 | 6 | roles_users = db.Table('roles_users', 7 | db.Column('user_id', db.Integer(), db.ForeignKey('user.id')), 8 | db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))) 9 | 10 | 11 | class Role(db.Model, RoleMixin): # 权限 12 | # __tablename__ = 'role' 13 | id = db.Column(db.Integer(), primary_key=True) 14 | name = db.Column(db.String(80), unique=True) 15 | description = db.Column(db.String(255)) 16 | 17 | 18 | class User(db.Model, UserMixin): # 用户 19 | # __tablename__ = 'user' 20 | id = db.Column(db.Integer, primary_key=True) 21 | username = db.Column(db.String(128), nullable=False) 22 | password = db.Column(db.String(128), nullable=False) 23 | integral = db.Column(db.Integer, nullable=False,default=0) 24 | uncivilized = db.Column(db.Integer, nullable=False,default=0) 25 | blacklist = db.Column(db.Integer, nullable=False,default=0) 26 | active = db.Column(db.Boolean()) 27 | roles = db.relationship('Role', secondary=roles_users, 28 | backref=db.backref('users', lazy='dynamic')) 29 | 30 | class Trash(db.Model): 31 | id = db.Column(db.Integer, primary_key=True) 32 | username = db.Column(db.String(128), nullable=False) 33 | community = db.Column(db.String(128), nullable=False) 34 | recyclable_trash = db.Column(db.Integer, nullable=False,default=0) 35 | non_recyclable_trash = db.Column(db.Integer, nullable=False,default=0) 36 | hazardous_trash = db.Column(db.Integer, nullable=False,default=0) 37 | kitchen_trash = db.Column(db.Integer, nullable=False,default=0) 38 | time = db.Column(db.DateTime, nullable=False) 39 | 40 | class Commodity(db.Model): 41 | id = db.Column(db.Integer, primary_key=True) 42 | commodity = db.Column(db.String(128), nullable=False) 43 | number = db.Column(db.Integer, nullable=False) 44 | integral = db.Column(db.Integer, nullable=False,default=1000) 45 | 46 | 47 | class Popular_science(db.Model): 48 | id = db.Column(db.Integer, primary_key=True) 49 | username = db.Column(db.String(128), nullable=False) 50 | classification = db.Column(db.String(128), nullable=False) 51 | name = db.Column(db.String(128), nullable=False) 52 | introduction = db.Column(db.Text) 53 | 54 | class Recycling(db.Model): 55 | id = db.Column(db.Integer, primary_key=True) 56 | username = db.Column(db.String(128), nullable=False) 57 | name = db.Column(db.String(128), nullable=False) 58 | classification = db.Column(db.String(128), nullable=False) 59 | image_path = db.Column(db.String(128), nullable=False) 60 | pickup_code = db.Column(db.Integer, nullable=False) 61 | recycling = db.Column(db.Integer, nullable=False,default=0) 62 | 63 | 64 | class Community(db.Model): 65 | id = db.Column(db.Integer, primary_key=True) 66 | community = db.Column(db.String(128), nullable=False) 67 | recyclable_trash = db.Column(db.Integer, nullable=False, default=0) 68 | non_recyclable_trash = db.Column(db.Integer, nullable=False, default=0) 69 | hazardous_trash = db.Column(db.Integer, nullable=False, 
default=0) 70 | kitchen_trash = db.Column(db.Integer, nullable=False, default=0) 71 | recyclable_trash_original = db.Column(db.Integer, nullable=False, default=0) 72 | non_recyclable_trash_original = db.Column(db.Integer, nullable=False, default=0) 73 | hazardous_trash_original = db.Column(db.Integer, nullable=False, default=0) 74 | kitchen_trash_original = db.Column(db.Integer, nullable=False, default=0) 75 | 76 | 77 | class Linshi(db.Model): 78 | id = db.Column(db.Integer, primary_key=True) 79 | username = db.Column(db.String(128), nullable=False) 80 | image_path = db.Column(db.String(128), nullable=False) 81 | cls = db.Column(db.Text, nullable=False) 82 | cls_number = db.Column(db.String(128), nullable=False) 83 | cls_4 = db.Column(db.String(128), nullable=False) 84 | cls_4_counts = db.Column(db.String(128), nullable=False) 85 | 86 | with app.app_context(): 87 | db.create_all() 88 | -------------------------------------------------------------------------------- /models/SimAM.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class SimAM(torch.nn.Module): 6 | def __init__(self, e_lambda=1e-4): 7 | super(SimAM, self).__init__() 8 | 9 | self.activaton = nn.Sigmoid() 10 | self.e_lambda = e_lambda 11 | 12 | def __repr__(self): 13 | s = self.__class__.__name__ + '(' 14 | s += ('lambda=%f)' % self.e_lambda) 15 | return s 16 | 17 | @staticmethod 18 | def get_module_name(): 19 | return "simam" 20 | 21 | def forward(self, x): 22 | b, c, h, w = x.size() 23 | 24 | n = w * h - 1 25 | 26 | x_minus_mu_square = (x - x.mean(dim=[2, 3], keepdim=True)).pow(2) 27 | y = x_minus_mu_square / (4 * (x_minus_mu_square.sum(dim=[2, 3], keepdim=True) / n + self.e_lambda)) + 0.5 28 | 29 | return x * self.activaton(y) 30 | 31 | 32 | if __name__ == '__main__': 33 | input = torch.randn(3, 64, 7, 7) 34 | model = SimAM() 35 | outputs = model(input) 36 | print(outputs.shape) -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/models/__init__.py -------------------------------------------------------------------------------- /models/experimental.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """Experimental modules.""" 3 | 4 | import math 5 | 6 | import numpy as np 7 | import torch 8 | import torch.nn as nn 9 | 10 | from utils.downloads import attempt_download 11 | 12 | 13 | class Sum(nn.Module): 14 | """Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070.""" 15 | 16 | def __init__(self, n, weight=False): 17 | """Initializes a module to sum outputs of layers with number of inputs `n` and optional weighting, supporting 2+ 18 | inputs. 
19 | """ 20 | super().__init__() 21 | self.weight = weight # apply weights boolean 22 | self.iter = range(n - 1) # iter object 23 | if weight: 24 | self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights 25 | 26 | def forward(self, x): 27 | """Processes input through a customizable weighted sum of `n` inputs, optionally applying learned weights.""" 28 | y = x[0] # no weight 29 | if self.weight: 30 | w = torch.sigmoid(self.w) * 2 31 | for i in self.iter: 32 | y = y + x[i + 1] * w[i] 33 | else: 34 | for i in self.iter: 35 | y = y + x[i + 1] 36 | return y 37 | 38 | 39 | class MixConv2d(nn.Module): 40 | """Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595.""" 41 | 42 | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): 43 | """Initializes MixConv2d with mixed depth-wise convolutional layers, taking input and output channels (c1, c2), 44 | kernel sizes (k), stride (s), and channel distribution strategy (equal_ch). 45 | """ 46 | super().__init__() 47 | n = len(k) # number of convolutions 48 | if equal_ch: # equal c_ per group 49 | i = torch.linspace(0, n - 1e-6, c2).floor() # c2 indices 50 | c_ = [(i == g).sum() for g in range(n)] # intermediate channels 51 | else: # equal weight.numel() per group 52 | b = [c2] + [0] * n 53 | a = np.eye(n + 1, n, k=-1) 54 | a -= np.roll(a, 1, axis=1) 55 | a *= np.array(k) ** 2 56 | a[0] = 1 57 | c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b 58 | 59 | self.m = nn.ModuleList( 60 | [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)] 61 | ) 62 | self.bn = nn.BatchNorm2d(c2) 63 | self.act = nn.SiLU() 64 | 65 | def forward(self, x): 66 | """Performs forward pass by applying SiLU activation on batch-normalized concatenated convolutional layer 67 | outputs. 68 | """ 69 | return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) 70 | 71 | 72 | class Ensemble(nn.ModuleList): 73 | """Ensemble of models.""" 74 | 75 | def __init__(self): 76 | """Initializes an ensemble of models to be used for aggregated predictions.""" 77 | super().__init__() 78 | 79 | def forward(self, x, augment=False, profile=False, visualize=False): 80 | """Performs forward pass aggregating outputs from an ensemble of models..""" 81 | y = [module(x, augment, profile, visualize)[0] for module in self] 82 | # y = torch.stack(y).max(0)[0] # max ensemble 83 | # y = torch.stack(y).mean(0) # mean ensemble 84 | y = torch.cat(y, 1) # nms ensemble 85 | return y, None # inference, train output 86 | 87 | 88 | def attempt_load(weights, device=None, inplace=True, fuse=True): 89 | """ 90 | Loads and fuses an ensemble or single YOLOv5 model from weights, handling device placement and model adjustments. 91 | 92 | Example inputs: weights=[a,b,c] or a single model weights=[a] or weights=a. 
93 | """ 94 | from models.yolo import Detect, Model 95 | 96 | model = Ensemble() 97 | for w in weights if isinstance(weights, list) else [weights]: 98 | ckpt = torch.load(attempt_download(w), map_location="cpu") # load 99 | ckpt = (ckpt.get("ema") or ckpt["model"]).to(device).float() # FP32 model 100 | 101 | # Model compatibility updates 102 | if not hasattr(ckpt, "stride"): 103 | ckpt.stride = torch.tensor([32.0]) 104 | if hasattr(ckpt, "names") and isinstance(ckpt.names, (list, tuple)): 105 | ckpt.names = dict(enumerate(ckpt.names)) # convert to dict 106 | 107 | model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, "fuse") else ckpt.eval()) # model in eval mode 108 | 109 | # Module updates 110 | for m in model.modules(): 111 | t = type(m) 112 | if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): 113 | m.inplace = inplace 114 | if t is Detect and not isinstance(m.anchor_grid, list): 115 | delattr(m, "anchor_grid") 116 | setattr(m, "anchor_grid", [torch.zeros(1)] * m.nl) 117 | elif t is nn.Upsample and not hasattr(m, "recompute_scale_factor"): 118 | m.recompute_scale_factor = None # torch 1.11.0 compatibility 119 | 120 | # Return model 121 | if len(model) == 1: 122 | return model[-1] 123 | 124 | # Return detection ensemble 125 | print(f"Ensemble created with {weights}\n") 126 | for k in "names", "nc", "yaml": 127 | setattr(model, k, getattr(model[0], k)) 128 | model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride 129 | assert all(model[0].nc == m.nc for m in model), f"Models have different class counts: {[m.nc for m in model]}" 130 | return model 131 | -------------------------------------------------------------------------------- /models/hub/anchors.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | # Default anchors for COCO data 3 | 4 | # P5 ------------------------------------------------------------------------------------------------------------------- 5 | # P5-640: 6 | anchors_p5_640: 7 | - [10, 13, 16, 30, 33, 23] # P3/8 8 | - [30, 61, 62, 45, 59, 119] # P4/16 9 | - [116, 90, 156, 198, 373, 326] # P5/32 10 | 11 | # P6 ------------------------------------------------------------------------------------------------------------------- 12 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 13 | anchors_p6_640: 14 | - [9, 11, 21, 19, 17, 41] # P3/8 15 | - [43, 32, 39, 70, 86, 64] # P4/16 16 | - [65, 131, 134, 130, 120, 265] # P5/32 17 | - [282, 180, 247, 354, 512, 387] # P6/64 18 | 19 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 20 | anchors_p6_1280: 21 | - [19, 27, 44, 40, 38, 94] # P3/8 22 | - [96, 68, 86, 152, 180, 137] # P4/16 23 | - [140, 301, 303, 264, 238, 542] # P5/32 24 | - [436, 615, 739, 380, 925, 792] # P6/64 25 | 26 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 27 | anchors_p6_1920: 28 | - [28, 41, 67, 59, 57, 141] # P3/8 29 | - [144, 103, 129, 227, 270, 205] 
# P4/16 30 | - [209, 452, 455, 396, 358, 812] # P5/32 31 | - [653, 922, 1109, 570, 1387, 1187] # P6/64 32 | 33 | # P7 ------------------------------------------------------------------------------------------------------------------- 34 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 35 | anchors_p7_640: 36 | - [11, 11, 13, 30, 29, 20] # P3/8 37 | - [30, 46, 61, 38, 39, 92] # P4/16 38 | - [78, 80, 146, 66, 79, 163] # P5/32 39 | - [149, 150, 321, 143, 157, 303] # P6/64 40 | - [257, 402, 359, 290, 524, 372] # P7/128 41 | 42 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 43 | anchors_p7_1280: 44 | - [19, 22, 54, 36, 32, 77] # P3/8 45 | - [70, 83, 138, 71, 75, 173] # P4/16 46 | - [165, 159, 148, 334, 375, 151] # P5/32 47 | - [334, 317, 251, 626, 499, 474] # P6/64 48 | - [750, 326, 534, 814, 1079, 818] # P7/128 49 | 50 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 51 | anchors_p7_1920: 52 | - [29, 34, 81, 55, 47, 115] # P3/8 53 | - [105, 124, 207, 107, 113, 259] # P4/16 54 | - [247, 238, 222, 500, 563, 227] # P5/32 55 | - [501, 476, 376, 939, 749, 711] # P6/64 56 | - [1126, 489, 801, 1222, 1618, 1227] # P7/128 57 | -------------------------------------------------------------------------------- /models/hub/yolov3-spp.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [32, 3, 1]], # 0 17 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 18 | [-1, 1, Bottleneck, [64]], 19 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 20 | [-1, 2, Bottleneck, [128]], 21 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 22 | [-1, 8, Bottleneck, [256]], 23 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 24 | [-1, 8, Bottleneck, [512]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 26 | [-1, 4, Bottleneck, [1024]], # 10 27 | ] 28 | 29 | # YOLOv3-SPP head 30 | head: [ 31 | [-1, 1, Bottleneck, [1024, False]], 32 | [-1, 1, SPP, [512, [5, 9, 13]]], 33 | [-1, 1, Conv, [1024, 3, 1]], 34 | [-1, 1, Conv, [512, 1, 1]], 35 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 36 | 37 | [-2, 1, Conv, [256, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 39 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Bottleneck, [512, False]], 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 44 | 45 | [-2, 1, Conv, [128, 1, 1]], 46 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 47 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 48 | [-1, 1, Bottleneck, [256, False]], 49 | [-1, 2, Bottleneck, [256, 
False]], # 27 (P3/8-small) 50 | 51 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 52 | ] 53 | -------------------------------------------------------------------------------- /models/hub/yolov3-tiny.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10, 14, 23, 27, 37, 58] # P4/16 9 | - [81, 82, 135, 169, 344, 319] # P5/32 10 | 11 | # YOLOv3-tiny backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [ 15 | [-1, 1, Conv, [16, 3, 1]], # 0 16 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 17 | [-1, 1, Conv, [32, 3, 1]], 18 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 19 | [-1, 1, Conv, [64, 3, 1]], 20 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 21 | [-1, 1, Conv, [128, 3, 1]], 22 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 23 | [-1, 1, Conv, [256, 3, 1]], 24 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 25 | [-1, 1, Conv, [512, 3, 1]], 26 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 27 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 28 | ] 29 | 30 | # YOLOv3-tiny head 31 | head: [ 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [256, 1, 1]], 34 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [128, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) 40 | 41 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) 42 | ] 43 | -------------------------------------------------------------------------------- /models/hub/yolov3.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [32, 3, 1]], # 0 17 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 18 | [-1, 1, Bottleneck, [64]], 19 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 20 | [-1, 2, Bottleneck, [128]], 21 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 22 | [-1, 8, Bottleneck, [256]], 23 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 24 | [-1, 8, Bottleneck, [512]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 26 | [-1, 4, Bottleneck, [1024]], # 10 27 | ] 28 | 29 | # YOLOv3 head 30 | head: [ 31 | [-1, 1, Bottleneck, [1024, False]], 32 | [-1, 1, Conv, [512, 1, 1]], 33 | [-1, 1, Conv, [1024, 3, 1]], 34 | [-1, 1, Conv, [512, 1, 1]], 35 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 36 | 37 | [-2, 1, Conv, [256, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 39 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Bottleneck, [512, False]], 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 44 | 45 | [-2, 1, Conv, [128, 1, 1]], 46 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 47 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 48 | [-1, 1, Bottleneck, [256, False]], 49 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 50 | 51 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 52 | ] 53 | 
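
The anchor presets and the YOLOv3 variants above, like the yolov5-* files that follow, are standard Ultralytics model-definition YAMLs: `nc` (class count), `depth_multiple`/`width_multiple` scaling factors, an `anchors` block, and `backbone`/`head` lists whose rows are `[from, number, module, args]`. A minimal sketch, assuming the vendored models/yolo.py exposes the usual YOLOv5 `Model(cfg, ch, nc)` constructor and that PyTorch is installed, of building one of these configs and running a dummy forward pass:

```python
# Sketch only: instantiate a hub config and run it on a dummy image.
# Assumes the standard YOLOv5 Model class from the vendored models/yolo.py.
import torch
from models.yolo import Model

model = Model("models/hub/yolov3-spp.yaml", ch=3, nc=80)  # ch = input channels, nc = classes
model.eval()
dummy = torch.zeros(1, 3, 640, 640)  # one 640x640 RGB image
with torch.no_grad():
    preds = model(dummy)  # Detect head returns (inference tensor, per-scale features) in eval
print(sum(p.numel() for p in model.parameters()), "parameters")
```

The project's own inference path presumably goes through detect.py / shibie.py with the custom class configuration in static/weight/laji.yaml rather than these COCO-class presets; the sketch above only shows how the architecture YAMLs are consumed.
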
-------------------------------------------------------------------------------- /models/hub/yolov5-bifpn.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 BiFPN head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/hub/yolov5-fpn.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 FPN head 29 | head: [ 30 | [-1, 3, C3, [1024, False]], # 10 (P5/32-large) 31 | 32 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 33 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 34 | [-1, 1, Conv, [512, 1, 1]], 35 | [-1, 3, C3, [512, False]], # 14 (P4/16-medium) 36 | 37 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 38 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 39 | [-1, 1, Conv, [256, 1, 1]], 40 | [-1, 3, C3, [256, False]], # 18 (P3/8-small) 41 | 42 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 43 | ] 44 | -------------------------------------------------------------------------------- /models/hub/yolov5-p2.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 
# number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ 13 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 6, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]], 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 3, C3, [1024]], 22 | [-1, 1, SPPF, [1024, 5]], # 9 23 | ] 24 | 25 | # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs 26 | head: [ 27 | [-1, 1, Conv, [512, 1, 1]], 28 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 29 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 30 | [-1, 3, C3, [512, False]], # 13 31 | 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 34 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 35 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 36 | 37 | [-1, 1, Conv, [128, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 39 | [[-1, 2], 1, Concat, [1]], # cat backbone P2 40 | [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) 41 | 42 | [-1, 1, Conv, [128, 3, 2]], 43 | [[-1, 18], 1, Concat, [1]], # cat head P3 44 | [-1, 3, C3, [256, False]], # 24 (P3/8-small) 45 | 46 | [-1, 1, Conv, [256, 3, 2]], 47 | [[-1, 14], 1, Concat, [1]], # cat head P4 48 | [-1, 3, C3, [512, False]], # 27 (P4/16-medium) 49 | 50 | [-1, 1, Conv, [512, 3, 2]], 51 | [[-1, 10], 1, Concat, [1]], # cat head P5 52 | [-1, 3, C3, [1024, False]], # 30 (P5/32-large) 53 | 54 | [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) 55 | ] 56 | -------------------------------------------------------------------------------- /models/hub/yolov5-p34.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ 13 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 6, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]], 20 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 21 | [-1, 3, C3, [1024]], 22 | [-1, 1, SPPF, [1024, 5]], # 9 23 | ] 24 | 25 | # YOLOv5 v6.0 head with (P3, P4) outputs 26 | head: [ 27 | [-1, 1, Conv, [512, 1, 1]], 28 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 29 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 30 | [-1, 3, C3, [512, False]], # 13 31 | 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 34 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 35 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 36 | 37 | [-1, 1, Conv, [256, 3, 2]], 38 | [[-1, 14], 1, Concat, [1]], # cat head P4 39 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 40 | 41 | [[17, 20], 1, Detect, [nc, anchors]], # Detect(P3, P4) 42 | ] 43 | -------------------------------------------------------------------------------- /models/hub/yolov5-p6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # 
number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ 13 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 6, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]], 20 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 21 | [-1, 3, C3, [768]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 11 25 | ] 26 | 27 | # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs 28 | head: [ 29 | [-1, 1, Conv, [768, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 31 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 32 | [-1, 3, C3, [768, False]], # 15 33 | 34 | [-1, 1, Conv, [512, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 36 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 37 | [-1, 3, C3, [512, False]], # 19 38 | 39 | [-1, 1, Conv, [256, 1, 1]], 40 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 41 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 42 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 43 | 44 | [-1, 1, Conv, [256, 3, 2]], 45 | [[-1, 20], 1, Concat, [1]], # cat head P4 46 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 47 | 48 | [-1, 1, Conv, [512, 3, 2]], 49 | [[-1, 16], 1, Concat, [1]], # cat head P5 50 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 51 | 52 | [-1, 1, Conv, [768, 3, 2]], 53 | [[-1, 12], 1, Concat, [1]], # cat head P6 54 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 55 | 56 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 57 | ] 58 | -------------------------------------------------------------------------------- /models/hub/yolov5-p7.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ 13 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 14 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 15 | [-1, 3, C3, [128]], 16 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 17 | [-1, 6, C3, [256]], 18 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 19 | [-1, 9, C3, [512]], 20 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 21 | [-1, 3, C3, [768]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 25 | [-1, 3, C3, [1280]], 26 | [-1, 1, SPPF, [1280, 5]], # 13 27 | ] 28 | 29 | # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs 30 | head: [ 31 | [-1, 1, Conv, [1024, 1, 1]], 32 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 33 | [[-1, 10], 1, Concat, [1]], # cat backbone P6 34 | [-1, 3, C3, [1024, False]], # 17 35 | 36 | [-1, 1, Conv, [768, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 39 | [-1, 3, C3, [768, False]], # 21 40 | 41 | [-1, 1, Conv, [512, 1, 1]], 42 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 43 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 44 | [-1, 3, C3, [512, False]], # 25 45 | 46 | [-1, 1, Conv, [256, 1, 1]], 47 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 48 | [[-1, 4], 1, Concat, [1]], 
# cat backbone P3 49 | [-1, 3, C3, [256, False]], # 29 (P3/8-small) 50 | 51 | [-1, 1, Conv, [256, 3, 2]], 52 | [[-1, 26], 1, Concat, [1]], # cat head P4 53 | [-1, 3, C3, [512, False]], # 32 (P4/16-medium) 54 | 55 | [-1, 1, Conv, [512, 3, 2]], 56 | [[-1, 22], 1, Concat, [1]], # cat head P5 57 | [-1, 3, C3, [768, False]], # 35 (P5/32-large) 58 | 59 | [-1, 1, Conv, [768, 3, 2]], 60 | [[-1, 18], 1, Concat, [1]], # cat head P6 61 | [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) 62 | 63 | [-1, 1, Conv, [1024, 3, 2]], 64 | [[-1, 14], 1, Concat, [1]], # cat head P7 65 | [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) 66 | 67 | [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) 68 | ] 69 | -------------------------------------------------------------------------------- /models/hub/yolov5-panet.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 PANet head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/hub/yolov5l6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [19, 27, 44, 40, 38, 94] # P3/8 9 | - [96, 68, 86, 152, 180, 137] # P4/16 10 | - [140, 301, 303, 264, 238, 542] # P5/32 11 | - [436, 615, 739, 380, 925, 792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ 17 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 18 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 19 | [-1, 3, C3, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 21 | [-1, 6, C3, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 23 | [-1, 9, C3, [512]], 24 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 25 | [-1, 3, C3, [768]], 26 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 27 | [-1, 3, C3, [1024]], 28 | [-1, 1, SPPF, [1024, 5]], # 11 29 | ] 30 | 31 
| # YOLOv5 v6.0 head 32 | head: [ 33 | [-1, 1, Conv, [768, 1, 1]], 34 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 35 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 36 | [-1, 3, C3, [768, False]], # 15 37 | 38 | [-1, 1, Conv, [512, 1, 1]], 39 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 40 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 41 | [-1, 3, C3, [512, False]], # 19 42 | 43 | [-1, 1, Conv, [256, 1, 1]], 44 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 45 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 46 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 47 | 48 | [-1, 1, Conv, [256, 3, 2]], 49 | [[-1, 20], 1, Concat, [1]], # cat head P4 50 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 51 | 52 | [-1, 1, Conv, [512, 3, 2]], 53 | [[-1, 16], 1, Concat, [1]], # cat head P5 54 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 55 | 56 | [-1, 1, Conv, [768, 3, 2]], 57 | [[-1, 12], 1, Concat, [1]], # cat head P6 58 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 59 | 60 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 61 | ] 62 | -------------------------------------------------------------------------------- /models/hub/yolov5m6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.67 # model depth multiple 6 | width_multiple: 0.75 # layer channel multiple 7 | anchors: 8 | - [19, 27, 44, 40, 38, 94] # P3/8 9 | - [96, 68, 86, 152, 180, 137] # P4/16 10 | - [140, 301, 303, 264, 238, 542] # P5/32 11 | - [436, 615, 739, 380, 925, 792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ 17 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 18 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 19 | [-1, 3, C3, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 21 | [-1, 6, C3, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 23 | [-1, 9, C3, [512]], 24 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 25 | [-1, 3, C3, [768]], 26 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 27 | [-1, 3, C3, [1024]], 28 | [-1, 1, SPPF, [1024, 5]], # 11 29 | ] 30 | 31 | # YOLOv5 v6.0 head 32 | head: [ 33 | [-1, 1, Conv, [768, 1, 1]], 34 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 35 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 36 | [-1, 3, C3, [768, False]], # 15 37 | 38 | [-1, 1, Conv, [512, 1, 1]], 39 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 40 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 41 | [-1, 3, C3, [512, False]], # 19 42 | 43 | [-1, 1, Conv, [256, 1, 1]], 44 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 45 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 46 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 47 | 48 | [-1, 1, Conv, [256, 3, 2]], 49 | [[-1, 20], 1, Concat, [1]], # cat head P4 50 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 51 | 52 | [-1, 1, Conv, [512, 3, 2]], 53 | [[-1, 16], 1, Concat, [1]], # cat head P5 54 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 55 | 56 | [-1, 1, Conv, [768, 3, 2]], 57 | [[-1, 12], 1, Concat, [1]], # cat head P6 58 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 59 | 60 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 61 | ] 62 | -------------------------------------------------------------------------------- /models/hub/yolov5n6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # 
model depth multiple 6 | width_multiple: 0.25 # layer channel multiple 7 | anchors: 8 | - [19, 27, 44, 40, 38, 94] # P3/8 9 | - [96, 68, 86, 152, 180, 137] # P4/16 10 | - [140, 301, 303, 264, 238, 542] # P5/32 11 | - [436, 615, 739, 380, 925, 792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ 17 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 18 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 19 | [-1, 3, C3, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 21 | [-1, 6, C3, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 23 | [-1, 9, C3, [512]], 24 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 25 | [-1, 3, C3, [768]], 26 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 27 | [-1, 3, C3, [1024]], 28 | [-1, 1, SPPF, [1024, 5]], # 11 29 | ] 30 | 31 | # YOLOv5 v6.0 head 32 | head: [ 33 | [-1, 1, Conv, [768, 1, 1]], 34 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 35 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 36 | [-1, 3, C3, [768, False]], # 15 37 | 38 | [-1, 1, Conv, [512, 1, 1]], 39 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 40 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 41 | [-1, 3, C3, [512, False]], # 19 42 | 43 | [-1, 1, Conv, [256, 1, 1]], 44 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 45 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 46 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 47 | 48 | [-1, 1, Conv, [256, 3, 2]], 49 | [[-1, 20], 1, Concat, [1]], # cat head P4 50 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 51 | 52 | [-1, 1, Conv, [512, 3, 2]], 53 | [[-1, 16], 1, Concat, [1]], # cat head P5 54 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 55 | 56 | [-1, 1, Conv, [768, 3, 2]], 57 | [[-1, 12], 1, Concat, [1]], # cat head P6 58 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 59 | 60 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 61 | ] 62 | -------------------------------------------------------------------------------- /models/hub/yolov5s-LeakyReLU.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model 6 | depth_multiple: 0.33 # model depth multiple 7 | width_multiple: 0.50 # layer channel multiple 8 | anchors: 9 | - [10, 13, 16, 30, 33, 23] # P3/8 10 | - [30, 61, 62, 45, 59, 119] # P4/16 11 | - [116, 90, 156, 198, 373, 326] # P5/32 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ 17 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 18 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 19 | [-1, 3, C3, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 21 | [-1, 6, C3, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 23 | [-1, 9, C3, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 25 | [-1, 3, C3, [1024]], 26 | [-1, 1, SPPF, [1024, 5]], # 9 27 | ] 28 | 29 | # YOLOv5 v6.0 head 30 | head: [ 31 | [-1, 1, Conv, [512, 1, 1]], 32 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 33 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 34 | [-1, 3, C3, [512, False]], # 13 35 | 36 | [-1, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 38 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 39 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 40 | 41 | [-1, 1, Conv, [256, 3, 2]], 42 | [[-1, 14], 1, Concat, [1]], # cat head P4 43 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 44 | 45 | [-1, 1, Conv, [512, 3, 2]], 46 | [[-1, 10], 1, Concat, [1]], # cat head P5 47 | 
[-1, 3, C3, [1024, False]], # 23 (P5/32-large) 48 | 49 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 50 | ] 51 | -------------------------------------------------------------------------------- /models/hub/yolov5s-ghost.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3Ghost, [128]], 19 | [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3Ghost, [256]], 21 | [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3Ghost, [512]], 23 | [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3Ghost, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, GhostConv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3Ghost, [512, False]], # 13 34 | 35 | [-1, 1, GhostConv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, GhostConv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, GhostConv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/hub/yolov5s-transformer.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 
1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/hub/yolov5s6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [19, 27, 44, 40, 38, 94] # P3/8 9 | - [96, 68, 86, 152, 180, 137] # P4/16 10 | - [140, 301, 303, 264, 238, 542] # P5/32 11 | - [436, 615, 739, 380, 925, 792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ 17 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 18 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 19 | [-1, 3, C3, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 21 | [-1, 6, C3, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 23 | [-1, 9, C3, [512]], 24 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 25 | [-1, 3, C3, [768]], 26 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 27 | [-1, 3, C3, [1024]], 28 | [-1, 1, SPPF, [1024, 5]], # 11 29 | ] 30 | 31 | # YOLOv5 v6.0 head 32 | head: [ 33 | [-1, 1, Conv, [768, 1, 1]], 34 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 35 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 36 | [-1, 3, C3, [768, False]], # 15 37 | 38 | [-1, 1, Conv, [512, 1, 1]], 39 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 40 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 41 | [-1, 3, C3, [512, False]], # 19 42 | 43 | [-1, 1, Conv, [256, 1, 1]], 44 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 45 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 46 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 47 | 48 | [-1, 1, Conv, [256, 3, 2]], 49 | [[-1, 20], 1, Concat, [1]], # cat head P4 50 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 51 | 52 | [-1, 1, Conv, [512, 3, 2]], 53 | [[-1, 16], 1, Concat, [1]], # cat head P5 54 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 55 | 56 | [-1, 1, Conv, [768, 3, 2]], 57 | [[-1, 12], 1, Concat, [1]], # cat head P6 58 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 59 | 60 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 61 | ] 62 | -------------------------------------------------------------------------------- /models/hub/yolov5x6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.33 # model depth multiple 6 | width_multiple: 1.25 # layer channel multiple 7 | anchors: 8 | - [19, 27, 44, 40, 38, 94] # P3/8 9 | - [96, 68, 86, 152, 180, 137] # P4/16 10 | - [140, 301, 303, 264, 238, 542] # P5/32 11 | - [436, 615, 739, 380, 925, 792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [ 17 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 18 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 19 | [-1, 3, C3, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 21 | [-1, 6, C3, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 23 | [-1, 9, C3, [512]], 24 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 25 | [-1, 3, C3, [768]], 26 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 27 | [-1, 3, C3, [1024]], 28 | [-1, 1, SPPF, [1024, 5]], # 11 29 | ] 30 | 31 | # YOLOv5 v6.0 head 32 | head: [ 33 | [-1, 1, Conv, [768, 1, 1]], 34 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 35 | 
[[-1, 8], 1, Concat, [1]], # cat backbone P5 36 | [-1, 3, C3, [768, False]], # 15 37 | 38 | [-1, 1, Conv, [512, 1, 1]], 39 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 40 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 41 | [-1, 3, C3, [512, False]], # 19 42 | 43 | [-1, 1, Conv, [256, 1, 1]], 44 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 45 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 46 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 47 | 48 | [-1, 1, Conv, [256, 3, 2]], 49 | [[-1, 20], 1, Concat, [1]], # cat head P4 50 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 51 | 52 | [-1, 1, Conv, [512, 3, 2]], 53 | [[-1, 16], 1, Concat, [1]], # cat head P5 54 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 55 | 56 | [-1, 1, Conv, [768, 3, 2]], 57 | [[-1, 12], 1, Concat, [1]], # cat head P6 58 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 59 | 60 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 61 | ] 62 | -------------------------------------------------------------------------------- /models/segment/yolov5l-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/segment/yolov5m-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.67 # model depth multiple 6 | width_multiple: 0.75 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, 
[512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/segment/yolov5n-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.25 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/segment/yolov5s-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.5 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 
3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/segment/yolov5x-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.33 # model depth multiple 6 | width_multiple: 1.25 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/yolov5l.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 
v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/yolov5m.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 44 # number of classes 5 | depth_multiple: 0.67 # model depth multiple 6 | width_multiple: 0.75 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 1,SimAM, [1e-4]], 34 | [-1, 3, C3, [512, False]], # 13 35 | 36 | [-1, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 38 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 39 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 40 | 41 | [-1, 1, Conv, [256, 3, 2]], 42 | [[-1, 15], 1, Concat, [1]], # cat head P4 43 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 44 | 45 | [-1, 1, Conv, [512, 3, 2]], 46 | [[-1, 10], 1, Concat, [1]], # cat head P5 47 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 48 | 49 | [[18, 21, 24], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 17.20.23 50 | ] 51 | -------------------------------------------------------------------------------- /models/yolov5n.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.25 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, 
nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /models/yolov5s.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 44 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 1,SimAM, [1e-4]], 34 | [-1, 3, C3, [512, False]], # 13 35 | 36 | [-1, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 38 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 39 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 40 | 41 | [-1, 1, Conv, [256, 3, 2]], 42 | [[-1, 15], 1, Concat, [1]], # cat head P4 14 43 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 44 | 45 | [-1, 1, Conv, [512, 3, 2]], 46 | [[-1, 10], 1, Concat, [1]], # cat head P5 47 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 48 | 49 | [[18, 21, 24], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 17.20.23 50 | ] 51 | -------------------------------------------------------------------------------- /models/yolov5x.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.33 # model depth multiple 6 | width_multiple: 1.25 # layer channel multiple 7 | anchors: 8 | - [10, 13, 16, 30, 33, 23] # P3/8 9 | - [30, 61, 62, 45, 59, 119] # P4/16 10 | - [116, 90, 156, 198, 373, 326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [ 16 | [-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: [ 30 | [-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 32 | [[-1, 6], 1, Concat, 
[1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, "nearest"]], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /shibie.py: -------------------------------------------------------------------------------- 1 | 2 | from detect import run 3 | 4 | classes = ['书籍纸张', '金属厨具', '砧板', '污损塑料', '筷子', 5 | '陶瓷器皿', '插头电线', '洗护用品', '塑料玩具', '鞋', 6 | '果皮果肉', '玻璃器皿', '毛绒玩具', '污损用纸', '塑料器皿', 7 | '纸盒纸箱', '花盆', '包', '金属器皿', '干电池', '调料瓶', 8 | '菜帮菜叶', '锅', '食用油桶', '饮料瓶', '充电宝', '易拉罐', 9 | '牙签', '剩饭剩菜', '大骨头', '鱼骨', '垃圾桶', '酒瓶', 10 | '金属食品罐', '一次性快餐盒', '烟蒂', '旧衣服', '塑料衣架', 11 | '枕头', '过期药物', '茶叶渣', '软膏', '蛋壳', '快递纸袋'] 12 | 13 | classes_4 = {'可回收': ['书籍纸张', '金属厨具', '易拉罐', '饮料瓶', '食用油桶', 14 | '快递纸袋', '金属食品罐', '酒瓶', '调料瓶', '包', '塑料衣架', 15 | '旧衣服', '锅', '金属器皿', '纸盒纸箱', '毛绒玩具', '玻璃器皿', 16 | '塑料器皿', '插头电线', '塑料玩具', '鞋', '垃圾桶', '枕头'], 17 | '不可回收': ['污损塑料', '一次性快餐盒', '花盆', '牙签', '污损用纸', '筷子', 18 | '陶瓷器皿', '洗护用品', '软膏'], 19 | '有害垃圾': ['烟蒂', '过期药物', '干电池', '充电宝'], 20 | '厨余垃圾': ['大骨头', '鱼骨', '蛋壳', '菜帮菜叶', '剩饭剩菜', '茶叶渣', '果皮果肉', '砧板']} 21 | 22 | def get_detection(image_path): 23 | shibie_path,cls,cls_num = run(weights="static/weight/best.pt", 24 | source='static/image/up/'+image_path, 25 | imgsz=(640, 640), 26 | project='static/image/out', 27 | name=image_path.split('.')[0] 28 | ) 29 | 30 | # 根据索引获取对应的类别名 31 | cls_names = [classes[i] for i in cls] 32 | 33 | # 根据类别名获取对应的类别 34 | cls_4 = [] 35 | for cls_name in cls_names: 36 | for key, value in classes_4.items(): 37 | if cls_name in value: 38 | cls_4.append(key) 39 | break 40 | 41 | # 计算四个类别的数量 42 | cls_4_counts = {} 43 | for category in cls_4: 44 | cls_4_counts[category] = sum(cls_num[i] for i, c in enumerate(cls_4) if c == category) 45 | 46 | return '/'+str(shibie_path)+'/'+image_path, cls_names, cls_num, cls_4, cls_4_counts 47 | 48 | # get_detection('test/1.jpg') -------------------------------------------------------------------------------- /static/detection_image.css: -------------------------------------------------------------------------------- 1 | /* layout */ 2 | * { 3 | margin: 0; 4 | padding: 0; 5 | } 6 | 7 | header { 8 | position: absolute; 9 | width: 100%; 10 | height: 12%; 11 | text-align: center; 12 | background-color: #2980b9; 13 | } 14 | 15 | .main { 16 | position: absolute; 17 | width: 100%; 18 | height: 88%; 19 | background: #3498db; 20 | bottom: 0; 21 | } 22 | 23 | .main:after { 24 | content: ""; 25 | display: block; 26 | clear: both; 27 | } 28 | 29 | .main .manipulation { 30 | height: 100%; 31 | width: 20%; 32 | float: left; 33 | background: #2980b9; 34 | } 35 | 36 | .main .display { 37 | height: 100%; 38 | width: 80%; 39 | float: left; 40 | background: #3498db; 41 | } 42 | 43 | #font_piggy { 44 | color: #ecf0f1; 45 | font-family: "宋体", serif; 46 | font-size: larger; 47 | } 48 | 49 | 50 | /* header */ 51 | header h1 { 52 | color: #ecf0f1; 53 | font-weight: bold; 54 | font-family: "宋体", serif; 55 | line-height: 100px; 56 | } 57 | 58 | 59 | /* manipulation */ 60 | .manipulation .model_select 
{ 61 | margin: 35px 30px; 62 | } 63 | 64 | .manipulation .image_load { 65 | width: 0.1px; 66 | height: 0.1px; 67 | opacity: 0; 68 | overflow: hidden; 69 | position: absolute; 70 | z-index: -1; 71 | } 72 | 73 | 74 | 75 | 76 | 77 | 78 | /* display */ 79 | .display .return { 80 | margin: 30px 20px; 81 | } 82 | 83 | .display .return:after { 84 | content: ""; 85 | display: table; 86 | clear: both; 87 | } 88 | 89 | .display .pictures_exhibit { 90 | padding: 10px 15px; 91 | height: 420px; 92 | width: 100%; 93 | background: #ecf0f1; 94 | } 95 | 96 | .display .pictures_exhibit:after { 97 | content: ""; 98 | display: table; 99 | clear: both; 100 | } 101 | 102 | .display .pictures_exhibit .picture_l { 103 | float: left; 104 | height: 100%; 105 | width: 550px; 106 | background: #bdc3c7; 107 | } 108 | 109 | .display .pictures_exhibit .picture_r { 110 | float: right; 111 | height: 100%; 112 | width: 550px; 113 | background: #bdc3c7; 114 | } 115 | 116 | .display .generation { 117 | margin: 30px 30px; 118 | } 119 | 120 | .display .generation:after { 121 | content: ""; 122 | display: table; 123 | clear: both; 124 | } 125 | 126 | -------------------------------------------------------------------------------- /static/image/loading01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/static/image/loading01.png -------------------------------------------------------------------------------- /static/js/Trash_disposal.js: -------------------------------------------------------------------------------- 1 | // Load image and preview 2 | $(function (){ 3 | "use strict"; 4 | $("#load_img01").change(function(){ 5 | console.log(this.files[0]); 6 | let objUrl = getObjectURL(this.files[0]) ; // 获取文件信息 7 | console.log("objUrl = "+objUrl); 8 | if (objUrl) { 9 | $("#img_loaded01").attr("src", objUrl); 10 | } 11 | }); 12 | }); 13 | 14 | 15 | function getObjectURL(file) { 16 | "use strict"; 17 | let url = null; 18 | if(Window.createObjectURL !== undefined) { 19 | url = window.createObjectURL(file) ; 20 | }else if (window.URL !== undefined) { // mozilla(firefox) 21 | url = window.URL.createObjectURL(file) ; 22 | }else if (window.webkitURL !== undefined) { // webkit or chrome 23 | url = window.webkitURL.createObjectURL(file) ; 24 | } 25 | return url ; 26 | } 27 | 28 | 29 | // Detect image 30 | function detect() { 31 | "use strict"; 32 | let fileObj = $("#load_img01")[0].files[0]; 33 | console.log(fileObj); 34 | let form = new FormData(); 35 | 36 | form.append("file", fileObj); 37 | 38 | $.ajax({ 39 | type: 'POST', 40 | url: "imageDetect", 41 | data: form, 42 | async: false, 43 | processData: false, 44 | contentType: false, 45 | success: function (res){ 46 | alert("检测完成!"); 47 | console.log(res); 48 | // img_src = "data:image/png;base64,"+res; 49 | // console.log(img_src); 50 | // $("#img_detected").attr("src",res); 51 | window.location.href = "disposal/"+res; 52 | }, 53 | error: function (){ 54 | alert("检测失败!"); 55 | console.log("后台处理错误"); 56 | } 57 | }); 58 | 59 | } -------------------------------------------------------------------------------- /static/js/load_file.js: -------------------------------------------------------------------------------- 1 | // Load image and preview 2 | $(function (){ 3 | "use strict"; 4 | $("#load_img01").change(function(){ 5 | console.log(this.files[0]); 6 | let objUrl = getObjectURL(this.files[0]) ; // 获取文件信息 7 | console.log("objUrl = 
"+objUrl); 8 | if (objUrl) { 9 | $("#img_loaded01").attr("src", objUrl); 10 | } 11 | }); 12 | }); 13 | 14 | 15 | function getObjectURL(file) { 16 | "use strict"; 17 | let url = null; 18 | if(Window.createObjectURL !== undefined) { 19 | url = window.createObjectURL(file) ; 20 | }else if (window.URL !== undefined) { // mozilla(firefox) 21 | url = window.URL.createObjectURL(file) ; 22 | }else if (window.webkitURL !== undefined) { // webkit or chrome 23 | url = window.webkitURL.createObjectURL(file) ; 24 | } 25 | return url ; 26 | } 27 | 28 | 29 | // Detect image 30 | function detect() { 31 | "use strict"; 32 | let fileObj = $("#load_img01")[0].files[0]; 33 | console.log(fileObj); 34 | let form = new FormData(); 35 | 36 | form.append("file", fileObj); 37 | 38 | $.ajax({ 39 | type: 'POST', 40 | url: "imageDetect", 41 | data: form, 42 | async: false, 43 | processData: false, 44 | contentType: false, 45 | success: function (res){ 46 | alert("检测完成!"); 47 | console.log(res); 48 | // img_src = "data:image/png;base64,"+res; 49 | // console.log(img_src); 50 | // $("#img_detected").attr("src",res); 51 | window.location.href = "detect_result/"+res; 52 | }, 53 | error: function (){ 54 | alert("检测失败!"); 55 | console.log("后台处理错误"); 56 | } 57 | }); 58 | 59 | } -------------------------------------------------------------------------------- /static/js/recycling_up.js: -------------------------------------------------------------------------------- 1 | // Load image and preview 2 | $(function (){ 3 | "use strict"; 4 | $("#up_recycling").change(function(){ 5 | console.log(this.files[0]); 6 | let objUrl = getObjectURL(this.files[0]) ; // 获取文件信息 7 | console.log("objUrl = "+objUrl); 8 | if (objUrl) { 9 | $("#img_loaded01").attr("src", objUrl); 10 | } 11 | }); 12 | }); 13 | 14 | 15 | function getObjectURL(file) { 16 | "use strict"; 17 | let url = null; 18 | if(Window.createObjectURL !== undefined) { 19 | url = window.createObjectURL(file) ; 20 | }else if (window.URL !== undefined) { // mozilla(firefox) 21 | url = window.URL.createObjectURL(file) ; 22 | }else if (window.webkitURL !== undefined) { // webkit or chrome 23 | url = window.webkitURL.createObjectURL(file) ; 24 | } 25 | return url ; 26 | } 27 | 28 | 29 | // Detect image 30 | function recycling_up() { 31 | "use strict"; 32 | let fileObj = $("#up_recycling")[0].files[0]; 33 | console.log("asdfasdf",fileObj); 34 | let form = new FormData(); 35 | let img_src; 36 | // 获取 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | {% endblock %} -------------------------------------------------------------------------------- /templates/Trash_disposal.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 用户垃圾投放记录 4 | {% endblock %} 5 | {% block write %} 6 |
  • 7 | 8 | 投放垃圾 9 | 10 |
  • 11 | {% endblock %} 12 | {% block content %} 13 | {# 使用 length 过滤器获取 staffs 变量的长度 #} 14 | 15 |
    16 | {% if trashs == [] %} 17 |

    暂无用户垃圾投放记录

    18 | {% else %} 19 |

    用户垃圾投放记录

    20 | {% endif %} 21 |
    22 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | {% for trash in trashs %} 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | {% endfor %} 49 | 50 |
    用户名 | 社区 | 可回收数量 | 不可回收数量 | 有害垃圾数量 | 厨余垃圾数量 | 时间
    {{ trash.username }} | {{ trash.community }} | {{ trash.recyclable_trash }} | {{ trash.non_recyclable_trash }} | {{ trash.hazardous_trash }} | {{ trash.kitchen_trash }} | {{ trash.time }}
    51 |
    52 |
      53 | {% if paginate.has_prev %} 54 |
    • 上一页
    • 55 | {% endif %} 56 | {% for i in paginate.iter_pages() %} 57 | {% if i == None %} 58 |
    • ...
    • 59 | {% else %} 60 |
    • {{ i }}
    • 61 | {% endif %} 62 | {% endfor %} 63 | {% if paginate.has_next %} 64 |
    • 下一页
    • 65 | {% endif %} 66 |
    67 |
    68 |
    69 |
    70 | {% endblock %} -------------------------------------------------------------------------------- /templates/blacklist.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 用户不文明管理 4 | {% endblock %} 5 | {% block content %} 6 | {# 使用 length 过滤器获取 staffs 变量的长度 #} 7 | 8 | {% if current_user.has_role('Root') %} 9 |
    10 |

    用户不文明管理

    11 | 12 |
    13 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | {% for user in users %} 26 | 27 | 28 | 29 | 36 | 47 | 48 | {% endfor %} 49 | 50 |
    用户名 | 不文明次数 | 状态 | 操作
    {{ user.username }} | {{ user.uncivilized }} 30 | {% if user.blacklist == 0 %} 31 | 正常 32 | {% elif user.blacklist == 1 %} 33 | 拉黑 34 | {% endif %} 35 |
    51 |
    52 |
    53 | {% endif %} 54 | {% endblock %} -------------------------------------------------------------------------------- /templates/commodity_add.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 添加商品 4 | {% endblock %} 5 | {% block content %} 6 | 7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |
    14 |

    添加商品

    15 | 16 |
    17 |
    18 | 19 |
    20 | 22 |
    23 |
    24 |
    25 |
    26 |
    27 | 28 |
    29 | 31 |
    32 |
    33 |
    34 |
    35 |
    36 | 37 |
    38 | 40 |
    41 |
    42 |
    43 | 44 | 45 | 46 |
    47 |
    48 |
    49 | 50 |
    51 | 52 |
    53 |
    54 |
    55 |
    56 |
    57 |
    58 |
    59 |
    60 |
    61 | {% endblock %} -------------------------------------------------------------------------------- /templates/commodity_change.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 修改商品信息 4 | {% endblock %} 5 | {% block content %} 6 | 7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |
    14 |

    修改商品信息

    15 | 16 |
    17 |
    18 | 19 |
    20 | {{commodity.commodity}} 21 |
    22 |
    23 |
    24 |
    25 |
    26 | 27 |
    28 | 30 |
    31 |
    32 |
    33 |
    34 |
    35 | 36 |
    37 | 39 |
    40 |
    41 |
    42 | 43 | 44 | 45 |
    46 |
    47 |
    48 | 49 |
    50 | 51 |
    52 |
    53 |
    54 |
    55 |
    56 |
    57 |
    58 |
    59 |
    60 | {% endblock %} -------------------------------------------------------------------------------- /templates/community.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 社区可投放剩余量 4 | {% endblock %} 5 | {% block write %} 6 | {% if current_user.has_role('Admin') or current_user.has_role('Root') %} 7 |
  • 8 | 9 | 添加社区 10 | 11 |
  • 12 | {% endif %} 13 | {% endblock %} 14 | {% block content %} 15 | {# 使用 length 过滤器获取 staffs 变量的长度 #} 16 | 17 | {% if current_user.has_role('Admin') or current_user.has_role('Root') %} 18 |
    19 |

    社区可投放剩余量

    20 |
    21 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | {% for community in communities %} 36 | 37 | 38 | 39 | 40 | 41 | 42 | 50 | 51 | 52 | {% endfor %} 53 | 54 |
    社区 | 可回收剩余数量 | 不可回收剩余数量 | 有害垃圾剩余数量 | 厨余垃圾剩余数量 | 操作
    {{ community.community }} | {{ community.recyclable_trash }}/{{ community.recyclable_trash_original }} | {{ community.non_recyclable_trash }}/{{ community.non_recyclable_trash_original }} | {{ community.hazardous_trash }}/{{ community.hazardous_trash_original }} | {{ community.kitchen_trash }}/{{ community.kitchen_trash_original }} 43 | 44 |
    45 | 修改 47 | 48 |
    49 |
    55 |
    56 |
    57 | {% elif current_user.has_role('User') %} 58 |
    59 |

    社区可投放剩余量

    60 |
    61 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | {% for community in communities %} 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | {% endfor %} 83 | 84 |
    社区 | 可回收剩余数量 | 不可回收剩余数量 | 有害垃圾剩余数量 | 厨余垃圾剩余数量
    {{ community.community }} | {{ community.recyclable_trash }}/{{ community.recyclable_trash_original }} | {{ community.non_recyclable_trash }}/{{ community.non_recyclable_trash_original }} | {{ community.hazardous_trash }}/{{ community.hazardous_trash_original }} | {{ community.kitchen_trash }}/{{ community.kitchen_trash_original }}
    85 |
    86 |
    87 | {% endif %} 88 | {% endblock %} -------------------------------------------------------------------------------- /templates/community_add.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 添加社区垃圾容量 4 | {% endblock %} 5 | {% block content %} 6 | 7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |
    14 |

    添加社区垃圾容量

    15 | 16 |
    17 |
    18 | 19 |
    20 | 22 |
    23 |
    24 |
    25 |
    26 |
    27 | 28 |
    29 | 31 |
    32 |
    33 |
    34 |
    35 |
    36 | 37 |
    38 | 40 |
    41 |
    42 |
    43 |
    44 |
    45 | 46 |
    47 | 49 |
    50 |
    51 |
    52 |
    53 |
    54 | 55 |
    56 | 58 |
    59 |
    60 |
    61 | 62 | 63 |
    64 |
    65 |
    66 | 67 |
    68 | 69 |
    70 |
    71 |
    72 |
    73 |
    74 |
    75 |
    76 |
    77 |
    78 | {% endblock %} -------------------------------------------------------------------------------- /templates/community_change.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 修改{{community.community}}垃圾容量 4 | {% endblock %} 5 | {% block content %} 6 | 7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |
    14 |

    修改{{community.community}}垃圾容量

    15 | 16 |
    17 |
    18 | 19 |
    20 | {{community.community}} 21 |
    22 |
    23 |
    24 |
    25 |
    26 | 27 |
    28 | 30 |
    31 |
    32 |
    33 |
    34 |
    35 | 36 |
    37 | 39 |
    40 |
    41 |
    42 |
    43 |
    44 | 45 |
    46 | 48 |
    49 |
    50 |
    51 |
    52 |
    53 | 54 |
    55 | 57 |
    58 |
    59 |
    60 | 61 | 62 |
    63 |
    64 |
    65 | 66 |
    67 | 68 |
    69 |
    70 |
    71 |
    72 |
    73 |
    74 |
    75 |
    76 |
    77 | {% endblock %} -------------------------------------------------------------------------------- /templates/detect_result.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 识别结果 4 | {% endblock %} 5 | {% block content %} 6 |
    7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |

    识别结果

    14 | 15 |
    16 |
    17 | 18 |
    19 | 20 | 21 |
    22 |
    23 |
    24 | {% for i in range(cls|length) %} 25 |
    26 |
    27 | 28 |
    29 | {{ cls[i] }} 30 |
    31 |
    32 |
    33 |
    34 |
    35 | 36 |
    37 | {{ cls_4[i] }} 38 |
    39 |
    40 |
    41 |
    42 |
    43 | 44 |
    45 | {{ cls_num[i] }} 46 |
    47 |
    48 |
    49 | {% endfor %} 50 | {% for key, value in cls_4_counts.items() %} 51 |
    52 |
    53 | 54 |
    55 | {{ value }} 56 |
    57 |
    58 |
    59 | {% endfor %} 60 |
    61 |
    62 |
    63 |
    64 |
    65 |
    66 |
    67 | {% endblock %} -------------------------------------------------------------------------------- /templates/disposal.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 垃圾投放 4 | {% endblock %} 5 | {% block content %} 6 |
    7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |

    垃圾投放

    14 | 15 |
    16 |
    17 | 18 |
    19 | 20 | 21 |
    22 |
    23 |
    24 | 25 | 26 |
    27 |
    28 | 29 |
    30 | 35 |
    36 |
    37 |
    38 | {% for key, value in cls_4_counts.items() %} 39 |
    40 |
    41 | 42 |
    43 | {{ value }} 44 |
    45 |
    46 |
    47 | {% endfor %} 48 |
    49 | {% if communities|length >= 1 %} 50 |
    51 |
    52 | 53 |
    54 | 55 |
    56 |
    57 |
    58 | {% else %} 59 |
    60 |
    61 | 62 |
    63 | 暂无社区请联系管理员添加社区 64 |
    65 |
    66 |
    67 | {% endif %} 68 |
    69 | 70 |
    71 |
    72 |
    73 |
    74 |
    75 | {% endblock %} -------------------------------------------------------------------------------- /templates/disposal_trash.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 垃圾投放 4 | {% endblock %} 5 | {% block write %} 6 | {% endblock %} 7 | 8 | {% block content %} 9 |
    10 |
    11 | 等待上传 12 |
    13 |
    14 |
    15 | 16 | 17 | 18 |
    19 |
    20 |
    21 | 22 | 23 | 24 | {% endblock %} -------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 垃圾管理系统 4 | {% endblock %} 5 | {% block write %} 6 | {% endblock %} 7 | 8 | {% block content %} 9 | {% if current_user.is_authenticated %} 10 |
    11 |
    12 |

    13 | 欢迎使用垃圾管理系统 14 |

    15 | 16 |
    17 |
    18 | 19 | 20 | {% else %} 21 |
    22 |
    23 |

    24 | 欢迎使用垃圾管理系统 25 |

    26 |

    27 | 请先登录或注册 28 |

    29 |
    30 |
    31 | 32 | {% endif %} 33 | {% endblock %} -------------------------------------------------------------------------------- /templates/logging.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block body %}{% endblock %} 3 | {% block login %} 4 |
    5 |
    6 | 7 |
    8 |
    9 |
    10 |
    11 | 12 |
    13 | 36 |
    37 |
    38 |
    39 |
    40 |
    41 |
    42 |
    43 | {% endblock %} -------------------------------------------------------------------------------- /templates/login.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block body %}{% endblock %} 3 | {% block login %} 4 |
    5 |
    6 | 7 |
    8 |
    9 |
    10 |
    11 | 12 |
    13 | 35 |
    36 |
    37 |
    38 |
    39 |
    40 |
    41 |
    42 | {% endblock %} -------------------------------------------------------------------------------- /templates/popular_science.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 垃圾科普 4 | {% endblock %} 5 | {% block write %} 6 | {% if current_user.has_role('Root') or current_user.has_role('Admin') %} 7 |
  • 8 | 9 | 填写科普 10 | 11 |
  • 12 | {% endif %} 13 | {% endblock %} 14 | {% block content %} 15 | {# 使用 length 过滤器获取 staffs 变量的长度 #} 16 | {% if current_user.has_role('Root') or current_user.has_role('Admin') %} 17 |
    18 | {% if popular_sciences == [] %} 19 |

    暂无科普

    20 | {% else %} 21 |

    垃圾科普

    22 | {% endif %} 23 |
    24 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | {% for popular_science in popular_sciences %} 38 | 39 | 40 | 41 | 42 | 52 | 53 | 54 | 55 | {% endfor %} 56 | 57 |
    垃圾名称 | 垃圾类别 | 垃圾科普 | 操作
    {{ popular_science.name }} | {{ popular_science.classification }} | {{ popular_science.introduction }} 43 | 44 |
    45 | 47 | 修改 49 | 50 |
    51 |
    58 |
    59 |
    60 | {% elif current_user.has_role('User') %} 61 |
    62 | {% if kepus == [] %} 63 |

    暂无科普

    64 | {% else %} 65 |

    垃圾科普

    66 | {% endif %} 67 |
    68 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | {% for popular_science in popular_sciences %} 81 | 82 | 83 | 84 | 85 | 89 | 90 | {% endfor %} 91 | 92 |
    垃圾名称 | 垃圾类别 | 垃圾科普 | 操作
    {{ popular_science.name }} | {{ popular_science.classification }} | {{ popular_science.introduction }} 86 | 88 |
    93 |
    94 |
    95 | {% endif %} 96 | {% endblock %} -------------------------------------------------------------------------------- /templates/popular_science_add.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 添加垃圾科普信息 4 | {% endblock %} 5 | {% block content %} 6 |
    7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |

    添加垃圾科普信息

    14 |
    15 |
    16 | 17 |
    18 | 20 |
    21 |
    22 |
    23 |
    24 |
    25 | 26 |
    27 | 33 |
    34 |
    35 |
    36 | 37 |
    38 |
    39 | 40 |
    41 | 42 | 43 |
    44 |
    45 |
    46 |
    47 |
    48 |
    49 | 50 |
    51 | 52 |
    53 |
    54 |
    55 |
    56 |
    57 |
    58 |
    59 |
    60 |
    61 | {% endblock %} -------------------------------------------------------------------------------- /templates/popular_science_change.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 修改垃圾科普信息 4 | {% endblock %} 5 | {% block content %} 6 |
    7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |

    修改垃圾科普信息

    14 |
    15 |
    16 | 17 |
    18 | 20 |
    21 |
    22 |
    23 |
    24 |
    25 | 26 |
    27 | 42 |
    43 |
    44 |
    45 | 46 |
    47 |
    48 | 49 |
    50 | 51 | 52 |
    53 |
    54 |
    55 |
    56 |
    57 |
    58 | 59 |
    60 | 61 |
    62 |
    63 |
    64 |
    65 |
    66 |
    67 |
    68 |
    69 |
    70 | {% endblock %} -------------------------------------------------------------------------------- /templates/popular_science_instance.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | {{ popular_science.name }} 的科普介绍 4 | {% endblock %} 5 | {% block content %} 6 |
    7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |

    {{ popular_science.name }} 的科普介绍

    14 | 15 |
    16 |
    17 | 18 |
    19 | 20 | {{ popular_science.name }} 21 |
    22 |
    23 |
    24 |
    25 |
    26 | 27 |
    28 | 29 | {{ popular_science.classification }} 30 |
    31 |
    32 |
    33 |
    34 |
    35 | 36 |
    37 | {{ popular_science.introduction }} 38 |
    39 |
    40 |
    41 |
    42 |
    43 |
    44 |
    45 |
    46 |
    47 |
    48 | {% endblock %} -------------------------------------------------------------------------------- /templates/recycling_add.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 添加旧物 4 | {% endblock %} 5 | {% block content %} 6 |
    7 |
    8 |
    9 |
    10 |
    11 |
    12 |
    13 |

    添加旧物

    14 |
    15 |
    16 | 17 |
    18 | 20 |
    21 |
    22 |
    23 |
    24 |
    25 | 26 |
    27 | 28 | 29 |
    30 |
    31 |
    32 | 33 | 34 |
    35 |
    36 |
    37 |
    38 | 39 | 40 | 41 |
    42 |
    43 |
    44 |
    45 | 46 |
    47 | 52 |
    53 |
    54 |
    55 |
    56 |
    57 |
    58 | 59 |
    60 | 61 |
    62 |
    63 |
    64 |
    65 |
    66 |
    67 |
    68 |
    69 |
    70 | {% endblock %} -------------------------------------------------------------------------------- /templates/user_role.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% block title %} 3 | 用户权限管理 4 | {% endblock %} 5 | {% block content %} 6 | {# 使用 length 过滤器获取 staffs 变量的长度 #} 7 | 8 | {% if current_user.has_role('Root') %} 9 |
    10 |

    用户权限管理

    11 | 12 |
    13 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | {% for user in users %} 25 | {% for role in user.roles %} 26 | 27 | 28 | 29 | 38 | 39 | {% endfor %} 40 | {% endfor %} 41 | 42 | 43 |
    用户名 | 权限 | 操作
    {{ user.username }} | {{ role.name }} 30 | {% if role.name == 'User' %} 31 | 33 | {% elif role.name == 'Admin' %} 34 | 降为普通用户 36 | {% endif %} 37 |
    44 |
    45 |
    46 | {% endif %} 47 | {% endblock %} -------------------------------------------------------------------------------- /test/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/1.jpg -------------------------------------------------------------------------------- /test/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/2.jpg -------------------------------------------------------------------------------- /test/20210000115.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000115.jpg -------------------------------------------------------------------------------- /test/20210000116.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000116.jpg -------------------------------------------------------------------------------- /test/20210000131.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000131.jpg -------------------------------------------------------------------------------- /test/20210000160.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000160.jpg -------------------------------------------------------------------------------- /test/20210000169.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000169.jpg -------------------------------------------------------------------------------- /test/20210000192.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000192.jpg -------------------------------------------------------------------------------- /test/20210000232.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000232.jpg -------------------------------------------------------------------------------- /test/20210000242.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000242.jpg -------------------------------------------------------------------------------- /test/20210000273.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000273.jpg 
-------------------------------------------------------------------------------- /test/20210000310.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000310.jpg -------------------------------------------------------------------------------- /test/20210000319.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000319.jpg -------------------------------------------------------------------------------- /test/20210000321.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000321.jpg -------------------------------------------------------------------------------- /test/20210000365.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000365.jpg -------------------------------------------------------------------------------- /test/20210000401.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210000401.jpg -------------------------------------------------------------------------------- /test/20210001074.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210001074.jpg -------------------------------------------------------------------------------- /test/20210001588.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210001588.jpg -------------------------------------------------------------------------------- /test/20210001757.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210001757.jpg -------------------------------------------------------------------------------- /test/20210003389.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210003389.jpg -------------------------------------------------------------------------------- /test/20210003444.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210003444.jpg -------------------------------------------------------------------------------- /test/20210004790.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210004790.jpg 
-------------------------------------------------------------------------------- /test/20210007463.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210007463.jpg -------------------------------------------------------------------------------- /test/20210007469.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210007469.jpg -------------------------------------------------------------------------------- /test/20210007481.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210007481.jpg -------------------------------------------------------------------------------- /test/20210008626.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210008626.jpg -------------------------------------------------------------------------------- /test/20210008674.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210008674.jpg -------------------------------------------------------------------------------- /test/20210009108.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210009108.jpg -------------------------------------------------------------------------------- /test/20210009665.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210009665.jpg -------------------------------------------------------------------------------- /test/20210011644.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210011644.jpg -------------------------------------------------------------------------------- /test/20210012487.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/20210012487.jpg -------------------------------------------------------------------------------- /test/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/test/3.jpg -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """utils/initialization.""" 3 | 4 | import contextlib 5 | import platform 6 | import threading 7 | 8 | 9 | def 
emojis(str=""): 10 | """Returns an emoji-safe version of a string, stripped of emojis on Windows platforms.""" 11 | return str.encode().decode("ascii", "ignore") if platform.system() == "Windows" else str 12 | 13 | 14 | class TryExcept(contextlib.ContextDecorator): 15 | # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager 16 | def __init__(self, msg=""): 17 | """Initializes TryExcept with an optional message, used as a decorator or context manager for error handling.""" 18 | self.msg = msg 19 | 20 | def __enter__(self): 21 | """Enter the runtime context related to this object for error handling with an optional message.""" 22 | pass 23 | 24 | def __exit__(self, exc_type, value, traceback): 25 | """Context manager exit method that prints an error message with emojis if an exception occurred, always returns 26 | True. 27 | """ 28 | if value: 29 | print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) 30 | return True 31 | 32 | 33 | def threaded(func): 34 | """Decorator @threaded to run a function in a separate thread, returning the thread instance.""" 35 | 36 | def wrapper(*args, **kwargs): 37 | thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) 38 | thread.start() 39 | return thread 40 | 41 | return wrapper 42 | 43 | 44 | def join_threads(verbose=False): 45 | """ 46 | Joins all daemon threads, optionally printing their names if verbose is True. 47 | 48 | Example: atexit.register(lambda: join_threads()) 49 | """ 50 | main_thread = threading.current_thread() 51 | for t in threading.enumerate(): 52 | if t is not main_thread: 53 | if verbose: 54 | print(f"Joining thread {t.name}") 55 | t.join() 56 | 57 | 58 | def notebook_init(verbose=True): 59 | """Initializes notebook environment by checking requirements, cleaning up, and displaying system info.""" 60 | print("Checking setup...") 61 | 62 | import os 63 | import shutil 64 | 65 | from ultralytics.utils.checks import check_requirements 66 | 67 | from utils.general import check_font, is_colab 68 | from utils.torch_utils import select_device # imports 69 | 70 | check_font() 71 | 72 | import psutil 73 | 74 | if check_requirements("wandb", install=False): 75 | os.system("pip uninstall -y wandb") # eliminate unexpected account creation prompt with infinite hang 76 | if is_colab(): 77 | shutil.rmtree("/content/sample_data", ignore_errors=True) # remove colab /sample_data directory 78 | 79 | # System info 80 | display = None 81 | if verbose: 82 | gb = 1 << 30 # bytes to GiB (1024 ** 3) 83 | ram = psutil.virtual_memory().total 84 | total, used, free = shutil.disk_usage("/") 85 | with contextlib.suppress(Exception): # clear display if ipython is installed 86 | from IPython import display 87 | 88 | display.clear_output() 89 | s = f"({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)" 90 | else: 91 | s = "" 92 | 93 | select_device(newline=False) 94 | print(emojis(f"Setup complete ✅ {s}")) 95 | return display 96 | -------------------------------------------------------------------------------- /utils/activations.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """Activation functions.""" 3 | 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | 8 | 9 | class SiLU(nn.Module): 10 | @staticmethod 11 | def forward(x): 12 | """ 13 | Applies the Sigmoid-weighted Linear Unit (SiLU) activation function. 
14 | 15 | https://arxiv.org/pdf/1606.08415.pdf. 16 | """ 17 | return x * torch.sigmoid(x) 18 | 19 | 20 | class Hardswish(nn.Module): 21 | @staticmethod 22 | def forward(x): 23 | """ 24 | Applies the Hardswish activation function, compatible with TorchScript, CoreML, and ONNX. 25 | 26 | Equivalent to x * F.hardsigmoid(x) 27 | """ 28 | return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX 29 | 30 | 31 | class Mish(nn.Module): 32 | """Mish activation https://github.com/digantamisra98/Mish.""" 33 | 34 | @staticmethod 35 | def forward(x): 36 | """Applies the Mish activation function, a smooth alternative to ReLU.""" 37 | return x * F.softplus(x).tanh() 38 | 39 | 40 | class MemoryEfficientMish(nn.Module): 41 | class F(torch.autograd.Function): 42 | @staticmethod 43 | def forward(ctx, x): 44 | """Applies the Mish activation function, a smooth ReLU alternative, to the input tensor `x`.""" 45 | ctx.save_for_backward(x) 46 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) 47 | 48 | @staticmethod 49 | def backward(ctx, grad_output): 50 | """Computes the gradient of the Mish activation function with respect to input `x`.""" 51 | x = ctx.saved_tensors[0] 52 | sx = torch.sigmoid(x) 53 | fx = F.softplus(x).tanh() 54 | return grad_output * (fx + x * sx * (1 - fx * fx)) 55 | 56 | def forward(self, x): 57 | """Applies the Mish activation function to the input tensor `x`.""" 58 | return self.F.apply(x) 59 | 60 | 61 | class FReLU(nn.Module): 62 | """FReLU activation https://arxiv.org/abs/2007.11824.""" 63 | 64 | def __init__(self, c1, k=3): # ch_in, kernel 65 | """Initializes FReLU activation with channel `c1` and kernel size `k`.""" 66 | super().__init__() 67 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) 68 | self.bn = nn.BatchNorm2d(c1) 69 | 70 | def forward(self, x): 71 | """ 72 | Applies FReLU activation with max operation between input and BN-convolved input. 73 | 74 | https://arxiv.org/abs/2007.11824 75 | """ 76 | return torch.max(x, self.bn(self.conv(x))) 77 | 78 | 79 | class AconC(nn.Module): 80 | """ 81 | ACON activation (activate or not) function. 82 | 83 | AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter 84 | See "Activate or Not: Learning Customized Activation" https://arxiv.org/pdf/2009.04759.pdf. 85 | """ 86 | 87 | def __init__(self, c1): 88 | """Initializes AconC with learnable parameters p1, p2, and beta for channel-wise activation control.""" 89 | super().__init__() 90 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) 91 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) 92 | self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) 93 | 94 | def forward(self, x): 95 | """Applies AconC activation function with learnable parameters for channel-wise control on input tensor x.""" 96 | dpx = (self.p1 - self.p2) * x 97 | return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x 98 | 99 | 100 | class MetaAconC(nn.Module): 101 | """ 102 | ACON activation (activate or not) function. 103 | 104 | AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter 105 | See "Activate or Not: Learning Customized Activation" https://arxiv.org/pdf/2009.04759.pdf. 
106 | """ 107 | 108 | def __init__(self, c1, k=1, s=1, r=16): 109 | """Initializes MetaAconC with params: channel_in (c1), kernel size (k=1), stride (s=1), reduction (r=16).""" 110 | super().__init__() 111 | c2 = max(r, c1 // r) 112 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) 113 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) 114 | self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) 115 | self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) 116 | # self.bn1 = nn.BatchNorm2d(c2) 117 | # self.bn2 = nn.BatchNorm2d(c1) 118 | 119 | def forward(self, x): 120 | """Applies a forward pass transforming input `x` using learnable parameters and sigmoid activation.""" 121 | y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) 122 | # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 123 | # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable 124 | beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed 125 | dpx = (self.p1 - self.p2) * x 126 | return dpx * torch.sigmoid(beta * dpx) + self.p2 * x 127 | -------------------------------------------------------------------------------- /utils/autobatch.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """Auto-batch utils.""" 3 | 4 | from copy import deepcopy 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from utils.general import LOGGER, colorstr 10 | from utils.torch_utils import profile 11 | 12 | 13 | def check_train_batch_size(model, imgsz=640, amp=True): 14 | """Checks and computes optimal training batch size for YOLOv5 model, given image size and AMP setting.""" 15 | with torch.cuda.amp.autocast(amp): 16 | return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size 17 | 18 | 19 | def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): 20 | """Estimates optimal YOLOv5 batch size using `fraction` of CUDA memory.""" 21 | # Usage: 22 | # import torch 23 | # from utils.autobatch import autobatch 24 | # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) 25 | # print(autobatch(model)) 26 | 27 | # Check device 28 | prefix = colorstr("AutoBatch: ") 29 | LOGGER.info(f"{prefix}Computing optimal batch size for --imgsz {imgsz}") 30 | device = next(model.parameters()).device # get model device 31 | if device.type == "cpu": 32 | LOGGER.info(f"{prefix}CUDA not detected, using default CPU batch-size {batch_size}") 33 | return batch_size 34 | if torch.backends.cudnn.benchmark: 35 | LOGGER.info(f"{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}") 36 | return batch_size 37 | 38 | # Inspect CUDA memory 39 | gb = 1 << 30 # bytes to GiB (1024 ** 3) 40 | d = str(device).upper() # 'CUDA:0' 41 | properties = torch.cuda.get_device_properties(device) # device properties 42 | t = properties.total_memory / gb # GiB total 43 | r = torch.cuda.memory_reserved(device) / gb # GiB reserved 44 | a = torch.cuda.memory_allocated(device) / gb # GiB allocated 45 | f = t - (r + a) # GiB free 46 | LOGGER.info(f"{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free") 47 | 48 | # Profile batch sizes 49 | batch_sizes = [1, 2, 4, 8, 16] 50 | try: 51 | img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] 52 | results = profile(img, model, n=3, device=device) 53 | except Exception as e: 54 | LOGGER.warning(f"{prefix}{e}") 55 | 56 | # Fit a solution 57 | y = [x[2] for x in results if 
x] # memory [2] 58 | p = np.polyfit(batch_sizes[: len(y)], y, deg=1) # first degree polynomial fit 59 | b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) 60 | if None in results: # some sizes failed 61 | i = results.index(None) # first fail index 62 | if b >= batch_sizes[i]: # y intercept above failure point 63 | b = batch_sizes[max(i - 1, 0)] # select prior safe point 64 | if b < 1 or b > 1024: # b outside of safe range 65 | b = batch_size 66 | LOGGER.warning(f"{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.") 67 | 68 | fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted 69 | LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅") 70 | return b 71 | -------------------------------------------------------------------------------- /utils/aws/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/utils/aws/__init__.py -------------------------------------------------------------------------------- /utils/aws/mime.sh: -------------------------------------------------------------------------------- 1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ 2 | # This script will run on every instance restart, not only on first start 3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- 4 | 5 | Content-Type: multipart/mixed; boundary="//" 6 | MIME-Version: 1.0 7 | 8 | --// 9 | Content-Type: text/cloud-config; charset="us-ascii" 10 | MIME-Version: 1.0 11 | Content-Transfer-Encoding: 7bit 12 | Content-Disposition: attachment; filename="cloud-config.txt" 13 | 14 | #cloud-config 15 | cloud_final_modules: 16 | - [scripts-user, always] 17 | 18 | --// 19 | Content-Type: text/x-shellscript; charset="us-ascii" 20 | MIME-Version: 1.0 21 | Content-Transfer-Encoding: 7bit 22 | Content-Disposition: attachment; filename="userdata.txt" 23 | 24 | #!/bin/bash 25 | # --- paste contents of userdata.sh here --- 26 | --// 27 | -------------------------------------------------------------------------------- /utils/aws/resume.py: -------------------------------------------------------------------------------- 1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings 2 | # Usage: $ python utils/aws/resume.py 3 | 4 | import os 5 | import sys 6 | from pathlib import Path 7 | 8 | import torch 9 | import yaml 10 | 11 | FILE = Path(__file__).resolve() 12 | ROOT = FILE.parents[2] # YOLOv5 root directory 13 | if str(ROOT) not in sys.path: 14 | sys.path.append(str(ROOT)) # add ROOT to PATH 15 | 16 | port = 0 # --master_port 17 | path = Path("").resolve() 18 | for last in path.rglob("*/**/last.pt"): 19 | ckpt = torch.load(last) 20 | if ckpt["optimizer"] is None: 21 | continue 22 | 23 | # Load opt.yaml 24 | with open(last.parent.parent / "opt.yaml", errors="ignore") as f: 25 | opt = yaml.safe_load(f) 26 | 27 | # Get device count 28 | d = opt["device"].split(",") # devices 29 | nd = len(d) # number of devices 30 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel 31 | 32 | if ddp: # multi-GPU 33 | port += 1 34 | cmd = f"python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}" 35 | else: # single-GPU 36 | cmd = f"python train.py --resume {last}" 37 | 38 | cmd += " > 
/dev/null 2>&1 &" # redirect output to dev/null and run in daemon thread 39 | print(cmd) 40 | os.system(cmd) 41 | -------------------------------------------------------------------------------- /utils/aws/userdata.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html 3 | # This script will run only once on first instance start (for a re-start script see mime.sh) 4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir 5 | # Use >300 GB SSD 6 | 7 | cd home/ubuntu 8 | if [ ! -d yolov5 ]; then 9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker 10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 11 | cd yolov5 12 | bash data/scripts/get_coco.sh && echo "COCO done." & 13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & 14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & 15 | wait && echo "All tasks done." # finish background tasks 16 | else 17 | echo "Running re-start script." # resume interrupted runs 18 | i=0 19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' 20 | while IFS= read -r id; do 21 | ((i++)) 22 | echo "restarting container $i: $id" 23 | sudo docker start $id 24 | # sudo docker exec -it $id python train.py --resume # single-GPU 25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario 26 | done <<<"$list" 27 | fi 28 | -------------------------------------------------------------------------------- /utils/callbacks.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """Callback utils.""" 3 | 4 | import threading 5 | 6 | 7 | class Callbacks: 8 | """Handles all registered callbacks for YOLOv5 Hooks.""" 9 | 10 | def __init__(self): 11 | """Initializes a Callbacks object to manage registered YOLOv5 training event hooks.""" 12 | self._callbacks = { 13 | "on_pretrain_routine_start": [], 14 | "on_pretrain_routine_end": [], 15 | "on_train_start": [], 16 | "on_train_epoch_start": [], 17 | "on_train_batch_start": [], 18 | "optimizer_step": [], 19 | "on_before_zero_grad": [], 20 | "on_train_batch_end": [], 21 | "on_train_epoch_end": [], 22 | "on_val_start": [], 23 | "on_val_batch_start": [], 24 | "on_val_image_end": [], 25 | "on_val_batch_end": [], 26 | "on_val_end": [], 27 | "on_fit_epoch_end": [], # fit = train + val 28 | "on_model_save": [], 29 | "on_train_end": [], 30 | "on_params_update": [], 31 | "teardown": [], 32 | } 33 | self.stop_training = False # set True to interrupt training 34 | 35 | def register_action(self, hook, name="", callback=None): 36 | """ 37 | Register a new action to a callback hook. 38 | 39 | Args: 40 | hook: The callback hook name to register the action to 41 | name: The name of the action for later reference 42 | callback: The callback to fire 43 | """ 44 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" 45 | assert callable(callback), f"callback '{callback}' is not callable" 46 | self._callbacks[hook].append({"name": name, "callback": callback}) 47 | 48 | def get_registered_actions(self, hook=None): 49 | """ 50 | Returns all the registered actions by callback hook. 
51 | 52 | Args: 53 | hook: The name of the hook to check, defaults to all 54 | """ 55 | return self._callbacks[hook] if hook else self._callbacks 56 | 57 | def run(self, hook, *args, thread=False, **kwargs): 58 | """ 59 | Loop through the registered actions and fire all callbacks on main thread. 60 | 61 | Args: 62 | hook: The name of the hook to check, defaults to all 63 | args: Arguments to receive from YOLOv5 64 | thread: (boolean) Run callbacks in daemon thread 65 | kwargs: Keyword Arguments to receive from YOLOv5 66 | """ 67 | 68 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" 69 | for logger in self._callbacks[hook]: 70 | if thread: 71 | threading.Thread(target=logger["callback"], args=args, kwargs=kwargs, daemon=True).start() 72 | else: 73 | logger["callback"](*args, **kwargs) 74 | -------------------------------------------------------------------------------- /utils/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 3 | # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference 4 | 5 | # Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch 6 | FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime 7 | 8 | # Downloads to user config dir 9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ 10 | 11 | # Install linux packages 12 | ENV DEBIAN_FRONTEND noninteractive 13 | RUN apt update 14 | RUN TZ=Etc/UTC apt install -y tzdata 15 | RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg 16 | # RUN alias python=python3 17 | 18 | # Security updates 19 | # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 20 | RUN apt upgrade --no-install-recommends -y openssl 21 | 22 | # Create working directory 23 | RUN rm -rf /usr/src/app && mkdir -p /usr/src/app 24 | WORKDIR /usr/src/app 25 | 26 | # Copy contents 27 | COPY . /usr/src/app 28 | 29 | # Install pip packages 30 | COPY requirements.txt . 31 | RUN python3 -m pip install --upgrade pip wheel 32 | RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ 33 | coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' 34 | # tensorflow tensorflowjs \ 35 | 36 | # Set environment variables 37 | ENV OMP_NUM_THREADS=1 38 | 39 | # Cleanup 40 | ENV DEBIAN_FRONTEND teletype 41 | 42 | 43 | # Usage Examples ------------------------------------------------------------------------------------------------------- 44 | 45 | # Build and Push 46 | # t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . 
&& sudo docker push $t 47 | 48 | # Pull and Run 49 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t 50 | 51 | # Pull and Run with local directory access 52 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t 53 | 54 | # Kill all 55 | # sudo docker kill $(sudo docker ps -q) 56 | 57 | # Kill all image-based 58 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) 59 | 60 | # DockerHub tag update 61 | # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew 62 | 63 | # Clean up 64 | # sudo docker system prune -a --volumes 65 | 66 | # Update Ubuntu drivers 67 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ 68 | 69 | # DDP test 70 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 71 | 72 | # GCP VM from Image 73 | # docker.io/ultralytics/yolov5:latest 74 | -------------------------------------------------------------------------------- /utils/docker/Dockerfile-arm64: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 3 | # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi 4 | 5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu 6 | FROM arm64v8/ubuntu:22.10 7 | 8 | # Downloads to user config dir 9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ 10 | 11 | # Install linux packages 12 | ENV DEBIAN_FRONTEND noninteractive 13 | RUN apt update 14 | RUN TZ=Etc/UTC apt install -y tzdata 15 | RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1 libglib2.0-0 libpython3-dev 16 | # RUN alias python=python3 17 | 18 | # Install pip packages 19 | COPY requirements.txt . 20 | RUN python3 -m pip install --upgrade pip wheel 21 | RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ 22 | coremltools onnx onnxruntime 23 | # tensorflow-aarch64 tensorflowjs \ 24 | 25 | # Create working directory 26 | RUN mkdir -p /usr/src/app 27 | WORKDIR /usr/src/app 28 | 29 | # Copy contents 30 | COPY . /usr/src/app 31 | ENV DEBIAN_FRONTEND teletype 32 | 33 | 34 | # Usage Examples ------------------------------------------------------------------------------------------------------- 35 | 36 | # Build and Push 37 | # t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . 
&& sudo docker push $t 38 | 39 | # Pull and Run 40 | # t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t 41 | -------------------------------------------------------------------------------- /utils/docker/Dockerfile-cpu: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 3 | # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments 4 | 5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu 6 | FROM ubuntu:23.10 7 | 8 | # Downloads to user config dir 9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ 10 | 11 | # Install linux packages 12 | # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package 13 | RUN apt update \ 14 | && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 15 | # RUN alias python=python3 16 | 17 | # Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' avoid 'externally-managed-environment' Ubuntu nightly error 18 | RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED 19 | 20 | # Install pip packages 21 | COPY requirements.txt . 22 | RUN python3 -m pip install --upgrade pip wheel 23 | RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ 24 | coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' \ 25 | # tensorflow tensorflowjs \ 26 | --extra-index-url https://download.pytorch.org/whl/cpu 27 | 28 | # Create working directory 29 | RUN mkdir -p /usr/src/app 30 | WORKDIR /usr/src/app 31 | 32 | # Copy contents 33 | COPY . /usr/src/app 34 | 35 | 36 | # Usage Examples ------------------------------------------------------------------------------------------------------- 37 | 38 | # Build and Push 39 | # t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t 40 | 41 | # Pull and Run 42 | # t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t 43 | -------------------------------------------------------------------------------- /utils/downloads.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """Download utils.""" 3 | 4 | import logging 5 | import subprocess 6 | import urllib 7 | from pathlib import Path 8 | 9 | import requests 10 | import torch 11 | 12 | 13 | def is_url(url, check=True): 14 | """Determines if a string is a URL and optionally checks its existence online, returning a boolean.""" 15 | try: 16 | url = str(url) 17 | result = urllib.parse.urlparse(url) 18 | assert all([result.scheme, result.netloc]) # check if is url 19 | return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online 20 | except (AssertionError, urllib.request.HTTPError): 21 | return False 22 | 23 | 24 | def gsutil_getsize(url=""): 25 | """ 26 | Returns the size in bytes of a file at a Google Cloud Storage URL using `gsutil du`. 27 | 28 | Returns 0 if the command fails or output is empty. 
29 | """ 30 | output = subprocess.check_output(["gsutil", "du", url], shell=True, encoding="utf-8") 31 | return int(output.split()[0]) if output else 0 32 | 33 | 34 | def url_getsize(url="https://ultralytics.com/images/bus.jpg"): 35 | """Returns the size in bytes of a downloadable file at a given URL; defaults to -1 if not found.""" 36 | response = requests.head(url, allow_redirects=True) 37 | return int(response.headers.get("content-length", -1)) 38 | 39 | 40 | def curl_download(url, filename, *, silent: bool = False) -> bool: 41 | """Download a file from a url to a filename using curl.""" 42 | silent_option = "sS" if silent else "" # silent 43 | proc = subprocess.run( 44 | [ 45 | "curl", 46 | "-#", 47 | f"-{silent_option}L", 48 | url, 49 | "--output", 50 | filename, 51 | "--retry", 52 | "9", 53 | "-C", 54 | "-", 55 | ] 56 | ) 57 | return proc.returncode == 0 58 | 59 | 60 | def safe_download(file, url, url2=None, min_bytes=1e0, error_msg=""): 61 | """ 62 | Downloads a file from a URL (or alternate URL) to a specified path if file is above a minimum size. 63 | 64 | Removes incomplete downloads. 65 | """ 66 | from utils.general import LOGGER 67 | 68 | file = Path(file) 69 | assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" 70 | try: # url1 71 | LOGGER.info(f"Downloading {url} to {file}...") 72 | torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) 73 | assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check 74 | except Exception as e: # url2 75 | if file.exists(): 76 | file.unlink() # remove partial downloads 77 | LOGGER.info(f"ERROR: {e}\nRe-attempting {url2 or url} to {file}...") 78 | # curl download, retry and resume on fail 79 | curl_download(url2 or url, file) 80 | finally: 81 | if not file.exists() or file.stat().st_size < min_bytes: # check 82 | if file.exists(): 83 | file.unlink() # remove partial downloads 84 | LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") 85 | LOGGER.info("") 86 | 87 | 88 | def attempt_download(file, repo="ultralytics/yolov5", release="v7.0"): 89 | """Downloads a file from GitHub release assets or via direct URL if not found locally, supporting backup 90 | versions. 91 | """ 92 | from utils.general import LOGGER 93 | 94 | def github_assets(repository, version="latest"): 95 | # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) 96 | if version != "latest": 97 | version = f"tags/{version}" # i.e. tags/v7.0 98 | response = requests.get(f"https://api.github.com/repos/{repository}/releases/{version}").json() # github api 99 | return response["tag_name"], [x["name"] for x in response["assets"]] # tag, assets 100 | 101 | file = Path(str(file).strip().replace("'", "")) 102 | if not file.exists(): 103 | # URL specified 104 | name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. 105 | if str(file).startswith(("http:/", "https:/")): # download 106 | url = str(file).replace(":/", "://") # Pathlib turns :// -> :/ 107 | file = name.split("?")[0] # parse authentication https://url.com/file.txt?auth... 
108 | if Path(file).is_file(): 109 | LOGGER.info(f"Found {url} locally at {file}") # file already exists 110 | else: 111 | safe_download(file=file, url=url, min_bytes=1e5) 112 | return file 113 | 114 | # GitHub assets 115 | assets = [f"yolov5{size}{suffix}.pt" for size in "nsmlx" for suffix in ("", "6", "-cls", "-seg")] # default 116 | try: 117 | tag, assets = github_assets(repo, release) 118 | except Exception: 119 | try: 120 | tag, assets = github_assets(repo) # latest release 121 | except Exception: 122 | try: 123 | tag = subprocess.check_output("git tag", shell=True, stderr=subprocess.STDOUT).decode().split()[-1] 124 | except Exception: 125 | tag = release 126 | 127 | if name in assets: 128 | file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) 129 | safe_download( 130 | file, 131 | url=f"https://github.com/{repo}/releases/download/{tag}/{name}", 132 | min_bytes=1e5, 133 | error_msg=f"{file} missing, try downloading from https://github.com/{repo}/releases/{tag}", 134 | ) 135 | 136 | return str(file) 137 | -------------------------------------------------------------------------------- /utils/flask_rest_api/README.md: -------------------------------------------------------------------------------- 1 | # Flask REST API 2 | 3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). 4 | 5 | ## Requirements 6 | 7 | [Flask](https://palletsprojects.com/p/flask/) is required. Install with: 8 | 9 | ```shell 10 | $ pip install Flask 11 | ``` 12 | 13 | ## Run 14 | 15 | After Flask installation run: 16 | 17 | ```shell 18 | $ python3 restapi.py --port 5000 19 | ``` 20 | 21 | Then use [curl](https://curl.se/) to perform a request: 22 | 23 | ```shell 24 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' 25 | ``` 26 | 27 | The model inference results are returned as a JSON response: 28 | 29 | ```json 30 | [ 31 | { 32 | "class": 0, 33 | "confidence": 0.8900438547, 34 | "height": 0.9318675399, 35 | "name": "person", 36 | "width": 0.3264600933, 37 | "xcenter": 0.7438579798, 38 | "ycenter": 0.5207948685 39 | }, 40 | { 41 | "class": 0, 42 | "confidence": 0.8440024257, 43 | "height": 0.7155083418, 44 | "name": "person", 45 | "width": 0.6546785235, 46 | "xcenter": 0.427829951, 47 | "ycenter": 0.6334488392 48 | }, 49 | { 50 | "class": 27, 51 | "confidence": 0.3771208823, 52 | "height": 0.3902671337, 53 | "name": "tie", 54 | "width": 0.0696444362, 55 | "xcenter": 0.3675483763, 56 | "ycenter": 0.7991207838 57 | }, 58 | { 59 | "class": 27, 60 | "confidence": 0.3527112305, 61 | "height": 0.1540903747, 62 | "name": "tie", 63 | "width": 0.0336618312, 64 | "xcenter": 0.7814827561, 65 | "ycenter": 0.5065554976 66 | } 67 | ] 68 | ``` 69 | 70 | An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py` 71 | -------------------------------------------------------------------------------- /utils/flask_rest_api/example_request.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """Perform test request.""" 3 | 4 | import pprint 5 | 6 | import requests 7 | 8 | DETECTION_URL = 
"http://localhost:5000/v1/object-detection/yolov5s" 9 | IMAGE = "zidane.jpg" 10 | 11 | # Read image 12 | with open(IMAGE, "rb") as f: 13 | image_data = f.read() 14 | 15 | response = requests.post(DETECTION_URL, files={"image": image_data}).json() 16 | 17 | pprint.pprint(response) 18 | -------------------------------------------------------------------------------- /utils/flask_rest_api/restapi.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """Run a Flask REST API exposing one or more YOLOv5s models.""" 3 | 4 | import argparse 5 | import io 6 | 7 | import torch 8 | from flask import Flask, request 9 | from PIL import Image 10 | 11 | app = Flask(__name__) 12 | models = {} 13 | 14 | DETECTION_URL = "/v1/object-detection/" 15 | 16 | 17 | @app.route(DETECTION_URL, methods=["POST"]) 18 | def predict(model): 19 | """Predict and return object detections in JSON format given an image and model name via a Flask REST API POST 20 | request. 21 | """ 22 | if request.method != "POST": 23 | return 24 | 25 | if request.files.get("image"): 26 | # Method 1 27 | # with request.files["image"] as f: 28 | # im = Image.open(io.BytesIO(f.read())) 29 | 30 | # Method 2 31 | im_file = request.files["image"] 32 | im_bytes = im_file.read() 33 | im = Image.open(io.BytesIO(im_bytes)) 34 | 35 | if model in models: 36 | results = models[model](im, size=640) # reduce size=320 for faster inference 37 | return results.pandas().xyxy[0].to_json(orient="records") 38 | 39 | 40 | if __name__ == "__main__": 41 | parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") 42 | parser.add_argument("--port", default=5000, type=int, help="port number") 43 | parser.add_argument("--model", nargs="+", default=["yolov5s"], help="model(s) to run, i.e. --model yolov5n yolov5s") 44 | opt = parser.parse_args() 45 | 46 | for m in opt.model: 47 | models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) 48 | 49 | app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat 50 | -------------------------------------------------------------------------------- /utils/google_app_engine/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gcr.io/google-appengine/python 2 | 3 | # Create a virtualenv for dependencies. This isolates these packages from 4 | # system-level packages. 5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2. 6 | RUN virtualenv /env -p python3 7 | 8 | # Setting these environment variables are the same as running 9 | # source /env/bin/activate. 10 | ENV VIRTUAL_ENV /env 11 | ENV PATH /env/bin:$PATH 12 | 13 | RUN apt-get update && apt-get install -y python-opencv 14 | 15 | # Copy the application's requirements.txt and run pip to install all 16 | # dependencies into the virtualenv. 17 | ADD requirements.txt /app/requirements.txt 18 | RUN pip install -r /app/requirements.txt 19 | 20 | # Add the application source code. 21 | ADD . /app 22 | 23 | # Run a WSGI server to serve the application. gunicorn must be declared as 24 | # a dependency in requirements.txt. 
25 | CMD gunicorn -b :$PORT main:app 26 | -------------------------------------------------------------------------------- /utils/google_app_engine/additional_requirements.txt: -------------------------------------------------------------------------------- 1 | # add these requirements in your app on top of the existing ones 2 | pip==23.3 3 | Flask==2.3.2 4 | gunicorn==19.10.0 5 | werkzeug>=3.0.1 # not directly required, pinned by Snyk to avoid a vulnerability 6 | -------------------------------------------------------------------------------- /utils/google_app_engine/app.yaml: -------------------------------------------------------------------------------- 1 | runtime: custom 2 | env: flex 3 | 4 | service: yolov5app 5 | 6 | liveness_check: 7 | initial_delay_sec: 600 8 | 9 | manual_scaling: 10 | instances: 1 11 | resources: 12 | cpu: 1 13 | memory_gb: 4 14 | disk_size_gb: 20 15 | -------------------------------------------------------------------------------- /utils/loggers/clearml/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/utils/loggers/clearml/__init__.py -------------------------------------------------------------------------------- /utils/loggers/clearml/hpo.py: -------------------------------------------------------------------------------- 1 | from clearml import Task 2 | 3 | # Connecting ClearML with the current process, 4 | # from here on everything is logged automatically 5 | from clearml.automation import HyperParameterOptimizer, UniformParameterRange 6 | from clearml.automation.optuna import OptimizerOptuna 7 | 8 | task = Task.init( 9 | project_name="Hyper-Parameter Optimization", 10 | task_name="YOLOv5", 11 | task_type=Task.TaskTypes.optimizer, 12 | reuse_last_task_id=False, 13 | ) 14 | 15 | # Example use case: 16 | optimizer = HyperParameterOptimizer( 17 | # This is the experiment we want to optimize 18 | base_task_id="", 19 | # here we define the hyper-parameters to optimize 20 | # Notice: The parameter name should exactly match what you see in the UI: section_name/parameter_name 21 | # For Example, here we see in the base experiment a section Named: "General" 22 | # under it a parameter named "batch_size", this becomes "General/batch_size" 23 | # If you have `argparse` for example, then arguments will appear under the "Args" section, 24 | # and you should instead pass "Args/batch_size" 25 | hyper_parameters=[ 26 | UniformParameterRange("Hyperparameters/lr0", min_value=1e-5, max_value=1e-1), 27 | UniformParameterRange("Hyperparameters/lrf", min_value=0.01, max_value=1.0), 28 | UniformParameterRange("Hyperparameters/momentum", min_value=0.6, max_value=0.98), 29 | UniformParameterRange("Hyperparameters/weight_decay", min_value=0.0, max_value=0.001), 30 | UniformParameterRange("Hyperparameters/warmup_epochs", min_value=0.0, max_value=5.0), 31 | UniformParameterRange("Hyperparameters/warmup_momentum", min_value=0.0, max_value=0.95), 32 | UniformParameterRange("Hyperparameters/warmup_bias_lr", min_value=0.0, max_value=0.2), 33 | UniformParameterRange("Hyperparameters/box", min_value=0.02, max_value=0.2), 34 | UniformParameterRange("Hyperparameters/cls", min_value=0.2, max_value=4.0), 35 | UniformParameterRange("Hyperparameters/cls_pw", min_value=0.5, max_value=2.0), 36 | UniformParameterRange("Hyperparameters/obj", min_value=0.2, max_value=4.0), 37 | UniformParameterRange("Hyperparameters/obj_pw", min_value=0.5, max_value=2.0), 38 | 
UniformParameterRange("Hyperparameters/iou_t", min_value=0.1, max_value=0.7), 39 | UniformParameterRange("Hyperparameters/anchor_t", min_value=2.0, max_value=8.0), 40 | UniformParameterRange("Hyperparameters/fl_gamma", min_value=0.0, max_value=4.0), 41 | UniformParameterRange("Hyperparameters/hsv_h", min_value=0.0, max_value=0.1), 42 | UniformParameterRange("Hyperparameters/hsv_s", min_value=0.0, max_value=0.9), 43 | UniformParameterRange("Hyperparameters/hsv_v", min_value=0.0, max_value=0.9), 44 | UniformParameterRange("Hyperparameters/degrees", min_value=0.0, max_value=45.0), 45 | UniformParameterRange("Hyperparameters/translate", min_value=0.0, max_value=0.9), 46 | UniformParameterRange("Hyperparameters/scale", min_value=0.0, max_value=0.9), 47 | UniformParameterRange("Hyperparameters/shear", min_value=0.0, max_value=10.0), 48 | UniformParameterRange("Hyperparameters/perspective", min_value=0.0, max_value=0.001), 49 | UniformParameterRange("Hyperparameters/flipud", min_value=0.0, max_value=1.0), 50 | UniformParameterRange("Hyperparameters/fliplr", min_value=0.0, max_value=1.0), 51 | UniformParameterRange("Hyperparameters/mosaic", min_value=0.0, max_value=1.0), 52 | UniformParameterRange("Hyperparameters/mixup", min_value=0.0, max_value=1.0), 53 | UniformParameterRange("Hyperparameters/copy_paste", min_value=0.0, max_value=1.0), 54 | ], 55 | # this is the objective metric we want to maximize/minimize 56 | objective_metric_title="metrics", 57 | objective_metric_series="mAP_0.5", 58 | # now we decide if we want to maximize it or minimize it (accuracy we maximize) 59 | objective_metric_sign="max", 60 | # let us limit the number of concurrent experiments, 61 | # this in turn will make sure we don't bombard the scheduler with experiments. 62 | # if we have an auto-scaler connected, this, by proxy, will limit the number of machines 63 | max_number_of_concurrent_tasks=1, 64 | # this is the optimizer class (actually doing the optimization) 65 | # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band) 66 | optimizer_class=OptimizerOptuna, 67 | # If specified only the top K performing Tasks will be kept, the others will be automatically archived 68 | save_top_k_tasks_only=5, # 5, 69 | compute_time_limit=None, 70 | total_max_jobs=20, 71 | min_iteration_per_job=None, 72 | max_iteration_per_job=None, 73 | ) 74 | 75 | # report every 10 seconds, this is way too often, but we are testing here 76 | optimizer.set_report_period(10 / 60) 77 | # You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent 78 | # an_optimizer.start_locally(job_complete_callback=job_complete_callback) 79 | # set the time limit for the optimization process (2 hours) 80 | optimizer.set_time_limit(in_minutes=120.0) 81 | # Start the optimization process in the local environment 82 | optimizer.start_locally() 83 | # wait until process is done (notice we are controlling the optimization process in the background) 84 | optimizer.wait() 85 | # make sure background optimization stopped 86 | optimizer.stop() 87 | 88 | print("We are done, good bye") 89 | -------------------------------------------------------------------------------- /utils/loggers/comet/comet_utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from urllib.parse import urlparse 4 | 5 | try: 6 | import comet_ml 7 | except ImportError: 8 | comet_ml = None 9 | 10 | import yaml 11 | 12 | logger = 
logging.getLogger(__name__) 13 | 14 | COMET_PREFIX = "comet://" 15 | COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") 16 | COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") 17 | 18 | 19 | def download_model_checkpoint(opt, experiment): 20 | """Downloads YOLOv5 model checkpoint from Comet ML experiment, updating `opt.weights` with download path.""" 21 | model_dir = f"{opt.project}/{experiment.name}" 22 | os.makedirs(model_dir, exist_ok=True) 23 | 24 | model_name = COMET_MODEL_NAME 25 | model_asset_list = experiment.get_model_asset_list(model_name) 26 | 27 | if len(model_asset_list) == 0: 28 | logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") 29 | return 30 | 31 | model_asset_list = sorted( 32 | model_asset_list, 33 | key=lambda x: x["step"], 34 | reverse=True, 35 | ) 36 | logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} 37 | 38 | resource_url = urlparse(opt.weights) 39 | checkpoint_filename = resource_url.query 40 | 41 | if checkpoint_filename: 42 | asset_id = logged_checkpoint_map.get(checkpoint_filename) 43 | else: 44 | asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) 45 | checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME 46 | 47 | if asset_id is None: 48 | logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") 49 | return 50 | 51 | try: 52 | logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") 53 | asset_filename = checkpoint_filename 54 | 55 | model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) 56 | model_download_path = f"{model_dir}/{asset_filename}" 57 | with open(model_download_path, "wb") as f: 58 | f.write(model_binary) 59 | 60 | opt.weights = model_download_path 61 | 62 | except Exception as e: 63 | logger.warning("COMET WARNING: Unable to download checkpoint from Comet") 64 | logger.exception(e) 65 | 66 | 67 | def set_opt_parameters(opt, experiment): 68 | """ 69 | Update the opts Namespace with parameters from Comet's ExistingExperiment when resuming a run. 70 | 71 | Args: 72 | opt (argparse.Namespace): Namespace of command line options 73 | experiment (comet_ml.APIExperiment): Comet API Experiment object 74 | """ 75 | asset_list = experiment.get_asset_list() 76 | resume_string = opt.resume 77 | 78 | for asset in asset_list: 79 | if asset["fileName"] == "opt.yaml": 80 | asset_id = asset["assetId"] 81 | asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) 82 | opt_dict = yaml.safe_load(asset_binary) 83 | for key, value in opt_dict.items(): 84 | setattr(opt, key, value) 85 | opt.resume = resume_string 86 | 87 | # Save hyperparameters to YAML file 88 | # Necessary to pass checks in training script 89 | save_dir = f"{opt.project}/{experiment.name}" 90 | os.makedirs(save_dir, exist_ok=True) 91 | 92 | hyp_yaml_path = f"{save_dir}/hyp.yaml" 93 | with open(hyp_yaml_path, "w") as f: 94 | yaml.dump(opt.hyp, f) 95 | opt.hyp = hyp_yaml_path 96 | 97 | 98 | def check_comet_weights(opt): 99 | """ 100 | Downloads model weights from Comet and updates the weights path to point to saved weights location. 
101 | 102 | Args: 103 | opt (argparse.Namespace): Command Line arguments passed 104 | to YOLOv5 training script 105 | 106 | Returns: 107 | None/bool: Return True if weights are successfully downloaded 108 | else return None 109 | """ 110 | if comet_ml is None: 111 | return 112 | 113 | if isinstance(opt.weights, str) and opt.weights.startswith(COMET_PREFIX): 114 | api = comet_ml.API() 115 | resource = urlparse(opt.weights) 116 | experiment_path = f"{resource.netloc}{resource.path}" 117 | experiment = api.get(experiment_path) 118 | download_model_checkpoint(opt, experiment) 119 | return True 120 | 121 | return None 122 | 123 | 124 | def check_comet_resume(opt): 125 | """ 126 | Restores run parameters to its original state based on the model checkpoint and logged Experiment parameters. 127 | 128 | Args: 129 | opt (argparse.Namespace): Command Line arguments passed 130 | to YOLOv5 training script 131 | 132 | Returns: 133 | None/bool: Return True if the run is restored successfully 134 | else return None 135 | """ 136 | if comet_ml is None: 137 | return 138 | 139 | if isinstance(opt.resume, str) and opt.resume.startswith(COMET_PREFIX): 140 | api = comet_ml.API() 141 | resource = urlparse(opt.resume) 142 | experiment_path = f"{resource.netloc}{resource.path}" 143 | experiment = api.get(experiment_path) 144 | set_opt_parameters(opt, experiment) 145 | download_model_checkpoint(opt, experiment) 146 | 147 | return True 148 | 149 | return None 150 | -------------------------------------------------------------------------------- /utils/loggers/comet/optimizer_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "algorithm": "random", 3 | "parameters": { 4 | "anchor_t": { 5 | "type": "discrete", 6 | "values": [2, 8] 7 | }, 8 | "batch_size": { 9 | "type": "discrete", 10 | "values": [16, 32, 64] 11 | }, 12 | "box": { 13 | "type": "discrete", 14 | "values": [0.02, 0.2] 15 | }, 16 | "cls": { 17 | "type": "discrete", 18 | "values": [0.2] 19 | }, 20 | "cls_pw": { 21 | "type": "discrete", 22 | "values": [0.5] 23 | }, 24 | "copy_paste": { 25 | "type": "discrete", 26 | "values": [1] 27 | }, 28 | "degrees": { 29 | "type": "discrete", 30 | "values": [0, 45] 31 | }, 32 | "epochs": { 33 | "type": "discrete", 34 | "values": [5] 35 | }, 36 | "fl_gamma": { 37 | "type": "discrete", 38 | "values": [0] 39 | }, 40 | "fliplr": { 41 | "type": "discrete", 42 | "values": [0] 43 | }, 44 | "flipud": { 45 | "type": "discrete", 46 | "values": [0] 47 | }, 48 | "hsv_h": { 49 | "type": "discrete", 50 | "values": [0] 51 | }, 52 | "hsv_s": { 53 | "type": "discrete", 54 | "values": [0] 55 | }, 56 | "hsv_v": { 57 | "type": "discrete", 58 | "values": [0] 59 | }, 60 | "iou_t": { 61 | "type": "discrete", 62 | "values": [0.7] 63 | }, 64 | "lr0": { 65 | "type": "discrete", 66 | "values": [1e-5, 0.1] 67 | }, 68 | "lrf": { 69 | "type": "discrete", 70 | "values": [0.01, 1] 71 | }, 72 | "mixup": { 73 | "type": "discrete", 74 | "values": [1] 75 | }, 76 | "momentum": { 77 | "type": "discrete", 78 | "values": [0.6] 79 | }, 80 | "mosaic": { 81 | "type": "discrete", 82 | "values": [0] 83 | }, 84 | "obj": { 85 | "type": "discrete", 86 | "values": [0.2] 87 | }, 88 | "obj_pw": { 89 | "type": "discrete", 90 | "values": [0.5] 91 | }, 92 | "optimizer": { 93 | "type": "categorical", 94 | "values": ["SGD", "Adam", "AdamW"] 95 | }, 96 | "perspective": { 97 | "type": "discrete", 98 | "values": [0] 99 | }, 100 | "scale": { 101 | "type": "discrete", 102 | "values": [0] 103 | }, 104 | "shear": { 105 | "type": 
"discrete", 106 | "values": [0] 107 | }, 108 | "translate": { 109 | "type": "discrete", 110 | "values": [0] 111 | }, 112 | "warmup_bias_lr": { 113 | "type": "discrete", 114 | "values": [0, 0.2] 115 | }, 116 | "warmup_epochs": { 117 | "type": "discrete", 118 | "values": [5] 119 | }, 120 | "warmup_momentum": { 121 | "type": "discrete", 122 | "values": [0, 0.95] 123 | }, 124 | "weight_decay": { 125 | "type": "discrete", 126 | "values": [0, 0.001] 127 | } 128 | }, 129 | "spec": { 130 | "maxCombo": 0, 131 | "metric": "metrics/mAP_0.5", 132 | "objective": "maximize" 133 | }, 134 | "trials": 1 135 | } 136 | -------------------------------------------------------------------------------- /utils/loggers/wandb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/utils/loggers/wandb/__init__.py -------------------------------------------------------------------------------- /utils/segment/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/utils/segment/__init__.py -------------------------------------------------------------------------------- /utils/segment/augmentations.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """Image augmentation functions.""" 3 | 4 | import math 5 | import random 6 | 7 | import cv2 8 | import numpy as np 9 | 10 | from ..augmentations import box_candidates 11 | from ..general import resample_segments, segment2box 12 | 13 | 14 | def mixup(im, labels, segments, im2, labels2, segments2): 15 | """ 16 | Applies MixUp augmentation blending two images, labels, and segments with a random ratio. 
17 | 18 | See https://arxiv.org/pdf/1710.09412.pdf 19 | """ 20 | r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 21 | im = (im * r + im2 * (1 - r)).astype(np.uint8) 22 | labels = np.concatenate((labels, labels2), 0) 23 | segments = np.concatenate((segments, segments2), 0) 24 | return im, labels, segments 25 | 26 | 27 | def random_perspective( 28 | im, targets=(), segments=(), degrees=10, translate=0.1, scale=0.1, shear=10, perspective=0.0, border=(0, 0) 29 | ): 30 | # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) 31 | # targets = [cls, xyxy] 32 | 33 | height = im.shape[0] + border[0] * 2 # shape(h,w,c) 34 | width = im.shape[1] + border[1] * 2 35 | 36 | # Center 37 | C = np.eye(3) 38 | C[0, 2] = -im.shape[1] / 2 # x translation (pixels) 39 | C[1, 2] = -im.shape[0] / 2 # y translation (pixels) 40 | 41 | # Perspective 42 | P = np.eye(3) 43 | P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) 44 | P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) 45 | 46 | # Rotation and Scale 47 | R = np.eye(3) 48 | a = random.uniform(-degrees, degrees) 49 | # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations 50 | s = random.uniform(1 - scale, 1 + scale) 51 | # s = 2 ** random.uniform(-scale, scale) 52 | R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) 53 | 54 | # Shear 55 | S = np.eye(3) 56 | S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) 57 | S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) 58 | 59 | # Translation 60 | T = np.eye(3) 61 | T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) 62 | T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) 63 | 64 | # Combined rotation matrix 65 | M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT 66 | if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed 67 | if perspective: 68 | im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) 69 | else: # affine 70 | im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) 71 | 72 | # Visualize 73 | # import matplotlib.pyplot as plt 74 | # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() 75 | # ax[0].imshow(im[:, :, ::-1]) # base 76 | # ax[1].imshow(im2[:, :, ::-1]) # warped 77 | 78 | # Transform label coordinates 79 | n = len(targets) 80 | new_segments = [] 81 | if n: 82 | new = np.zeros((n, 4)) 83 | segments = resample_segments(segments) # upsample 84 | for i, segment in enumerate(segments): 85 | xy = np.ones((len(segment), 3)) 86 | xy[:, :2] = segment 87 | xy = xy @ M.T # transform 88 | xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine 89 | 90 | # clip 91 | new[i] = segment2box(xy, width, height) 92 | new_segments.append(xy) 93 | 94 | # filter candidates 95 | i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) 96 | targets = targets[i] 97 | targets[:, 1:5] = new[i] 98 | new_segments = np.array(new_segments)[i] 99 | 100 | return im, targets, new_segments 101 | -------------------------------------------------------------------------------- /utils/segment/general.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import torch 4 | import torch.nn.functional as F 5 
| 6 | 7 | def crop_mask(masks, boxes): 8 | """ 9 | "Crop" predicted masks by zeroing out everything not in the predicted bbox. Vectorized by Chong (thanks Chong). 10 | 11 | Args: 12 | - masks should be a size [n, h, w] tensor of masks 13 | - boxes should be a size [n, 4] tensor of bbox coords in relative point form 14 | """ 15 | 16 | n, h, w = masks.shape 17 | x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) 18 | r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) 19 | c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) 20 | 21 | return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) 22 | 23 | 24 | def process_mask_upsample(protos, masks_in, bboxes, shape): 25 | """ 26 | Crop after upsample. 27 | protos: [mask_dim, mask_h, mask_w] 28 | masks_in: [n, mask_dim], n is number of masks after nms 29 | bboxes: [n, 4], n is number of masks after nms 30 | shape: input_image_size, (h, w) 31 | 32 | return: h, w, n 33 | """ 34 | 35 | c, mh, mw = protos.shape # CHW 36 | masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) 37 | masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW 38 | masks = crop_mask(masks, bboxes) # CHW 39 | return masks.gt_(0.5) 40 | 41 | 42 | def process_mask(protos, masks_in, bboxes, shape, upsample=False): 43 | """ 44 | Crop before upsample. 45 | proto_out: [mask_dim, mask_h, mask_w] 46 | out_masks: [n, mask_dim], n is number of masks after nms 47 | bboxes: [n, 4], n is number of masks after nms 48 | shape:input_image_size, (h, w) 49 | 50 | return: h, w, n 51 | """ 52 | 53 | c, mh, mw = protos.shape # CHW 54 | ih, iw = shape 55 | masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW 56 | 57 | downsampled_bboxes = bboxes.clone() 58 | downsampled_bboxes[:, 0] *= mw / iw 59 | downsampled_bboxes[:, 2] *= mw / iw 60 | downsampled_bboxes[:, 3] *= mh / ih 61 | downsampled_bboxes[:, 1] *= mh / ih 62 | 63 | masks = crop_mask(masks, downsampled_bboxes) # CHW 64 | if upsample: 65 | masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW 66 | return masks.gt_(0.5) 67 | 68 | 69 | def process_mask_native(protos, masks_in, bboxes, shape): 70 | """ 71 | Crop after upsample. 
72 | protos: [mask_dim, mask_h, mask_w] 73 | masks_in: [n, mask_dim], n is number of masks after nms 74 | bboxes: [n, 4], n is number of masks after nms 75 | shape: input_image_size, (h, w) 76 | 77 | return: masks, [n, h, w] 78 | """ 79 | c, mh, mw = protos.shape # CHW 80 | masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) 81 | gain = min(mh / shape[0], mw / shape[1]) # gain = old / new 82 | pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding 83 | top, left = int(pad[1]), int(pad[0]) # y, x 84 | bottom, right = int(mh - pad[1]), int(mw - pad[0]) 85 | masks = masks[:, top:bottom, left:right] 86 | 87 | masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW 88 | masks = crop_mask(masks, bboxes) # CHW 89 | return masks.gt_(0.5) 90 | 91 | 92 | def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): 93 | """ 94 | im1_shape: model input shape, [h, w] 95 | im0_shape: original image shape, [h, w, 3] 96 | masks: [h, w, num] 97 | """ 98 | # Rescale masks from im1_shape (letterboxed model input) back to im0_shape (original image) 99 | if ratio_pad is None: # calculate from im0_shape 100 | gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new 101 | pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding 102 | else: 103 | pad = ratio_pad[1] 104 | top, left = int(pad[1]), int(pad[0]) # y, x 105 | bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) 106 | 107 | if len(masks.shape) < 2: 108 | raise ValueError(f'"masks" should have 2 or 3 dimensions, but got {len(masks.shape)}') 109 | masks = masks[top:bottom, left:right] 110 | # masks = masks.permute(2, 0, 1).contiguous() 111 | # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] 112 | # masks = masks.permute(1, 2, 0).contiguous() 113 | masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) 114 | 115 | if len(masks.shape) == 2: 116 | masks = masks[:, :, None] 117 | return masks 118 | 119 | 120 | def mask_iou(mask1, mask2, eps=1e-7): 121 | """ 122 | mask1: [N, n], N is the number of predicted objects 123 | mask2: [M, n], M is the number of ground-truth objects 124 | Note: n means image_w x image_h 125 | 126 | return: masks iou, [N, M] 127 | """ 128 | intersection = torch.matmul(mask1, mask2.t()).clamp(0) 129 | union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection 130 | return intersection / (union + eps) 131 | 132 | 133 | def masks_iou(mask1, mask2, eps=1e-7): 134 | """ 135 | mask1: [N, n], N is the number of predicted objects 136 | mask2: [N, n], one ground-truth mask per predicted object 137 | Note: n means image_w x image_h 138 | 139 | return: masks iou, (N, ) 140 | """ 141 | intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) 142 | union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection 143 | return intersection / (union + eps) 144 | 145 | 146 | def masks2segments(masks, strategy="largest"): 147 | """Converts binary (n,160,160) masks to polygon segments with options for concatenation or selecting the largest 148 | segment.
149 | """ 150 | segments = [] 151 | for x in masks.int().cpu().numpy().astype("uint8"): 152 | c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] 153 | if c: 154 | if strategy == "concat": # concatenate all segments 155 | c = np.concatenate([x.reshape(-1, 2) for x in c]) 156 | elif strategy == "largest": # select largest segment 157 | c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) 158 | else: 159 | c = np.zeros((0, 2)) # no segments found 160 | segments.append(c.astype("float32")) 161 | return segments 162 | -------------------------------------------------------------------------------- /utils/triton.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """Utils to interact with the Triton Inference Server.""" 3 | 4 | import typing 5 | from urllib.parse import urlparse 6 | 7 | import torch 8 | 9 | 10 | class TritonRemoteModel: 11 | """ 12 | A wrapper over a model served by the Triton Inference Server. 13 | 14 | It can be configured to communicate over GRPC or HTTP. It accepts Torch Tensors as input and returns them as 15 | outputs. 16 | """ 17 | 18 | def __init__(self, url: str): 19 | """ 20 | Keyword arguments: 21 | url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000 22 | """ 23 | 24 | parsed_url = urlparse(url) 25 | if parsed_url.scheme == "grpc": 26 | from tritonclient.grpc import InferenceServerClient, InferInput 27 | 28 | self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client 29 | model_repository = self.client.get_model_repository_index() 30 | self.model_name = model_repository.models[0].name 31 | self.metadata = self.client.get_model_metadata(self.model_name, as_json=True) 32 | 33 | def create_input_placeholders() -> typing.List[InferInput]: 34 | return [ 35 | InferInput(i["name"], [int(s) for s in i["shape"]], i["datatype"]) for i in self.metadata["inputs"] 36 | ] 37 | 38 | else: 39 | from tritonclient.http import InferenceServerClient, InferInput 40 | 41 | self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client 42 | model_repository = self.client.get_model_repository_index() 43 | self.model_name = model_repository[0]["name"] 44 | self.metadata = self.client.get_model_metadata(self.model_name) 45 | 46 | def create_input_placeholders() -> typing.List[InferInput]: 47 | return [ 48 | InferInput(i["name"], [int(s) for s in i["shape"]], i["datatype"]) for i in self.metadata["inputs"] 49 | ] 50 | 51 | self._create_input_placeholders_fn = create_input_placeholders 52 | 53 | @property 54 | def runtime(self): 55 | """Returns the model runtime.""" 56 | return self.metadata.get("backend", self.metadata.get("platform")) 57 | 58 | def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: 59 | """ 60 | Invokes the model. 61 | 62 | Parameters can be provided via args or kwargs. args, if provided, are assumed to match the order of inputs of 63 | the model. kwargs are matched with the model input names. 
64 | """ 65 | inputs = self._create_inputs(*args, **kwargs) 66 | response = self.client.infer(model_name=self.model_name, inputs=inputs) 67 | result = [] 68 | for output in self.metadata["outputs"]: 69 | tensor = torch.as_tensor(response.as_numpy(output["name"])) 70 | result.append(tensor) 71 | return result[0] if len(result) == 1 else result 72 | 73 | def _create_inputs(self, *args, **kwargs): 74 | """Creates input tensors from args or kwargs, not both; raises error if none or both are provided.""" 75 | args_len, kwargs_len = len(args), len(kwargs) 76 | if not args_len and not kwargs_len: 77 | raise RuntimeError("No inputs provided.") 78 | if args_len and kwargs_len: 79 | raise RuntimeError("Cannot specify args and kwargs at the same time") 80 | 81 | placeholders = self._create_input_placeholders_fn() 82 | if args_len: 83 | if args_len != len(placeholders): 84 | raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") 85 | for input, value in zip(placeholders, args): 86 | input.set_data_from_numpy(value.cpu().numpy()) 87 | else: 88 | for input in placeholders: 89 | value = kwargs[input.name] 90 | input.set_data_from_numpy(value.cpu().numpy()) 91 | return placeholders 92 | -------------------------------------------------------------------------------- /演示视频.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akitten-cn/Garbage-Identification-System/b7258294da7ee51b94c15562424f4666d719fe7a/演示视频.mp4 --------------------------------------------------------------------------------