├── .gitignore ├── LICENSE ├── example_aimbot ├── __init__.py ├── gameSelection.py ├── main.py ├── models │ ├── __init__.py │ ├── common.py │ ├── experimental.py │ ├── hub │ │ ├── anchors.yaml │ │ ├── yolov3-spp.yaml │ │ ├── yolov3-tiny.yaml │ │ ├── yolov3.yaml │ │ ├── yolov5-bifpn.yaml │ │ ├── yolov5-fpn.yaml │ │ ├── yolov5-p2.yaml │ │ ├── yolov5-p34.yaml │ │ ├── yolov5-p6.yaml │ │ ├── yolov5-p7.yaml │ │ ├── yolov5-panet.yaml │ │ ├── yolov5l6.yaml │ │ ├── yolov5m6.yaml │ │ ├── yolov5n6.yaml │ │ ├── yolov5s-LeakyReLU.yaml │ │ ├── yolov5s-ghost.yaml │ │ ├── yolov5s-transformer.yaml │ │ ├── yolov5s6.yaml │ │ └── yolov5x6.yaml │ ├── segment │ │ ├── yolov5l-seg.yaml │ │ ├── yolov5m-seg.yaml │ │ ├── yolov5n-seg.yaml │ │ ├── yolov5s-seg.yaml │ │ └── yolov5x-seg.yaml │ ├── tf.py │ ├── yolo.py │ ├── yolov5l.yaml │ ├── yolov5m.yaml │ ├── yolov5n.yaml │ ├── yolov5s.yaml │ └── yolov5x.yaml ├── schema │ └── settings.py └── utils │ ├── __init__.py │ ├── activations.py │ ├── augmentations.py │ ├── autoanchor.py │ ├── autobatch.py │ ├── aws │ ├── __init__.py │ ├── mime.sh │ ├── resume.py │ └── userdata.sh │ ├── callbacks.py │ ├── dataloaders.py │ ├── docker │ ├── Dockerfile │ ├── Dockerfile-arm64 │ └── Dockerfile-cpu │ ├── downloads.py │ ├── flask_rest_api │ ├── README.md │ ├── example_request.py │ └── restapi.py │ ├── general.py │ ├── google_app_engine │ ├── Dockerfile │ ├── additional_requirements.txt │ └── app.yaml │ ├── loggers │ ├── __init__.py │ ├── clearml │ │ ├── README.md │ │ ├── __init__.py │ │ ├── clearml_utils.py │ │ └── hpo.py │ ├── comet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── comet_utils.py │ │ ├── hpo.py │ │ └── optimizer_config.json │ └── wandb │ │ ├── README.md │ │ ├── __init__.py │ │ ├── log_dataset.py │ │ ├── sweep.py │ │ ├── sweep.yaml │ │ └── wandb_utils.py │ ├── loss.py │ ├── metrics.py │ ├── plots.py │ ├── segment │ ├── __init__.py │ ├── augmentations.py │ ├── dataloaders.py │ ├── general.py │ ├── loss.py │ ├── metrics.py │ └── plots.py │ ├── torch_utils.py │ └── triton.py ├── example_bare ├── __init__.py ├── main.py └── schema │ └── settings.py ├── imgs └── banner.png ├── main.py └── readme.md /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/
161 | 
--------------------------------------------------------------------------------
/example_aimbot/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RootKit-Org/AI-Aimbot-Starter-Code/53a6285a956fe335710477f5b8cb5333724beae2/example_aimbot/__init__.py
--------------------------------------------------------------------------------
/example_aimbot/gameSelection.py:
--------------------------------------------------------------------------------
1 | import pygetwindow
2 | import time
3 | import bettercam
4 | from typing import Tuple, Union
5 | from schema.settings import Settings
6 | 
7 | # Could be done with
8 | # from config import *
9 | # But we are writing it out for clarity for new devs
10 | 
11 | def gameSelection(settings: Settings) -> Union[Tuple[bettercam.BetterCam, int, int], None]:
12 |     # Selecting the correct game window
13 |     try:
14 |         videoGameWindows = pygetwindow.getAllWindows()
15 |         print("=== All Windows ===")
16 |         for index, window in enumerate(videoGameWindows):
17 |             # only output the window if it has a meaningful title
18 |             if window.title != "":
19 |                 print("[{}]: {}".format(index, window.title))
20 |         # have the user select the window they want
21 |         try:
22 |             userInput = int(input(
23 |                 "Please enter the number corresponding to the window you'd like to select: "))
24 |         except ValueError:
25 |             print("You didn't enter a valid number. Please try again.")
26 |             return None
27 |         # "save" that window as the chosen window for the rest of the script
28 |         videoGameWindow = videoGameWindows[userInput]
29 |     except Exception as e:
30 |         print("Failed to select game window: {}".format(e))
31 |         return None
32 | 
33 |     # Activate that Window
34 |     activationRetries = 30
35 |     activationSuccess = False
36 |     while (activationRetries > 0):
37 |         try:
38 |             videoGameWindow.activate()
39 |             activationSuccess = True
40 |             break
41 |         except pygetwindow.PyGetWindowException as we:
42 |             print("Failed to activate game window: {}".format(str(we)))
43 |             print("Trying again... (you should switch to the game now)")
44 |         except Exception as e:
45 |             print("Failed to activate game window: {}".format(str(e)))
46 |             print("Read the relevant restrictions here: https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-setforegroundwindow")
47 |             activationSuccess = False
48 |             activationRetries = 0
49 |             break
50 |         # wait a little bit before the next try
51 |         time.sleep(3.0)
52 |         activationRetries = activationRetries - 1
53 |     # if we failed to activate the window then we'll be unable to send input to it
54 |     # so just exit the script now
55 |     if not activationSuccess:
56 |         return None
57 |     print("Successfully activated the game window...")
58 | 
59 |     # Starting the screenshotting engine; the capture region is centered on the game window
60 |     left = ((videoGameWindow.left + videoGameWindow.right) // 2) - (settings.screenShotWidth // 2)
61 |     top = videoGameWindow.top + \
62 |         (videoGameWindow.height - settings.screenShotHeight) // 2
63 |     right, bottom = left + settings.screenShotWidth, top + settings.screenShotHeight
64 | 
65 |     region: tuple = (left, top, right, bottom)
66 | 
67 |     # Calculating the center of the autoaim box
68 |     cWidth: int = settings.screenShotWidth // 2
69 |     cHeight: int = settings.screenShotHeight // 2
70 | 
71 |     print(region)
72 | 
73 |     camera = bettercam.create(region=region, output_color="BGRA", max_buffer_len=512)
74 |     if camera is None:
75 |         print("Your Camera Failed! Ask @Wonder for help in our Discord in the #ai-aimbot channel ONLY: https://discord.gg/rootkitorg")
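        # bettercam gave us no capture session for this region; without a
        # camera there is nothing to run against, so hand None back to the
        # caller (note: main.py currently unpacks this result without checking)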
76 |         return None
77 |     camera.start(target_fps=120, video_mode=True)
78 | 
79 |     return camera, cWidth, cHeight
--------------------------------------------------------------------------------
/example_aimbot/main.py:
--------------------------------------------------------------------------------
1 | from schema.settings import Settings
2 | import json
3 | import os
4 | import torch
5 | import numpy as np
6 | import cv2
7 | import time
8 | import win32api
9 | import win32con
10 | import pandas as pd
11 | import gc
12 | from utils.general import (non_max_suppression, xyxy2xywh)
13 | 
14 | import gameSelection
15 | 
16 | 
17 | def main(
18 |     version: int = 0,
19 |     settingsProfile: str = "",
20 |     paidTier: int = 0,
21 |     yoloVersion: int = 0,
22 |     modelFileName: str = ""
23 | ):
24 |     # getting %appdata%
25 |     appdataLocation = os.getenv("APPDATA")
26 |     settingsPath = os.path.join(appdataLocation, "ai-aimbot-launcher", "aimbotSettings", f"{settingsProfile.lower()}.json")
27 | 
28 |     # loading settings
29 |     with open(settingsPath, "r") as f:
30 |         settings = json.load(f)
31 |     settings = Settings(**settings)
32 | 
33 |     # getting model path
34 |     modelPath = os.path.join(appdataLocation, "ai-aimbot-launcher", "models", modelFileName)
35 | 
36 |     if not modelPath.endswith(".pt"):
37 |         print("Fast Mode ONLY! (PyTorch)")
38 |         return
39 | 
40 |     Bot(modelPath, settings)
41 | 
42 | 
43 | def Bot(
44 |     modelPath,
45 |     settings: Settings
46 | ):
47 |     # Runs the game selection menu (gameSelection.py); note it returns None on failure, which this unpacking does not handle
48 |     camera, cWidth, cHeight = gameSelection.gameSelection(settings)
49 | 
50 |     # Used for forcing garbage collection
51 |     count = 0
52 |     sTime = time.time()
53 | 
54 |     # Loading the YOLOv5 model; for better results use yolov5m or yolov5l
55 |     model = torch.hub.load('ultralytics/yolov5', 'custom', path=modelPath)
56 |     stride, names, pt = model.stride, model.names, model.pt
57 | 
58 |     if torch.cuda.is_available():
59 |         model.half()
60 | 
61 |     # Used for colors drawn on bounding boxes
62 |     COLORS = np.random.uniform(0, 255, size=(1500, 3))
63 | 
64 |     # Main loop; quits when the configured quit key is pressed
65 |     last_mid_coord = None
66 |     with torch.no_grad():
67 |         while win32api.GetAsyncKeyState(settings.quitKey) == 0:
68 | 
69 |             # Getting Frame
70 |             npImg = np.array(camera.get_latest_frame())
71 | 
72 |             if settings.useMask:
73 |                 if not settings.maskLeft:
74 |                     npImg[-settings.maskHeight:, -settings.maskWidth:, :] = 0
75 |                 elif settings.maskLeft:
76 |                     npImg[-settings.maskHeight:, :settings.maskWidth, :] = 0
77 |                 else:  # unreachable: maskLeft is either truthy or falsy, so one branch above always runs
78 |                     raise Exception('ERROR: Invalid maskSide! Please use "left" or "right"')
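                # The mask above zeroes out a bottom corner of the captured
                # frame (typically where the player's own weapon/body model is
                # drawn) so the detector does not lock onto the player itself;
                # maskLeft picks which corner gets blacked out.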
Please use "left" or "right"') 79 | 80 | # Normalizing Data 81 | im = torch.from_numpy(npImg) 82 | if im.shape[2] == 4: 83 | # If the image has an alpha channel, remove it 84 | im = im[:, :, :3,] 85 | 86 | im = torch.movedim(im, 2, 0) 87 | if torch.cuda.is_available(): 88 | im = im.half() 89 | im /= 255 90 | if len(im.shape) == 3: 91 | im = im[None] 92 | 93 | # Detecting all the objects 94 | results = model(im, size=settings.screenShotHeight) 95 | 96 | # Suppressing results that dont meet thresholds 97 | pred = non_max_suppression( 98 | results, settings.confidence, settings.confidence, 0, False, max_det=1000) 99 | 100 | # Converting output to usable cords 101 | targets = [] 102 | for i, det in enumerate(pred): 103 | s = "" 104 | gn = torch.tensor(im.shape)[[0, 0, 0, 0]] 105 | if len(det): 106 | for c in det[:, -1].unique(): 107 | n = (det[:, -1] == c).sum() # detections per class 108 | s += f"{n} {names[int(c)]}, " # add to string 109 | 110 | for *xyxy, conf, cls in reversed(det): 111 | targets.append((xyxy2xywh(torch.tensor(xyxy).view( 112 | 1, 4)) / gn).view(-1).tolist() + [float(conf)]) # normalized xywh 113 | 114 | targets = pd.DataFrame( 115 | targets, columns=['current_mid_x', 'current_mid_y', 'width', "height", "confidence"]) 116 | 117 | center_screen = [cWidth, cHeight] 118 | 119 | # If there are people in the center bounding box 120 | if len(targets) > 0: 121 | if (settings.centerOfScreen): 122 | # Compute the distance from the center 123 | targets["dist_from_center"] = np.sqrt((targets.current_mid_x - center_screen[0])**2 + (targets.current_mid_y - center_screen[1])**2) 124 | 125 | # Sort the data frame by distance from center 126 | targets = targets.sort_values("dist_from_center") 127 | 128 | # Get the last persons mid coordinate if it exists 129 | if last_mid_coord: 130 | targets['last_mid_x'] = last_mid_coord[0] 131 | targets['last_mid_y'] = last_mid_coord[1] 132 | # Take distance between current person mid coordinate and last person mid coordinate 133 | targets['dist'] = np.linalg.norm( 134 | targets.iloc[:, [0, 1]].values - targets.iloc[:, [4, 5]], axis=1) 135 | targets.sort_values(by="dist", ascending=False) 136 | 137 | # Take the first person that shows up in the dataframe (Recall that we sort based on Euclidean distance) 138 | xMid = targets.iloc[0].current_mid_x 139 | yMid = targets.iloc[0].current_mid_y 140 | 141 | box_height = targets.iloc[0].height 142 | if settings.headshotMode: 143 | headshot_offset = box_height * 0.38 144 | else: 145 | headshot_offset = box_height * 0.2 146 | 147 | mouseMove = [xMid - cWidth, (yMid - headshot_offset) - cHeight] 148 | 149 | # Moving the mouse 150 | if win32api.GetKeyState(0x14): 151 | win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, int( 152 | mouseMove[0] * settings.movementAmp), int(mouseMove[1] * settings.movementAmp), 0, 0) 153 | last_mid_coord = [xMid, yMid] 154 | 155 | else: 156 | last_mid_coord = None 157 | 158 | # See what the bot sees 159 | if settings.visuals: 160 | # Loops over every item identified and draws a bounding box 161 | for i in range(0, len(targets)): 162 | halfW = round(targets["width"][i] / 2) 163 | halfH = round(targets["height"][i] / 2) 164 | midX = targets['current_mid_x'][i] 165 | midY = targets['current_mid_y'][i] 166 | (startX, startY, endX, endY) = int( 167 | midX + halfW), int(midY + halfH), int(midX - halfW), int(midY - halfH) 168 | 169 | idx = 0 170 | 171 | # draw the bounding box and label on the frame 172 | label = "{}: {:.2f}%".format( 173 | "Human", targets["confidence"][i] * 100) 174 | 
174 |                     cv2.rectangle(npImg, (startX, startY), (endX, endY),
175 |                                   COLORS[idx], 2)
176 |                     y = startY - 15 if startY - 15 > 15 else startY + 15
177 |                     cv2.putText(npImg, label, (startX, y),
178 |                                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
179 | 
180 |             # Forced garbage cleanup every second
181 |             count += 1
182 |             if (time.time() - sTime) > 1:
183 |                 if settings.displayCPS:
184 |                     print("CPS: {}".format(count))  # CPS = corrections (loop iterations) per second
185 |                 count = 0
186 |                 sTime = time.time()
187 | 
188 |                 # Uncomment if you keep running into memory issues
189 |                 # gc.collect(generation=0)
190 | 
191 |             # See visually what the Aimbot sees
192 |             if settings.visuals:
193 |                 cv2.imshow('Live Feed', npImg)
194 |                 if (cv2.waitKey(1) & 0xFF) == ord('q'):
195 |                     exit()
196 |     camera.stop()
--------------------------------------------------------------------------------
/example_aimbot/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RootKit-Org/AI-Aimbot-Starter-Code/53a6285a956fe335710477f5b8cb5333724beae2/example_aimbot/models/__init__.py
--------------------------------------------------------------------------------
/example_aimbot/models/experimental.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Experimental modules
4 | """
5 | import math
6 | 
7 | import numpy as np
8 | import torch
9 | import torch.nn as nn
10 | 
11 | from utils.downloads import attempt_download
12 | 
13 | 
14 | class Sum(nn.Module):
15 |     # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
16 |     def __init__(self, n, weight=False):  # n: number of inputs
17 |         super().__init__()
18 |         self.weight = weight  # apply weights boolean
19 |         self.iter = range(n - 1)  # iter object
20 |         if weight:
21 |             self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights
22 | 
23 |     def forward(self, x):
24 |         y = x[0]  # no weight
25 |         if self.weight:
26 |             w = torch.sigmoid(self.w) * 2
27 |             for i in self.iter:
28 |                 y = y + x[i + 1] * w[i]
29 |         else:
30 |             for i in self.iter:
31 |                 y = y + x[i + 1]
32 |         return y
33 | 
34 | 
35 | class MixConv2d(nn.Module):
36 |     # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
37 |     def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
38 |         super().__init__()
39 |         n = len(k)  # number of convolutions
40 |         if equal_ch:  # equal c_ per group
41 |             i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
42 |             c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
43 |         else:  # equal weight.numel() per group
44 |             b = [c2] + [0] * n
45 |             a = np.eye(n + 1, n, k=-1)
46 |             a -= np.roll(a, 1, axis=1)
47 |             a *= np.array(k) ** 2
48 |             a[0] = 1
49 |             c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
50 | 
51 |         self.m = nn.ModuleList([
52 |             nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
53 |         self.bn = nn.BatchNorm2d(c2)
54 |         self.act = nn.SiLU()
55 | 
56 |     def forward(self, x):
57 |         return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
58 | 
59 | 
60 | class Ensemble(nn.ModuleList):
61 |     # Ensemble of models
62 |     def __init__(self):
63 |         super().__init__()
64 | 
65 |     def forward(self, x, augment=False, profile=False, visualize=False):
66 |         y = [module(x, augment, profile, visualize)[0] for module in self]
67 |         # y = torch.stack(y).max(0)[0]  # max ensemble
68 |         # y = torch.stack(y).mean(0)  # mean ensemble
69 |         y = 
torch.cat(y, 1) # nms ensemble 70 | return y, None # inference, train output 71 | 72 | 73 | def attempt_load(weights, device=None, inplace=True, fuse=True): 74 | # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a 75 | from models.yolo import Detect, Model 76 | 77 | model = Ensemble() 78 | for w in weights if isinstance(weights, list) else [weights]: 79 | ckpt = torch.load(attempt_download(w), map_location='cpu') # load 80 | ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model 81 | 82 | # Model compatibility updates 83 | if not hasattr(ckpt, 'stride'): 84 | ckpt.stride = torch.tensor([32.]) 85 | if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)): 86 | ckpt.names = dict(enumerate(ckpt.names)) # convert to dict 87 | 88 | model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode 89 | 90 | # Module updates 91 | for m in model.modules(): 92 | t = type(m) 93 | if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): 94 | m.inplace = inplace 95 | if t is Detect and not isinstance(m.anchor_grid, list): 96 | delattr(m, 'anchor_grid') 97 | setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) 98 | elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): 99 | m.recompute_scale_factor = None # torch 1.11.0 compatibility 100 | 101 | # Return model 102 | if len(model) == 1: 103 | return model[-1] 104 | 105 | # Return detection ensemble 106 | print(f'Ensemble created with {weights}\n') 107 | for k in 'names', 'nc', 'yaml': 108 | setattr(model, k, getattr(model[0], k)) 109 | model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride 110 | assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' 111 | return model 112 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/anchors.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | # Default anchors for COCO data 3 | 4 | 5 | # P5 ------------------------------------------------------------------------------------------------------------------- 6 | # P5-640: 7 | anchors_p5_640: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | 13 | # P6 ------------------------------------------------------------------------------------------------------------------- 14 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 15 | anchors_p6_640: 16 | - [9,11, 21,19, 17,41] # P3/8 17 | - [43,32, 39,70, 86,64] # P4/16 18 | - [65,131, 134,130, 120,265] # P5/32 19 | - [282,180, 247,354, 512,387] # P6/64 20 | 21 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 22 | anchors_p6_1280: 23 | - [19,27, 44,40, 38,94] # P3/8 24 | - [96,68, 86,152, 180,137] # P4/16 25 | - [140,301, 303,264, 238,542] # P5/32 26 | - [436,615, 739,380, 925,792] # P6/64 27 | 28 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, 
past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 29 | anchors_p6_1920: 30 | - [28,41, 67,59, 57,141] # P3/8 31 | - [144,103, 129,227, 270,205] # P4/16 32 | - [209,452, 455,396, 358,812] # P5/32 33 | - [653,922, 1109,570, 1387,1187] # P6/64 34 | 35 | 36 | # P7 ------------------------------------------------------------------------------------------------------------------- 37 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 38 | anchors_p7_640: 39 | - [11,11, 13,30, 29,20] # P3/8 40 | - [30,46, 61,38, 39,92] # P4/16 41 | - [78,80, 146,66, 79,163] # P5/32 42 | - [149,150, 321,143, 157,303] # P6/64 43 | - [257,402, 359,290, 524,372] # P7/128 44 | 45 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 46 | anchors_p7_1280: 47 | - [19,22, 54,36, 32,77] # P3/8 48 | - [70,83, 138,71, 75,173] # P4/16 49 | - [165,159, 148,334, 375,151] # P5/32 50 | - [334,317, 251,626, 499,474] # P6/64 51 | - [750,326, 534,814, 1079,818] # P7/128 52 | 53 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 54 | anchors_p7_1920: 55 | - [29,34, 81,55, 47,115] # P3/8 56 | - [105,124, 207,107, 113,259] # P4/16 57 | - [247,238, 222,500, 563,227] # P5/32 58 | - [501,476, 376,939, 749,711] # P6/64 59 | - [1126,489, 801,1222, 1618,1227] # P7/128 60 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov3-spp.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3-SPP head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, SPP, [512, [5, 9, 13]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 
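   # from=-2 above: this branch takes its input two modules back (the
   # 256-channel 1x1 Conv at layer 21), bypassing the P4/16 output Conv.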
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov3-tiny.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,14, 23,27, 37,58] # P4/16 9 | - [81,82, 135,169, 344,319] # P5/32 10 | 11 | # YOLOv3-tiny backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Conv, [16, 3, 1]], # 0 15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 16 | [-1, 1, Conv, [32, 3, 1]], 17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 18 | [-1, 1, Conv, [64, 3, 1]], 19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 20 | [-1, 1, Conv, [128, 3, 1]], 21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 22 | [-1, 1, Conv, [256, 3, 1]], 23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 24 | [-1, 1, Conv, [512, 3, 1]], 25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 27 | ] 28 | 29 | # YOLOv3-tiny head 30 | head: 31 | [[-1, 1, Conv, [1024, 3, 1]], 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) 34 | 35 | [-2, 1, Conv, [128, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) 39 | 40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) 41 | ] 42 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov3.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3 head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, Conv, [512, 1, 1]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, 
[256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5-bifpn.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 BiFPN head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5-fpn.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 FPN head 28 | head: 29 | [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) 30 | 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 3, C3, [512, False]], # 14 (P4/16-medium) 35 | 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 1, Conv, [256, 1, 1]], 39 | [-1, 3, C3, [256, False]], # 18 (P3/8-small) 40 | 41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 42 | ] 43 | -------------------------------------------------------------------------------- 
/example_aimbot/models/hub/yolov5-p2.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 14 | [-1, 3, C3, [128]], 15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 16 | [-1, 6, C3, [256]], 17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 18 | [-1, 9, C3, [512]], 19 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 20 | [-1, 3, C3, [1024]], 21 | [-1, 1, SPPF, [1024, 5]], # 9 22 | ] 23 | 24 | # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs 25 | head: 26 | [[-1, 1, Conv, [512, 1, 1]], 27 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 28 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 29 | [-1, 3, C3, [512, False]], # 13 30 | 31 | [-1, 1, Conv, [256, 1, 1]], 32 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 33 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 34 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 35 | 36 | [-1, 1, Conv, [128, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 2], 1, Concat, [1]], # cat backbone P2 39 | [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) 40 | 41 | [-1, 1, Conv, [128, 3, 2]], 42 | [[-1, 18], 1, Concat, [1]], # cat head P3 43 | [-1, 3, C3, [256, False]], # 24 (P3/8-small) 44 | 45 | [-1, 1, Conv, [256, 3, 2]], 46 | [[-1, 14], 1, Concat, [1]], # cat head P4 47 | [-1, 3, C3, [512, False]], # 27 (P4/16-medium) 48 | 49 | [-1, 1, Conv, [512, 3, 2]], 50 | [[-1, 10], 1, Concat, [1]], # cat head P5 51 | [-1, 3, C3, [1024, False]], # 30 (P5/32-large) 52 | 53 | [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) 54 | ] 55 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5-p34.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2 13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 14 | [ -1, 3, C3, [ 128 ] ], 15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 16 | [ -1, 6, C3, [ 256 ] ], 17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 18 | [ -1, 9, C3, [ 512 ] ], 19 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 20 | [ -1, 3, C3, [ 1024 ] ], 21 | [ -1, 1, SPPF, [ 1024, 5 ] ], # 9 22 | ] 23 | 24 | # YOLOv5 v6.0 head with (P3, P4) outputs 25 | head: 26 | [ [ -1, 1, Conv, [ 512, 1, 1 ] ], 27 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 28 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 29 | [ -1, 3, C3, [ 512, False ] ], # 13 30 | 31 | [ -1, 1, Conv, [ 256, 1, 1 ] ], 32 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], 33 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 34 | [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) 35 | 36 | [ -1, 1, Conv, [ 256, 3, 2 ] ], 37 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 38 | [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) 39 | 40 | [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], 
# Detect(P3, P4) 41 | ] 42 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5-p6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 14 | [-1, 3, C3, [128]], 15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 16 | [-1, 6, C3, [256]], 17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 18 | [-1, 9, C3, [512]], 19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 20 | [-1, 3, C3, [768]], 21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 22 | [-1, 3, C3, [1024]], 23 | [-1, 1, SPPF, [1024, 5]], # 11 24 | ] 25 | 26 | # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs 27 | head: 28 | [[-1, 1, Conv, [768, 1, 1]], 29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 30 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 31 | [-1, 3, C3, [768, False]], # 15 32 | 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 35 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 36 | [-1, 3, C3, [512, False]], # 19 37 | 38 | [-1, 1, Conv, [256, 1, 1]], 39 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 40 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 41 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 42 | 43 | [-1, 1, Conv, [256, 3, 2]], 44 | [[-1, 20], 1, Concat, [1]], # cat head P4 45 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 46 | 47 | [-1, 1, Conv, [512, 3, 2]], 48 | [[-1, 16], 1, Concat, [1]], # cat head P5 49 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 50 | 51 | [-1, 1, Conv, [768, 3, 2]], 52 | [[-1, 12], 1, Concat, [1]], # cat head P6 53 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 54 | 55 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 56 | ] 57 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5-p7.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer 8 | 9 | # YOLOv5 v6.0 backbone 10 | backbone: 11 | # [from, number, module, args] 12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 14 | [-1, 3, C3, [128]], 15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 16 | [-1, 6, C3, [256]], 17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 18 | [-1, 9, C3, [512]], 19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 20 | [-1, 3, C3, [768]], 21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 22 | [-1, 3, C3, [1024]], 23 | [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 24 | [-1, 3, C3, [1280]], 25 | [-1, 1, SPPF, [1280, 5]], # 13 26 | ] 27 | 28 | # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs 29 | head: 30 | [[-1, 1, Conv, [1024, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 10], 1, Concat, [1]], # cat backbone P6 33 | [-1, 3, C3, [1024, False]], # 17 34 | 35 | [-1, 1, Conv, [768, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 38 | [-1, 3, 
C3, [768, False]], # 21 39 | 40 | [-1, 1, Conv, [512, 1, 1]], 41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 42 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 43 | [-1, 3, C3, [512, False]], # 25 44 | 45 | [-1, 1, Conv, [256, 1, 1]], 46 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 47 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 48 | [-1, 3, C3, [256, False]], # 29 (P3/8-small) 49 | 50 | [-1, 1, Conv, [256, 3, 2]], 51 | [[-1, 26], 1, Concat, [1]], # cat head P4 52 | [-1, 3, C3, [512, False]], # 32 (P4/16-medium) 53 | 54 | [-1, 1, Conv, [512, 3, 2]], 55 | [[-1, 22], 1, Concat, [1]], # cat head P5 56 | [-1, 3, C3, [768, False]], # 35 (P5/32-large) 57 | 58 | [-1, 1, Conv, [768, 3, 2]], 59 | [[-1, 18], 1, Concat, [1]], # cat head P6 60 | [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) 61 | 62 | [-1, 1, Conv, [1024, 3, 2]], 63 | [[-1, 14], 1, Concat, [1]], # cat head P7 64 | [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) 65 | 66 | [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) 67 | ] 68 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5-panet.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 PANet head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5l6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 
1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, [1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5m6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.67 # model depth multiple 6 | width_multiple: 0.75 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, [1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | 
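The n6/s6/m6/l6/x6 hub configs above and below share an identical layer list and differ only in their depth_multiple and width_multiple values. A rough sketch of how YOLOv5's model parser applies the two multipliers (the helper names here are illustrative, not functions from this repo):

import math

def scaled_repeats(n: int, depth_multiple: float) -> int:
    # C3 repeat counts are scaled by depth_multiple and floored at 1
    return max(round(n * depth_multiple), 1) if n > 1 else n

def scaled_channels(c: int, width_multiple: float, divisor: int = 8) -> int:
    # channel widths are scaled by width_multiple, then rounded up to a multiple of 8
    return math.ceil(c * width_multiple / divisor) * divisor

# yolov5m6 (depth_multiple=0.67, width_multiple=0.75):
print(scaled_repeats(9, 0.67))      # 6   -- a "9 x C3" stage runs 6 times
print(scaled_channels(1024, 0.75))  # 768 -- 1024 nominal channels become 768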
-------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5n6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.25 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, [1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5s-LeakyReLU.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model 6 | depth_multiple: 0.33 # model depth multiple 7 | width_multiple: 0.50 # layer channel multiple 8 | anchors: 9 | - [10,13, 16,30, 33,23] # P3/8 10 | - [30,61, 62,45, 59,119] # P4/16 11 | - [116,90, 156,198, 373,326] # P5/32 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [1024]], 25 | [-1, 1, SPPF, [1024, 5]], # 9 26 | ] 27 | 28 | # YOLOv5 v6.0 head 29 | head: 30 | [[-1, 1, Conv, [512, 1, 1]], 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 3, C3, [512, False]], # 13 34 | 35 | [-1, 1, Conv, [256, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 4], 
1, Concat, [1]], # cat backbone P3 38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 39 | 40 | [-1, 1, Conv, [256, 3, 2]], 41 | [[-1, 14], 1, Concat, [1]], # cat head P4 42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 43 | 44 | [-1, 1, Conv, [512, 3, 2]], 45 | [[-1, 10], 1, Concat, [1]], # cat head P5 46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 47 | 48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 49 | ] 50 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5s-ghost.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3Ghost, [128]], 18 | [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3Ghost, [256]], 20 | [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3Ghost, [512]], 22 | [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3Ghost, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, GhostConv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3Ghost, [512, False]], # 13 33 | 34 | [-1, 1, GhostConv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, GhostConv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, GhostConv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5s-transformer.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, 
[None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5s6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, [1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /example_aimbot/models/hub/yolov5x6.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.33 # model depth multiple 6 | width_multiple: 1.25 # layer channel multiple 7 | anchors: 8 | - [19,27, 44,40, 38,94] # P3/8 9 | - [96,68, 86,152, 180,137] # P4/16 10 | - [140,301, 303,264, 238,542] # P5/32 11 | - [436,615, 739,380, 925,792] # P6/64 12 | 13 | # YOLOv5 v6.0 backbone 14 | backbone: 15 | # [from, number, module, args] 16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 18 | [-1, 3, C3, [128]], 19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 20 | [-1, 6, C3, [256]], 21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 22 | [-1, 9, C3, [512]], 23 | 
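   # The "Pn/s" tags in these comments mark feature-pyramid level n at a total
   # stride of s relative to the input image, e.g. P5/32 below is 32x downsampled.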
[-1, 1, Conv, [768, 3, 2]], # 7-P5/32 24 | [-1, 3, C3, [768]], 25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 26 | [-1, 3, C3, [1024]], 27 | [-1, 1, SPPF, [1024, 5]], # 11 28 | ] 29 | 30 | # YOLOv5 v6.0 head 31 | head: 32 | [[-1, 1, Conv, [768, 1, 1]], 33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 35 | [-1, 3, C3, [768, False]], # 15 36 | 37 | [-1, 1, Conv, [512, 1, 1]], 38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 40 | [-1, 3, C3, [512, False]], # 19 41 | 42 | [-1, 1, Conv, [256, 1, 1]], 43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) 46 | 47 | [-1, 1, Conv, [256, 3, 2]], 48 | [[-1, 20], 1, Concat, [1]], # cat head P4 49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) 50 | 51 | [-1, 1, Conv, [512, 3, 2]], 52 | [[-1, 16], 1, Concat, [1]], # cat head P5 53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) 54 | 55 | [-1, 1, Conv, [768, 3, 2]], 56 | [[-1, 12], 1, Concat, [1]], # cat head P6 57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) 58 | 59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) 60 | ] 61 | -------------------------------------------------------------------------------- /example_aimbot/models/segment/yolov5l-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/segment/yolov5m-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.67 # model depth multiple 6 | width_multiple: 0.75 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 
backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/segment/yolov5n-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.25 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/segment/yolov5s-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.5 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | 
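# ('from' = -1 means "take the previous layer's output"; a list such as [-1, 6] feeds Concat
# both the previous layer and backbone layer 6, which is how the FPN skip connections in the
# head below are wired. 'number' is the repeat count before depth_multiple scaling.)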
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/segment/yolov5x-seg.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.33 # model depth multiple 6 | width_multiple: 1.25 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/yolov5l.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.0 # model depth multiple 6 | width_multiple: 1.0 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 
1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/yolov5m.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.67 # model depth multiple 6 | width_multiple: 0.75 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/yolov5n.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.25 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 
1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/yolov5s.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/models/yolov5x.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 1.33 # model depth multiple 6 | width_multiple: 1.25 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 v6.0 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 6, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | 
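# (Conv args read [out_channels, kernel, stride]; each stride-2 Conv halves the feature map,
# so after the five downsampling Convs above the grid is 1/32 of the input resolution,
# which is what the P5/32 label denotes.)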
[-1, 3, C3, [1024]], 24 | [-1, 1, SPPF, [1024, 5]], # 9 25 | ] 26 | 27 | # YOLOv5 v6.0 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /example_aimbot/schema/settings.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, Field, validator 2 | import win32con 3 | 4 | KEY_MAP = { 5 | "F1": win32con.VK_F1, 6 | "F2": win32con.VK_F2, 7 | "F3": win32con.VK_F3, 8 | "F4": win32con.VK_F4, 9 | "F5": win32con.VK_F5, 10 | "F6": win32con.VK_F6, 11 | "F7": win32con.VK_F7, 12 | "F8": win32con.VK_F8, 13 | "F9": win32con.VK_F9, 14 | "F10": win32con.VK_F10, 15 | "F11": win32con.VK_F11, 16 | "F12": win32con.VK_F12, 17 | "Escape": win32con.VK_ESCAPE, 18 | "Tab": win32con.VK_TAB, 19 | "CapsLock": win32con.VK_CAPITAL, 20 | "LeftShift": win32con.VK_LSHIFT, 21 | "Shift": win32con.VK_LSHIFT, 22 | "RightShift": win32con.VK_RSHIFT, 23 | "LeftControl": win32con.VK_LCONTROL, 24 | "Control": win32con.VK_LCONTROL, 25 | "RightControl": win32con.VK_RCONTROL, 26 | "LeftAlt": win32con.VK_LMENU, 27 | "Alt": win32con.VK_LMENU, 28 | "RightAlt": win32con.VK_RMENU, 29 | "Enter": win32con.VK_RETURN, 30 | "Backspace": win32con.VK_BACK, 31 | "Delete": win32con.VK_DELETE, 32 | "Insert": win32con.VK_INSERT, 33 | "Home": win32con.VK_HOME, 34 | "End": win32con.VK_END, 35 | "PageUp": win32con.VK_PRIOR, 36 | "PageDown": win32con.VK_NEXT, 37 | "LeftMouseButtonDown": win32con.VK_LBUTTON, 38 | "RightMouseButtonDown": win32con.VK_RBUTTON, 39 | "MiddleMouseButtonDown": win32con.VK_MBUTTON, 40 | } 41 | 42 | class Settings(BaseModel): 43 | movementAmp: float = Field(default=0.3) 44 | useMask: bool = Field(default=False) 45 | maskLeft: bool = Field(default=True) 46 | maskWidth: int = Field(default=80) 47 | maskHeight: int = Field(default=200) 48 | quitKey: int = Field(default=ord("Q")) 49 | screenShotHeight: int = Field(default=320) 50 | screenShotWidth: int = Field(default=320) 51 | confidence: float = Field(default=0.5) 52 | headshotMode: bool = Field(default=True) 53 | headshotDistanceModifier: float = Field(default=0.38) 54 | displayCPS: bool = Field(default=True) 55 | visuals: bool = Field(default=False) 56 | centerOfScreen: bool = Field(default=True) 57 | activationKey: int = Field(default=win32con.VK_CAPITAL) 58 | autoFire: bool = Field(default=False) 59 | autoFireActivationDistance: int = Field(default=50) 60 | onnxChoice: int = Field(default=2) 61 | fovCircle: bool = Field(default=False) 62 | fovCircleRadius: int = Field(default=160) 63 | fovCircleRadiusDetectionModifier: float = Field(default=1.0) 64 | aimShakey: bool = Field(default=False) 65 | aimShakeyStrength: int = Field(default=10) 66 | toggleable: bool = Field(default=True) 67 | gameTitle: str = Field(default=None) 68 | 69 | @validator('activationKey', 'quitKey', 
pre=True)
70 | def mapKey(cls, key):
71 | if isinstance(key, int): # already a virtual-key code (e.g. a win32con constant)
72 | return key
73 | if len(key) == 1:
74 | try:
75 | return ord(key.upper())
76 | except Exception:
77 | print("Invalid activation key")
78 | print("Defaulting to CapsLock")
79 | return KEY_MAP["CapsLock"]
80 | return KEY_MAP.get(key, KEY_MAP["CapsLock"]) # unknown key names also fall back to CapsLock
81 | 
82 | class Config:
83 | extra = "allow"
--------------------------------------------------------------------------------
/example_aimbot/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | utils/initialization
4 | """
5 | 
6 | import contextlib
7 | import platform
8 | import threading
9 | 
10 | 
11 | def emojis(str=''):
12 | # Return platform-dependent emoji-safe version of string
13 | return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
14 | 
15 | 
16 | class TryExcept(contextlib.ContextDecorator):
17 | # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
18 | def __init__(self, msg=''):
19 | self.msg = msg
20 | 
21 | def __enter__(self):
22 | pass
23 | 
24 | def __exit__(self, exc_type, value, traceback):
25 | if value:
26 | print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
27 | return True
28 | 
29 | 
30 | def threaded(func):
31 | # Multi-threads a target function and returns thread. Usage: @threaded decorator
32 | def wrapper(*args, **kwargs):
33 | thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
34 | thread.start()
35 | return thread
36 | 
37 | return wrapper
38 | 
39 | 
40 | def join_threads(verbose=False):
41 | # Join all daemon threads, i.e. atexit.register(lambda: join_threads())
42 | main_thread = threading.current_thread()
43 | for t in threading.enumerate():
44 | if t is not main_thread:
45 | if verbose:
46 | print(f'Joining thread {t.name}')
47 | t.join()
48 | 
49 | 
50 | def notebook_init(verbose=True):
51 | # Check system software and hardware
52 | print('Checking setup...')
53 | 
54 | import os
55 | import shutil
56 | 
57 | from ultralytics.utils.checks import check_requirements
58 | 
59 | from utils.general import check_font, is_colab
60 | from utils.torch_utils import select_device # imports
61 | 
62 | check_font()
63 | 
64 | import psutil
65 | 
66 | if check_requirements('wandb', install=False):
67 | os.system('pip uninstall -y wandb') # eliminate unexpected account creation prompt with infinite hang
68 | if is_colab():
69 | shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory
70 | 
71 | # System info
72 | display = None
73 | if verbose:
74 | gb = 1 << 30 # bytes to GiB (1024 ** 3)
75 | ram = psutil.virtual_memory().total
76 | total, used, free = shutil.disk_usage('/')
77 | with contextlib.suppress(Exception): # clear display if ipython is installed
78 | from IPython import display
79 | display.clear_output()
80 | s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
81 | else:
82 | s = ''
83 | 
84 | select_device(newline=False)
85 | print(emojis(f'Setup complete ✅ {s}'))
86 | return display
87 | 
--------------------------------------------------------------------------------
/example_aimbot/utils/activations.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2 | """
3 | Activation functions
4 | """
5 | 
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 | 
10 | 
11 | class SiLU(nn.Module):
12 | # SiLU 
activation https://arxiv.org/pdf/1606.08415.pdf 13 | @staticmethod 14 | def forward(x): 15 | return x * torch.sigmoid(x) 16 | 17 | 18 | class Hardswish(nn.Module): 19 | # Hard-SiLU activation 20 | @staticmethod 21 | def forward(x): 22 | # return x * F.hardsigmoid(x) # for TorchScript and CoreML 23 | return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX 24 | 25 | 26 | class Mish(nn.Module): 27 | # Mish activation https://github.com/digantamisra98/Mish 28 | @staticmethod 29 | def forward(x): 30 | return x * F.softplus(x).tanh() 31 | 32 | 33 | class MemoryEfficientMish(nn.Module): 34 | # Mish activation memory-efficient 35 | class F(torch.autograd.Function): 36 | 37 | @staticmethod 38 | def forward(ctx, x): 39 | ctx.save_for_backward(x) 40 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) 41 | 42 | @staticmethod 43 | def backward(ctx, grad_output): 44 | x = ctx.saved_tensors[0] 45 | sx = torch.sigmoid(x) 46 | fx = F.softplus(x).tanh() 47 | return grad_output * (fx + x * sx * (1 - fx * fx)) 48 | 49 | def forward(self, x): 50 | return self.F.apply(x) 51 | 52 | 53 | class FReLU(nn.Module): 54 | # FReLU activation https://arxiv.org/abs/2007.11824 55 | def __init__(self, c1, k=3): # ch_in, kernel 56 | super().__init__() 57 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) 58 | self.bn = nn.BatchNorm2d(c1) 59 | 60 | def forward(self, x): 61 | return torch.max(x, self.bn(self.conv(x))) 62 | 63 | 64 | class AconC(nn.Module): 65 | r""" ACON activation (activate or not) 66 | AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter 67 | according to "Activate or Not: Learning Customized Activation" . 68 | """ 69 | 70 | def __init__(self, c1): 71 | super().__init__() 72 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) 73 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) 74 | self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) 75 | 76 | def forward(self, x): 77 | dpx = (self.p1 - self.p2) * x 78 | return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x 79 | 80 | 81 | class MetaAconC(nn.Module): 82 | r""" ACON activation (activate or not) 83 | MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network 84 | according to "Activate or Not: Learning Customized Activation" . 
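(paper: https://arxiv.org/abs/2009.04759; unlike AconC above, beta here is computed
per channel from a global-average-pooled input via the fc1/fc2 bottleneck below)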
85 | """ 86 | 87 | def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r 88 | super().__init__() 89 | c2 = max(r, c1 // r) 90 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) 91 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) 92 | self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) 93 | self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) 94 | # self.bn1 = nn.BatchNorm2d(c2) 95 | # self.bn2 = nn.BatchNorm2d(c1) 96 | 97 | def forward(self, x): 98 | y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) 99 | # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 100 | # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable 101 | beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed 102 | dpx = (self.p1 - self.p2) * x 103 | return dpx * torch.sigmoid(beta * dpx) + self.p2 * x 104 | -------------------------------------------------------------------------------- /example_aimbot/utils/autoanchor.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ 3 | AutoAnchor utils 4 | """ 5 | 6 | import random 7 | 8 | import numpy as np 9 | import torch 10 | import yaml 11 | from tqdm import tqdm 12 | 13 | from utils import TryExcept 14 | from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr 15 | 16 | PREFIX = colorstr('AutoAnchor: ') 17 | 18 | 19 | def check_anchor_order(m): 20 | # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary 21 | a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer 22 | da = a[-1] - a[0] # delta a 23 | ds = m.stride[-1] - m.stride[0] # delta s 24 | if da and (da.sign() != ds.sign()): # same order 25 | LOGGER.info(f'{PREFIX}Reversing anchor order') 26 | m.anchors[:] = m.anchors.flip(0) 27 | 28 | 29 | @TryExcept(f'{PREFIX}ERROR') 30 | def check_anchors(dataset, model, thr=4.0, imgsz=640): 31 | # Check anchor fit to data, recompute if necessary 32 | m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() 33 | shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) 34 | scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale 35 | wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh 36 | 37 | def metric(k): # compute metric 38 | r = wh[:, None] / k[None] 39 | x = torch.min(r, 1 / r).min(2)[0] # ratio metric 40 | best = x.max(1)[0] # best_x 41 | aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold 42 | bpr = (best > 1 / thr).float().mean() # best possible recall 43 | return bpr, aat 44 | 45 | stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides 46 | anchors = m.anchors.clone() * stride # current anchors 47 | bpr, aat = metric(anchors.cpu().view(-1, 2)) 48 | s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). 
' 49 | if bpr > 0.98: # threshold to recompute 50 | LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅') 51 | else: 52 | LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...') 53 | na = m.anchors.numel() // 2 # number of anchors 54 | anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) 55 | new_bpr = metric(anchors)[0] 56 | if new_bpr > bpr: # replace anchors 57 | anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) 58 | m.anchors[:] = anchors.clone().view_as(m.anchors) 59 | check_anchor_order(m) # must be in pixel-space (not grid-space) 60 | m.anchors /= stride 61 | s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' 62 | else: 63 | s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' 64 | LOGGER.info(s) 65 | 66 | 67 | def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): 68 | """ Creates kmeans-evolved anchors from training dataset 69 | 70 | Arguments: 71 | dataset: path to data.yaml, or a loaded dataset 72 | n: number of anchors 73 | img_size: image size used for training 74 | thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 75 | gen: generations to evolve anchors using genetic algorithm 76 | verbose: print all results 77 | 78 | Return: 79 | k: kmeans evolved anchors 80 | 81 | Usage: 82 | from utils.autoanchor import *; _ = kmean_anchors() 83 | """ 84 | from scipy.cluster.vq import kmeans 85 | 86 | npr = np.random 87 | thr = 1 / thr 88 | 89 | def metric(k, wh): # compute metrics 90 | r = wh[:, None] / k[None] 91 | x = torch.min(r, 1 / r).min(2)[0] # ratio metric 92 | # x = wh_iou(wh, torch.tensor(k)) # iou metric 93 | return x, x.max(1)[0] # x, best_x 94 | 95 | def anchor_fitness(k): # mutation fitness 96 | _, best = metric(torch.tensor(k, dtype=torch.float32), wh) 97 | return (best * (best > thr).float()).mean() # fitness 98 | 99 | def print_results(k, verbose=True): 100 | k = k[np.argsort(k.prod(1))] # sort small to large 101 | x, best = metric(k, wh0) 102 | bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr 103 | s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ 104 | f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ 105 | f'past_thr={x[x > thr].mean():.3f}-mean: ' 106 | for x in k: 107 | s += '%i,%i, ' % (round(x[0]), round(x[1])) 108 | if verbose: 109 | LOGGER.info(s[:-2]) 110 | return k 111 | 112 | if isinstance(dataset, str): # *.yaml file 113 | with open(dataset, errors='ignore') as f: 114 | data_dict = yaml.safe_load(f) # model dict 115 | from utils.dataloaders import LoadImagesAndLabels 116 | dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) 117 | 118 | # Get label wh 119 | shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) 120 | wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh 121 | 122 | # Filter 123 | i = (wh0 < 3.0).any(1).sum() 124 | if i: 125 | LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') 126 | wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels 127 | # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 128 | 129 | # Kmeans init 130 | try: 131 | 
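# (the whitening step below divides wh by its per-dimension std so that width and height
# contribute equally to the k-means distance metric; multiplying the centroids back by s
# afterwards returns them to pixel units)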
LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') 132 | assert n <= len(wh) # apply overdetermined constraint 133 | s = wh.std(0) # sigmas for whitening 134 | k = kmeans(wh / s, n, iter=30)[0] * s # points 135 | assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar 136 | except Exception: 137 | LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init') 138 | k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init 139 | wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) 140 | k = print_results(k, verbose=False) 141 | 142 | # Plot 143 | # k, d = [None] * 20, [None] * 20 144 | # for i in tqdm(range(1, 21)): 145 | # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance 146 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) 147 | # ax = ax.ravel() 148 | # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') 149 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh 150 | # ax[0].hist(wh[wh[:, 0]<100, 0],400) 151 | # ax[1].hist(wh[wh[:, 1]<100, 1],400) 152 | # fig.savefig('wh.png', dpi=200) 153 | 154 | # Evolve 155 | f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma 156 | pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT) # progress bar 157 | for _ in pbar: 158 | v = np.ones(sh) 159 | while (v == 1).all(): # mutate until a change occurs (prevent duplicates) 160 | v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) 161 | kg = (k.copy() * v).clip(min=2.0) 162 | fg = anchor_fitness(kg) 163 | if fg > f: 164 | f, k = fg, kg.copy() 165 | pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' 166 | if verbose: 167 | print_results(k, verbose) 168 | 169 | return print_results(k).astype(np.float32) 170 | -------------------------------------------------------------------------------- /example_aimbot/utils/autobatch.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ 3 | Auto-batch utils 4 | """ 5 | 6 | from copy import deepcopy 7 | 8 | import numpy as np 9 | import torch 10 | 11 | from utils.general import LOGGER, colorstr 12 | from utils.torch_utils import profile 13 | 14 | 15 | def check_train_batch_size(model, imgsz=640, amp=True): 16 | # Check YOLOv5 training batch size 17 | with torch.cuda.amp.autocast(amp): 18 | return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size 19 | 20 | 21 | def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): 22 | # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory 23 | # Usage: 24 | # import torch 25 | # from utils.autobatch import autobatch 26 | # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) 27 | # print(autobatch(model)) 28 | 29 | # Check device 30 | prefix = colorstr('AutoBatch: ') 31 | LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') 32 | device = next(model.parameters()).device # get model device 33 | if device.type == 'cpu': 34 | LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') 35 | return batch_size 36 | if torch.backends.cudnn.benchmark: 37 | LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') 38 | return batch_size 39 | 40 | # Inspect CUDA memory 41 | gb = 1 << 30 # bytes to GiB (1024 ** 3) 
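# (how the estimate below works: memory use is profiled at a few batch sizes, a line
# y = p0 * b + p1 is fit to the measurements, and the optimal b solves p0 * b + p1 = f * fraction.
# With purely illustrative numbers, p0 = 0.5 GiB/image, p1 = 1 GiB and f = 11 GiB free gives
# b = int((11 * 0.8 - 1) / 0.5) = 15.)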
42 | d = str(device).upper() # 'CUDA:0'
43 | properties = torch.cuda.get_device_properties(device) # device properties
44 | t = properties.total_memory / gb # GiB total
45 | r = torch.cuda.memory_reserved(device) / gb # GiB reserved
46 | a = torch.cuda.memory_allocated(device) / gb # GiB allocated
47 | f = t - (r + a) # GiB free
48 | LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
49 | 
50 | # Profile batch sizes
51 | batch_sizes = [1, 2, 4, 8, 16]
52 | try:
53 | img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
54 | results = profile(img, model, n=3, device=device)
55 | except Exception as e:
56 | LOGGER.warning(f'{prefix}{e}')
57 | return batch_size # profiling failed, fall back to default rather than use undefined results
58 | # Fit a solution
59 | y = [x[2] for x in results if x] # memory [2]
60 | p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit
61 | b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size)
62 | if None in results: # some sizes failed
63 | i = results.index(None) # first fail index
64 | if b >= batch_sizes[i]: # y intercept above failure point
65 | b = batch_sizes[max(i - 1, 0)] # select prior safe point
66 | if b < 1 or b > 1024: # b outside of safe range
67 | b = batch_size
68 | LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')
69 | 
70 | fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted
71 | LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
72 | return b
73 | 
--------------------------------------------------------------------------------
/example_aimbot/utils/aws/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RootKit-Org/AI-Aimbot-Starter-Code/53a6285a956fe335710477f5b8cb5333724beae2/example_aimbot/utils/aws/__init__.py
--------------------------------------------------------------------------------
/example_aimbot/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 | 
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 | 
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 | 
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 | 
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 | 
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 | 
--------------------------------------------------------------------------------
/example_aimbot/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings
2 | # Usage: $ python utils/aws/resume.py
3 | 
4 | import os
5 | import sys
6 | from pathlib import Path
7 | 
8 | import torch
9 | import yaml
10 | 
11 | FILE = Path(__file__).resolve()
12 | ROOT = FILE.parents[2] # YOLOv5 root directory
13 | if str(ROOT) not in 
sys.path: 14 | sys.path.append(str(ROOT)) # add ROOT to PATH 15 | 16 | port = 0 # --master_port 17 | path = Path('').resolve() 18 | for last in path.rglob('*/**/last.pt'): 19 | ckpt = torch.load(last) 20 | if ckpt['optimizer'] is None: 21 | continue 22 | 23 | # Load opt.yaml 24 | with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: 25 | opt = yaml.safe_load(f) 26 | 27 | # Get device count 28 | d = opt['device'].split(',') # devices 29 | nd = len(d) # number of devices 30 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel 31 | 32 | if ddp: # multi-GPU 33 | port += 1 34 | cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' 35 | else: # single-GPU 36 | cmd = f'python train.py --resume {last}' 37 | 38 | cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread 39 | print(cmd) 40 | os.system(cmd) 41 | -------------------------------------------------------------------------------- /example_aimbot/utils/aws/userdata.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html 3 | # This script will run only once on first instance start (for a re-start script see mime.sh) 4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir 5 | # Use >300 GB SSD 6 | 7 | cd home/ubuntu 8 | if [ ! -d yolov5 ]; then 9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker 10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 11 | cd yolov5 12 | bash data/scripts/get_coco.sh && echo "COCO done." & 13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & 14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & 15 | wait && echo "All tasks done." # finish background tasks 16 | else 17 | echo "Running re-start script." # resume interrupted runs 18 | i=0 19 | list=$(sudo docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour' 20 | while IFS= read -r id; do 21 | ((i++)) 22 | echo "restarting container $i: $id" 23 | sudo docker start $id 24 | # sudo docker exec -it $id python train.py --resume # single-GPU 25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario 26 | done <<<"$list" 27 | fi 28 | -------------------------------------------------------------------------------- /example_aimbot/utils/callbacks.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ 3 | Callback utils 4 | """ 5 | 6 | import threading 7 | 8 | 9 | class Callbacks: 10 | """" 11 | Handles all registered callbacks for YOLOv5 Hooks 12 | """ 13 | 14 | def __init__(self): 15 | # Define the available callbacks 16 | self._callbacks = { 17 | 'on_pretrain_routine_start': [], 18 | 'on_pretrain_routine_end': [], 19 | 'on_train_start': [], 20 | 'on_train_epoch_start': [], 21 | 'on_train_batch_start': [], 22 | 'optimizer_step': [], 23 | 'on_before_zero_grad': [], 24 | 'on_train_batch_end': [], 25 | 'on_train_epoch_end': [], 26 | 'on_val_start': [], 27 | 'on_val_batch_start': [], 28 | 'on_val_image_end': [], 29 | 'on_val_batch_end': [], 30 | 'on_val_end': [], 31 | 'on_fit_epoch_end': [], # fit = train + val 32 | 'on_model_save': [], 33 | 'on_train_end': [], 34 | 'on_params_update': [], 35 | 'teardown': [], } 36 | self.stop_training = False # set True to interrupt training 37 | 38 | def register_action(self, hook, name='', callback=None): 39 | """ 40 | Register a new action to a callback hook 41 | 42 | Args: 43 | hook: The callback hook name to register the action to 44 | name: The name of the action for later reference 45 | callback: The callback to fire 46 | """ 47 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" 48 | assert callable(callback), f"callback '{callback}' is not callable" 49 | self._callbacks[hook].append({'name': name, 'callback': callback}) 50 | 51 | def get_registered_actions(self, hook=None): 52 | """" 53 | Returns all the registered actions by callback hook 54 | 55 | Args: 56 | hook: The name of the hook to check, defaults to all 57 | """ 58 | return self._callbacks[hook] if hook else self._callbacks 59 | 60 | def run(self, hook, *args, thread=False, **kwargs): 61 | """ 62 | Loop through the registered actions and fire all callbacks on main thread 63 | 64 | Args: 65 | hook: The name of the hook to check, defaults to all 66 | args: Arguments to receive from YOLOv5 67 | thread: (boolean) Run callbacks in daemon thread 68 | kwargs: Keyword Arguments to receive from YOLOv5 69 | """ 70 | 71 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" 72 | for logger in self._callbacks[hook]: 73 | if thread: 74 | threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start() 75 | else: 76 | logger['callback'](*args, **kwargs) 77 | -------------------------------------------------------------------------------- /example_aimbot/utils/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 3 | # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference 4 | 5 | # Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch 6 | FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime 7 | 8 | # Downloads to user 
config dir 9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ 10 | 11 | # Install linux packages 12 | ENV DEBIAN_FRONTEND noninteractive 13 | RUN apt update 14 | RUN TZ=Etc/UTC apt install -y tzdata 15 | RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg 16 | # RUN alias python=python3 17 | 18 | # Security updates 19 | # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 20 | RUN apt upgrade --no-install-recommends -y openssl 21 | 22 | # Create working directory 23 | RUN rm -rf /usr/src/app && mkdir -p /usr/src/app 24 | WORKDIR /usr/src/app 25 | 26 | # Copy contents 27 | COPY . /usr/src/app 28 | 29 | # Install pip packages 30 | COPY requirements.txt . 31 | RUN python3 -m pip install --upgrade pip wheel 32 | RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ 33 | coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' 34 | # tensorflow tensorflowjs \ 35 | 36 | # Set environment variables 37 | ENV OMP_NUM_THREADS=1 38 | 39 | # Cleanup 40 | ENV DEBIAN_FRONTEND teletype 41 | 42 | 43 | # Usage Examples ------------------------------------------------------------------------------------------------------- 44 | 45 | # Build and Push 46 | # t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t 47 | 48 | # Pull and Run 49 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t 50 | 51 | # Pull and Run with local directory access 52 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t 53 | 54 | # Kill all 55 | # sudo docker kill $(sudo docker ps -q) 56 | 57 | # Kill all image-based 58 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) 59 | 60 | # DockerHub tag update 61 | # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew 62 | 63 | # Clean up 64 | # sudo docker system prune -a --volumes 65 | 66 | # Update Ubuntu drivers 67 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ 68 | 69 | # DDP test 70 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 71 | 72 | # GCP VM from Image 73 | # docker.io/ultralytics/yolov5:latest 74 | -------------------------------------------------------------------------------- /example_aimbot/utils/docker/Dockerfile-arm64: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 3 | # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi 4 | 5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu 6 | FROM arm64v8/ubuntu:22.10 7 | 8 | # Downloads to user config dir 9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ 10 | 11 | # Install linux packages 12 | ENV DEBIAN_FRONTEND noninteractive 13 | RUN apt update 14 | RUN TZ=Etc/UTC apt install -y tzdata 15 | RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1 libglib2.0-0 libpython3-dev 16 | # RUN alias python=python3 17 | 18 | # Install pip packages 19 | COPY requirements.txt . 20 | RUN python3 -m pip install --upgrade pip wheel 21 | RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ 22 | coremltools onnx onnxruntime 23 | # tensorflow-aarch64 tensorflowjs \ 24 | 25 | # Create working directory 26 | RUN mkdir -p /usr/src/app 27 | WORKDIR /usr/src/app 28 | 29 | # Copy contents 30 | COPY . /usr/src/app 31 | ENV DEBIAN_FRONTEND teletype 32 | 33 | 34 | # Usage Examples ------------------------------------------------------------------------------------------------------- 35 | 36 | # Build and Push 37 | # t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t 38 | 39 | # Pull and Run 40 | # t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t 41 | -------------------------------------------------------------------------------- /example_aimbot/utils/docker/Dockerfile-cpu: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 3 | # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments 4 | 5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu 6 | FROM ubuntu:mantic-20231011 7 | 8 | # Downloads to user config dir 9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ 10 | 11 | # Install linux packages 12 | # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package 13 | RUN apt update \ 14 | && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 15 | # RUN alias python=python3 16 | 17 | # Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' avoid 'externally-managed-environment' Ubuntu nightly error 18 | RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED 19 | 20 | # Install pip packages 21 | COPY requirements.txt . 22 | RUN python3 -m pip install --upgrade pip wheel 23 | RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ 24 | coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' \ 25 | # tensorflow tensorflowjs \ 26 | --extra-index-url https://download.pytorch.org/whl/cpu 27 | 28 | # Create working directory 29 | RUN mkdir -p /usr/src/app 30 | WORKDIR /usr/src/app 31 | 32 | # Copy contents 33 | COPY . /usr/src/app 34 | 35 | 36 | # Usage Examples ------------------------------------------------------------------------------------------------------- 37 | 38 | # Build and Push 39 | # t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . 
&& sudo docker push $t 40 | 41 | # Pull and Run 42 | # t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t 43 | -------------------------------------------------------------------------------- /example_aimbot/utils/downloads.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ 3 | Download utils 4 | """ 5 | 6 | import logging 7 | import subprocess 8 | import urllib 9 | from pathlib import Path 10 | 11 | import requests 12 | import torch 13 | 14 | 15 | def is_url(url, check=True): 16 | # Check if string is URL and check if URL exists 17 | try: 18 | url = str(url) 19 | result = urllib.parse.urlparse(url) 20 | assert all([result.scheme, result.netloc]) # check if is url 21 | return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online 22 | except (AssertionError, urllib.request.HTTPError): 23 | return False 24 | 25 | 26 | def gsutil_getsize(url=''): 27 | # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du 28 | output = subprocess.check_output(['gsutil', 'du', url], shell=True, encoding='utf-8') 29 | if output: 30 | return int(output.split()[0]) 31 | return 0 32 | 33 | 34 | def url_getsize(url='https://ultralytics.com/images/bus.jpg'): 35 | # Return downloadable file size in bytes 36 | response = requests.head(url, allow_redirects=True) 37 | return int(response.headers.get('content-length', -1)) 38 | 39 | 40 | def curl_download(url, filename, *, silent: bool = False) -> bool: 41 | """ 42 | Download a file from a url to a filename using curl. 43 | """ 44 | silent_option = 'sS' if silent else '' # silent 45 | proc = subprocess.run([ 46 | 'curl', 47 | '-#', 48 | f'-{silent_option}L', 49 | url, 50 | '--output', 51 | filename, 52 | '--retry', 53 | '9', 54 | '-C', 55 | '-', ]) 56 | return proc.returncode == 0 57 | 58 | 59 | def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): 60 | # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes 61 | from utils.general import LOGGER 62 | 63 | file = Path(file) 64 | assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" 65 | try: # url1 66 | LOGGER.info(f'Downloading {url} to {file}...') 67 | torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) 68 | assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check 69 | except Exception as e: # url2 70 | if file.exists(): 71 | file.unlink() # remove partial downloads 72 | LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') 73 | # curl download, retry and resume on fail 74 | curl_download(url2 or url, file) 75 | finally: 76 | if not file.exists() or file.stat().st_size < min_bytes: # check 77 | if file.exists(): 78 | file.unlink() # remove partial downloads 79 | LOGGER.info(f'ERROR: {assert_msg}\n{error_msg}') 80 | LOGGER.info('') 81 | 82 | 83 | def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'): 84 | # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc. 85 | from utils.general import LOGGER 86 | 87 | def github_assets(repository, version='latest'): 88 | # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) 89 | if version != 'latest': 90 | version = f'tags/{version}' # i.e. 
tags/v7.0 91 | response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api 92 | return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets 93 | 94 | file = Path(str(file).strip().replace("'", '')) 95 | if not file.exists(): 96 | # URL specified 97 | name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. 98 | if str(file).startswith(('http:/', 'https:/')): # download 99 | url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ 100 | file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 101 | if Path(file).is_file(): 102 | LOGGER.info(f'Found {url} locally at {file}') # file already exists 103 | else: 104 | safe_download(file=file, url=url, min_bytes=1E5) 105 | return file 106 | 107 | # GitHub assets 108 | assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default 109 | try: 110 | tag, assets = github_assets(repo, release) 111 | except Exception: 112 | try: 113 | tag, assets = github_assets(repo) # latest release 114 | except Exception: 115 | try: 116 | tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] 117 | except Exception: 118 | tag = release 119 | 120 | if name in assets: 121 | file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) 122 | safe_download(file, 123 | url=f'https://github.com/{repo}/releases/download/{tag}/{name}', 124 | min_bytes=1E5, 125 | error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag}') 126 | 127 | return str(file) 128 | -------------------------------------------------------------------------------- /example_aimbot/utils/flask_rest_api/README.md: -------------------------------------------------------------------------------- 1 | # Flask REST API 2 | 3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are 4 | commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API 5 | created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). 6 | 7 | ## Requirements 8 | 9 | [Flask](https://palletsprojects.com/p/flask/) is required. 
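(`restapi.py` also imports `torch` and `PIL`, so PyTorch and Pillow must be installed as well.)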
Install with: 10 | 11 | ```shell 12 | $ pip install Flask 13 | ``` 14 | 15 | ## Run 16 | 17 | After Flask installation run: 18 | 19 | ```shell 20 | $ python3 restapi.py --port 5000 21 | ``` 22 | 23 | Then use [curl](https://curl.se/) to perform a request: 24 | 25 | ```shell 26 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' 27 | ``` 28 | 29 | The model inference results are returned as a JSON response: 30 | 31 | ```json 32 | [ 33 | { 34 | "class": 0, 35 | "confidence": 0.8900438547, 36 | "height": 0.9318675399, 37 | "name": "person", 38 | "width": 0.3264600933, 39 | "xcenter": 0.7438579798, 40 | "ycenter": 0.5207948685 41 | }, 42 | { 43 | "class": 0, 44 | "confidence": 0.8440024257, 45 | "height": 0.7155083418, 46 | "name": "person", 47 | "width": 0.6546785235, 48 | "xcenter": 0.427829951, 49 | "ycenter": 0.6334488392 50 | }, 51 | { 52 | "class": 27, 53 | "confidence": 0.3771208823, 54 | "height": 0.3902671337, 55 | "name": "tie", 56 | "width": 0.0696444362, 57 | "xcenter": 0.3675483763, 58 | "ycenter": 0.7991207838 59 | }, 60 | { 61 | "class": 27, 62 | "confidence": 0.3527112305, 63 | "height": 0.1540903747, 64 | "name": "tie", 65 | "width": 0.0336618312, 66 | "xcenter": 0.7814827561, 67 | "ycenter": 0.5065554976 68 | } 69 | ] 70 | ``` 71 | 72 | An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given 73 | in `example_request.py` 74 | -------------------------------------------------------------------------------- /example_aimbot/utils/flask_rest_api/example_request.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ 3 | Perform test request 4 | """ 5 | 6 | import pprint 7 | 8 | import requests 9 | 10 | DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s' 11 | IMAGE = 'zidane.jpg' 12 | 13 | # Read image 14 | with open(IMAGE, 'rb') as f: 15 | image_data = f.read() 16 | 17 | response = requests.post(DETECTION_URL, files={'image': image_data}).json() 18 | 19 | pprint.pprint(response) 20 | -------------------------------------------------------------------------------- /example_aimbot/utils/flask_rest_api/restapi.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ 3 | Run a Flask REST API exposing one or more YOLOv5s models 4 | """ 5 | 6 | import argparse 7 | import io 8 | 9 | import torch 10 | from flask import Flask, request 11 | from PIL import Image 12 | 13 | app = Flask(__name__) 14 | models = {} 15 | 16 | DETECTION_URL = '/v1/object-detection/<model>' 17 | 18 | 19 | @app.route(DETECTION_URL, methods=['POST']) 20 | def predict(model): 21 | if request.method != 'POST': 22 | return 23 | 24 | if request.files.get('image'): 25 | # Method 1 26 | # with request.files["image"] as f: 27 | # im = Image.open(io.BytesIO(f.read())) 28 | 29 | # Method 2 30 | im_file = request.files['image'] 31 | im_bytes = im_file.read() 32 | im = Image.open(io.BytesIO(im_bytes)) 33 | 34 | if model in models: 35 | results = models[model](im, size=640) # reduce size=320 for faster inference 36 | return results.pandas().xyxy[0].to_json(orient='records') 37 | 38 | 39 | if __name__ == '__main__': 40 | parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model') 41 | parser.add_argument('--port', default=5000, type=int, help='port number') 42 | parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
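# (Illustrative aside, a hedged sketch rather than original code: every name passed via
#  --model is loaded into the `models` dict and served under its own URL through the
#  <model> path parameter in DETECTION_URL. Client sketch, assuming the server above is
#  running locally with its defaults:
#
#      import requests
#      with open('zidane.jpg', 'rb') as f:
#          r = requests.post('http://localhost:5000/v1/object-detection/yolov5s',
#                            files={'image': f.read()})
#      print(r.json())  # one dict per detected box
# )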
43 | opt = parser.parse_args() 44 | 45 | for m in opt.model: 46 | models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True) 47 | 48 | app.run(host='0.0.0.0', port=opt.port) # debug=True causes Restarting with stat 49 | -------------------------------------------------------------------------------- /example_aimbot/utils/google_app_engine/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gcr.io/google-appengine/python 2 | 3 | # Create a virtualenv for dependencies. This isolates these packages from 4 | # system-level packages. 5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2. 6 | RUN virtualenv /env -p python3 7 | 8 | # Setting these environment variables is the same as running 9 | # source /env/bin/activate. 10 | ENV VIRTUAL_ENV /env 11 | ENV PATH /env/bin:$PATH 12 | 13 | RUN apt-get update && apt-get install -y python-opencv 14 | 15 | # Copy the application's requirements.txt and run pip to install all 16 | # dependencies into the virtualenv. 17 | ADD requirements.txt /app/requirements.txt 18 | RUN pip install -r /app/requirements.txt 19 | 20 | # Add the application source code. 21 | ADD . /app 22 | 23 | # Run a WSGI server to serve the application. gunicorn must be declared as 24 | # a dependency in requirements.txt. 25 | CMD gunicorn -b :$PORT main:app 26 | -------------------------------------------------------------------------------- /example_aimbot/utils/google_app_engine/additional_requirements.txt: -------------------------------------------------------------------------------- 1 | # add these requirements in your app on top of the existing ones 2 | pip==23.3 3 | Flask==2.3.2 4 | gunicorn==19.10.0 5 | werkzeug>=3.0.1 # not directly required, pinned by Snyk to avoid a vulnerability 6 | -------------------------------------------------------------------------------- /example_aimbot/utils/google_app_engine/app.yaml: -------------------------------------------------------------------------------- 1 | runtime: custom 2 | env: flex 3 | 4 | service: yolov5app 5 | 6 | liveness_check: 7 | initial_delay_sec: 600 8 | 9 | manual_scaling: 10 | instances: 1 11 | resources: 12 | cpu: 1 13 | memory_gb: 4 14 | disk_size_gb: 20 15 | -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/clearml/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RootKit-Org/AI-Aimbot-Starter-Code/53a6285a956fe335710477f5b8cb5333724beae2/example_aimbot/utils/loggers/clearml/__init__.py -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/clearml/clearml_utils.py: -------------------------------------------------------------------------------- 1 | """Main Logger class for ClearML experiment tracking.""" 2 | import glob 3 | import re 4 | from pathlib import Path 5 | 6 | import numpy as np 7 | import yaml 8 | from ultralytics.utils.plotting import Annotator, colors 9 | 10 | try: 11 | import clearml 12 | from clearml import Dataset, Task 13 | 14 | assert hasattr(clearml, '__version__') # verify package import not local dir 15 | except (ImportError, AssertionError): 16 | clearml = None 17 | 18 | 19 | def construct_dataset(clearml_info_string): 20 | """Load in a clearml dataset and fill the internal data_dict with its contents.
21 | """ 22 | dataset_id = clearml_info_string.replace('clearml://', '') 23 | dataset = Dataset.get(dataset_id=dataset_id) 24 | dataset_root_path = Path(dataset.get_local_copy()) 25 | 26 | # We'll search for the yaml file definition in the dataset 27 | yaml_filenames = list(glob.glob(str(dataset_root_path / '*.yaml')) + glob.glob(str(dataset_root_path / '*.yml'))) 28 | if len(yaml_filenames) > 1: 29 | raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' 30 | 'the dataset definition this way.') 31 | elif len(yaml_filenames) == 0: 32 | raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file ' 33 | 'inside the dataset root path.') 34 | with open(yaml_filenames[0]) as f: 35 | dataset_definition = yaml.safe_load(f) 36 | 37 | assert set(dataset_definition.keys()).issuperset( 38 | {'train', 'test', 'val', 'nc', 'names'} 39 | ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" 40 | 41 | data_dict = dict() 42 | data_dict['train'] = str( 43 | (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None 44 | data_dict['test'] = str( 45 | (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None 46 | data_dict['val'] = str( 47 | (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None 48 | data_dict['nc'] = dataset_definition['nc'] 49 | data_dict['names'] = dataset_definition['names'] 50 | 51 | return data_dict 52 | 53 | 54 | class ClearmlLogger: 55 | """Log training runs, datasets, models, and predictions to ClearML. 56 | 57 | This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default, 58 | this information includes hyperparameters, system configuration and metrics, model metrics, code information and 59 | basic data metrics and analyses. 60 | 61 | By providing additional command line arguments to train.py, datasets, 62 | models and predictions can also be logged. 63 | """ 64 | 65 | def __init__(self, opt, hyp): 66 | """ 67 | - Initialize ClearML Task, this object will capture the experiment 68 | - Upload dataset version to ClearML Data if opt.upload_dataset is True 69 | 70 | arguments: 71 | opt (namespace) -- Commandline arguments for this run 72 | hyp (dict) -- Hyperparameters for this run 73 | 74 | """ 75 | self.current_epoch = 0 76 | # Keep tracked of amount of logged images to enforce a limit 77 | self.current_epoch_logged_images = set() 78 | # Maximum number of images to log to clearML per epoch 79 | self.max_imgs_to_log_per_epoch = 16 80 | # Get the interval of epochs when bounding box images should be logged 81 | self.bbox_interval = opt.bbox_interval 82 | self.clearml = clearml 83 | self.task = None 84 | self.data_dict = None 85 | if self.clearml: 86 | self.task = Task.init( 87 | project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5', 88 | task_name=opt.name if opt.name != 'exp' else 'Training', 89 | tags=['YOLOv5'], 90 | output_uri=True, 91 | reuse_last_task_id=opt.exist_ok, 92 | auto_connect_frameworks={'pytorch': False} 93 | # We disconnect pytorch auto-detection, because we added manual model save points in the code 94 | ) 95 | # ClearML's hooks will already grab all general parameters 96 | # Only the hyperparameters coming from the yaml config file 97 | # will have to be added manually! 
98 | self.task.connect(hyp, name='Hyperparameters') 99 | self.task.connect(opt, name='Args') 100 | 101 | # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent 102 | self.task.set_base_docker('ultralytics/yolov5:latest', 103 | docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', 104 | docker_setup_bash_script='pip install clearml') 105 | 106 | # Get ClearML Dataset Version if requested 107 | if opt.data.startswith('clearml://'): 108 | # data_dict should have the following keys: 109 | # names, nc (number of classes), test, train, val (all three relative paths to ../datasets) 110 | self.data_dict = construct_dataset(opt.data) 111 | # Set data to data_dict because wandb will crash without this information and opt is the best way 112 | # to give it to them 113 | opt.data = self.data_dict 114 | 115 | def log_debug_samples(self, files, title='Debug Samples'): 116 | """ 117 | Log files (images) as debug samples in the ClearML task. 118 | 119 | arguments: 120 | files (List(PosixPath)) a list of file paths in PosixPath format 121 | title (str) A title that groups together images with the same values 122 | """ 123 | for f in files: 124 | if f.exists(): 125 | it = re.search(r'_batch(\d+)', f.name) 126 | iteration = int(it.groups()[0]) if it else 0 127 | self.task.get_logger().report_image(title=title, 128 | series=f.name.replace(it.group(), ''), 129 | local_path=str(f), 130 | iteration=iteration) 131 | 132 | def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25): 133 | """ 134 | Draw the bounding boxes on a single image and report the result as a ClearML debug sample. 135 | 136 | arguments: 137 | image_path (PosixPath) the path to the original image file 138 | boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] 139 | class_names (dict): dict containing mapping of class int to class name 140 | image (Tensor): A torch tensor containing the actual image data 141 | """ 142 | if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0: 143 | # Log every bbox_interval times and deduplicate for any intermittent extra eval runs 144 | if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images: 145 | im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2)) 146 | annotator = Annotator(im=im, pil=True) 147 | for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])): 148 | color = colors(i) 149 | 150 | class_name = class_names[int(class_nr)] 151 | confidence_percentage = round(float(conf) * 100, 2) 152 | label = f'{class_name}: {confidence_percentage}%' 153 | 154 | if conf > conf_threshold: 155 | annotator.rectangle(box.cpu().numpy(), outline=color) 156 | annotator.box_label(box.cpu().numpy(), label=label, color=color) 157 | 158 | annotated_image = annotator.result() 159 | self.task.get_logger().report_image(title='Bounding Boxes', 160 | series=image_path.name, 161 | iteration=self.current_epoch, 162 | image=annotated_image) 163 | self.current_epoch_logged_images.add(image_path) 164 | -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/clearml/hpo.py: -------------------------------------------------------------------------------- 1 | from clearml import Task 2 | # Connecting ClearML with the current process, 3 | # from here on everything is logged
automatically 4 | from clearml.automation import HyperParameterOptimizer, UniformParameterRange 5 | from clearml.automation.optuna import OptimizerOptuna 6 | 7 | task = Task.init(project_name='Hyper-Parameter Optimization', 8 | task_name='YOLOv5', 9 | task_type=Task.TaskTypes.optimizer, 10 | reuse_last_task_id=False) 11 | 12 | # Example use case: 13 | optimizer = HyperParameterOptimizer( 14 | # This is the experiment we want to optimize 15 | base_task_id='<your_template_task_id>', 16 | # here we define the hyper-parameters to optimize 17 | # Notice: The parameter name should exactly match what you see in the UI: <section_name>/<parameter> 18 | # For Example, here we see in the base experiment a section Named: "General" 19 | # under it a parameter named "batch_size", this becomes "General/batch_size" 20 | # If you have `argparse` for example, then arguments will appear under the "Args" section, 21 | # and you should instead pass "Args/batch_size" 22 | hyper_parameters=[ 23 | UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1), 24 | UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0), 25 | UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98), 26 | UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001), 27 | UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0), 28 | UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95), 29 | UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2), 30 | UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2), 31 | UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0), 32 | UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0), 33 | UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0), 34 | UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0), 35 | UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7), 36 | UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0), 37 | UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0), 38 | UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1), 39 | UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9), 40 | UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9), 41 | UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0), 42 | UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9), 43 | UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9), 44 | UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0), 45 | UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001), 46 | UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0), 47 | UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0), 48 | UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0), 49 | UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0), 50 | UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)], 51 | # this is the objective metric we want to maximize/minimize 52 | objective_metric_title='metrics', 53 | objective_metric_series='mAP_0.5', 54 | # now we decide if we want to maximize it or minimize it (accuracy we maximize)
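# (Illustrative aside, not original code: the 'Hyperparameters/...' names above resolve
#  because the training task called task.connect(hyp, name='Hyperparameters'), which files
#  every hyperparameter under that section; argparse options would be addressed as
#  'Args/<name>' instead. Likewise, objective_metric_title/objective_metric_series must
#  name a scalar the base experiment actually reports, here the 'mAP_0.5' series of the
#  'metrics' plot. Sketch of the mapping:
#
#      task.connect({'lr0': 0.01}, name='Hyperparameters')  # => tunable as 'Hyperparameters/lr0'
# )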
55 | objective_metric_sign='max', 56 | # let us limit the number of concurrent experiments, 57 | # this in turn will make sure we don't bombard the scheduler with experiments. 58 | # if we have an auto-scaler connected, this, by proxy, will limit the number of machines 59 | max_number_of_concurrent_tasks=1, 60 | # this is the optimizer class (actually doing the optimization) 61 | # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band) 62 | optimizer_class=OptimizerOptuna, 63 | # If specified only the top K performing Tasks will be kept, the others will be automatically archived 64 | save_top_k_tasks_only=5, 65 | compute_time_limit=None, 66 | total_max_jobs=20, 67 | min_iteration_per_job=None, 68 | max_iteration_per_job=None, 69 | ) 70 | 71 | # report every 10 seconds, this is way too often, but we are testing here 72 | optimizer.set_report_period(10 / 60) 73 | # You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent 74 | # an_optimizer.start_locally(job_complete_callback=job_complete_callback) 75 | # set the time limit for the optimization process (2 hours) 76 | optimizer.set_time_limit(in_minutes=120.0) 77 | # Start the optimization process in the local environment 78 | optimizer.start_locally() 79 | # wait until process is done (notice we are controlling the optimization process in the background) 80 | optimizer.wait() 81 | # make sure background optimization stopped 82 | optimizer.stop() 83 | 84 | print('We are done, good bye') 85 | -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/comet/comet_utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from urllib.parse import urlparse 4 | 5 | try: 6 | import comet_ml 7 | except (ModuleNotFoundError, ImportError): 8 | comet_ml = None 9 | 10 | import yaml 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | COMET_PREFIX = 'comet://' 15 | COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') 16 | COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt') 17 | 18 | 19 | def download_model_checkpoint(opt, experiment): 20 | model_dir = f'{opt.project}/{experiment.name}' 21 | os.makedirs(model_dir, exist_ok=True) 22 | 23 | model_name = COMET_MODEL_NAME 24 | model_asset_list = experiment.get_model_asset_list(model_name) 25 | 26 | if len(model_asset_list) == 0: 27 | logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}') 28 | return 29 | 30 | model_asset_list = sorted( 31 | model_asset_list, 32 | key=lambda x: x['step'], 33 | reverse=True, 34 | ) 35 | logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list} 36 | 37 | resource_url = urlparse(opt.weights) 38 | checkpoint_filename = resource_url.query 39 | 40 | if checkpoint_filename: 41 | asset_id = logged_checkpoint_map.get(checkpoint_filename) 42 | else: 43 | asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) 44 | checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME 45 | 46 | if asset_id is None: 47 | logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment') 48 | return 49 | 50 | try: 51 | logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}') 52 | asset_filename = checkpoint_filename 53 | 54 | model_binary = experiment.get_asset(asset_id, return_type='binary',
stream=False) 55 | model_download_path = f'{model_dir}/{asset_filename}' 56 | with open(model_download_path, 'wb') as f: 57 | f.write(model_binary) 58 | 59 | opt.weights = model_download_path 60 | 61 | except Exception as e: 62 | logger.warning('COMET WARNING: Unable to download checkpoint from Comet') 63 | logger.exception(e) 64 | 65 | 66 | def set_opt_parameters(opt, experiment): 67 | """Update the opts Namespace with parameters 68 | from Comet's ExistingExperiment when resuming a run 69 | 70 | Args: 71 | opt (argparse.Namespace): Namespace of command line options 72 | experiment (comet_ml.APIExperiment): Comet API Experiment object 73 | """ 74 | asset_list = experiment.get_asset_list() 75 | resume_string = opt.resume 76 | 77 | for asset in asset_list: 78 | if asset['fileName'] == 'opt.yaml': 79 | asset_id = asset['assetId'] 80 | asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) 81 | opt_dict = yaml.safe_load(asset_binary) 82 | for key, value in opt_dict.items(): 83 | setattr(opt, key, value) 84 | opt.resume = resume_string 85 | 86 | # Save hyperparameters to YAML file 87 | # Necessary to pass checks in training script 88 | save_dir = f'{opt.project}/{experiment.name}' 89 | os.makedirs(save_dir, exist_ok=True) 90 | 91 | hyp_yaml_path = f'{save_dir}/hyp.yaml' 92 | with open(hyp_yaml_path, 'w') as f: 93 | yaml.dump(opt.hyp, f) 94 | opt.hyp = hyp_yaml_path 95 | 96 | 97 | def check_comet_weights(opt): 98 | """Downloads model weights from Comet and updates the 99 | weights path to point to saved weights location 100 | 101 | Args: 102 | opt (argparse.Namespace): Command Line arguments passed 103 | to YOLOv5 training script 104 | 105 | Returns: 106 | None/bool: Return True if weights are successfully downloaded 107 | else return None 108 | """ 109 | if comet_ml is None: 110 | return 111 | 112 | if isinstance(opt.weights, str): 113 | if opt.weights.startswith(COMET_PREFIX): 114 | api = comet_ml.API() 115 | resource = urlparse(opt.weights) 116 | experiment_path = f'{resource.netloc}{resource.path}' 117 | experiment = api.get(experiment_path) 118 | download_model_checkpoint(opt, experiment) 119 | return True 120 | 121 | return None 122 | 123 | 124 | def check_comet_resume(opt): 125 | """Restores run parameters to their original state based on the model checkpoint 126 | and logged Experiment parameters.
127 | 128 | Args: 129 | opt (argparse.Namespace): Command Line arguments passed 130 | to YOLOv5 training script 131 | 132 | Returns: 133 | None/bool: Return True if the run is restored successfully 134 | else return None 135 | """ 136 | if comet_ml is None: 137 | return 138 | 139 | if isinstance(opt.resume, str): 140 | if opt.resume.startswith(COMET_PREFIX): 141 | api = comet_ml.API() 142 | resource = urlparse(opt.resume) 143 | experiment_path = f'{resource.netloc}{resource.path}' 144 | experiment = api.get(experiment_path) 145 | set_opt_parameters(opt, experiment) 146 | download_model_checkpoint(opt, experiment) 147 | 148 | return True 149 | 150 | return None 151 | -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/comet/hpo.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import os 5 | import sys 6 | from pathlib import Path 7 | 8 | import comet_ml 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | FILE = Path(__file__).resolve() 13 | ROOT = FILE.parents[3] # YOLOv5 root directory 14 | if str(ROOT) not in sys.path: 15 | sys.path.append(str(ROOT)) # add ROOT to PATH 16 | 17 | from train import train 18 | from utils.callbacks import Callbacks 19 | from utils.general import increment_path 20 | from utils.torch_utils import select_device 21 | 22 | # Project Configuration 23 | config = comet_ml.config.get_config() 24 | COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') 25 | 26 | 27 | def get_args(known=False): 28 | parser = argparse.ArgumentParser() 29 | parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') 30 | parser.add_argument('--cfg', type=str, default='', help='model.yaml path') 31 | parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') 32 | parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') 33 | parser.add_argument('--epochs', type=int, default=300, help='total training epochs') 34 | parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') 35 | parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') 36 | parser.add_argument('--rect', action='store_true', help='rectangular training') 37 | parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') 38 | parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') 39 | parser.add_argument('--noval', action='store_true', help='only validate final epoch') 40 | parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') 41 | parser.add_argument('--noplots', action='store_true', help='save no plot files') 42 | parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') 43 | parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') 44 | parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') 45 | parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') 46 | parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') 47 | parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') 48 | parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') 49 | parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') 50 | parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') 51 | parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') 52 | parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') 53 | parser.add_argument('--name', default='exp', help='save to project/name') 54 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') 55 | parser.add_argument('--quad', action='store_true', help='quad dataloader') 56 | parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') 57 | parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') 58 | parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') 59 | parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') 60 | parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') 61 | parser.add_argument('--seed', type=int, default=0, help='Global training seed') 62 | parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') 63 | 64 | # Weights & Biases arguments 65 | parser.add_argument('--entity', default=None, help='W&B: Entity') 66 | parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') 67 | parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') 68 | parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') 69 | 70 | # Comet Arguments 71 | parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') 72 | parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') 73 | parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") 74 | parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.') 75 | parser.add_argument('--comet_optimizer_workers', 76 | type=int, 77 | default=1, 78 | help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') 79 | 80 | return parser.parse_known_args()[0] if known else parser.parse_args() 81 | 82 | 83 | def run(parameters, opt): 84 | hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']} 85 | 86 | opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) 87 | opt.batch_size = parameters.get('batch_size') 88 | opt.epochs = parameters.get('epochs') 89 | 90 | device = select_device(opt.device, batch_size=opt.batch_size) 91 | train(hyp_dict, opt, device, callbacks=Callbacks()) 92 | 93 | 94 | if __name__ == '__main__': 95 | opt = get_args(known=True) 96 | 97 | opt.weights = str(opt.weights) 98 | opt.cfg = str(opt.cfg) 99 | opt.data = str(opt.data) 100 | opt.project = str(opt.project) 101 | 
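# (Illustrative aside, a hedged sketch rather than original code: the block that follows
#  either creates a new Comet sweep from the JSON config or attaches to an existing one
#  through COMET_OPTIMIZER_ID, then pulls parameter sets until the search budget is
#  exhausted. Condensed flow:
#
#      optimizer = comet_ml.Optimizer('optimizer_config.json')  # or comet_ml.Optimizer(sweep_id)
#      for experiment in optimizer.get_parameters():
#          run(experiment['parameters'], opt)                   # one training run per sampled combo
# )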
102 | optimizer_id = os.getenv('COMET_OPTIMIZER_ID') 103 | if optimizer_id is None: 104 | with open(opt.comet_optimizer_config) as f: 105 | optimizer_config = json.load(f) 106 | optimizer = comet_ml.Optimizer(optimizer_config) 107 | else: 108 | optimizer = comet_ml.Optimizer(optimizer_id) 109 | 110 | opt.comet_optimizer_id = optimizer.id 111 | status = optimizer.status() 112 | 113 | opt.comet_optimizer_objective = status['spec']['objective'] 114 | opt.comet_optimizer_metric = status['spec']['metric'] 115 | 116 | logger.info('COMET INFO: Starting Hyperparameter Sweep') 117 | for parameter in optimizer.get_parameters(): 118 | run(parameter['parameters'], opt) 119 | -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/comet/optimizer_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "algorithm": "random", 3 | "parameters": { 4 | "anchor_t": { 5 | "type": "discrete", 6 | "values": [ 7 | 2, 8 | 8 9 | ] 10 | }, 11 | "batch_size": { 12 | "type": "discrete", 13 | "values": [ 14 | 16, 15 | 32, 16 | 64 17 | ] 18 | }, 19 | "box": { 20 | "type": "discrete", 21 | "values": [ 22 | 0.02, 23 | 0.2 24 | ] 25 | }, 26 | "cls": { 27 | "type": "discrete", 28 | "values": [ 29 | 0.2 30 | ] 31 | }, 32 | "cls_pw": { 33 | "type": "discrete", 34 | "values": [ 35 | 0.5 36 | ] 37 | }, 38 | "copy_paste": { 39 | "type": "discrete", 40 | "values": [ 41 | 1 42 | ] 43 | }, 44 | "degrees": { 45 | "type": "discrete", 46 | "values": [ 47 | 0, 48 | 45 49 | ] 50 | }, 51 | "epochs": { 52 | "type": "discrete", 53 | "values": [ 54 | 5 55 | ] 56 | }, 57 | "fl_gamma": { 58 | "type": "discrete", 59 | "values": [ 60 | 0 61 | ] 62 | }, 63 | "fliplr": { 64 | "type": "discrete", 65 | "values": [ 66 | 0 67 | ] 68 | }, 69 | "flipud": { 70 | "type": "discrete", 71 | "values": [ 72 | 0 73 | ] 74 | }, 75 | "hsv_h": { 76 | "type": "discrete", 77 | "values": [ 78 | 0 79 | ] 80 | }, 81 | "hsv_s": { 82 | "type": "discrete", 83 | "values": [ 84 | 0 85 | ] 86 | }, 87 | "hsv_v": { 88 | "type": "discrete", 89 | "values": [ 90 | 0 91 | ] 92 | }, 93 | "iou_t": { 94 | "type": "discrete", 95 | "values": [ 96 | 0.7 97 | ] 98 | }, 99 | "lr0": { 100 | "type": "discrete", 101 | "values": [ 102 | 1e-05, 103 | 0.1 104 | ] 105 | }, 106 | "lrf": { 107 | "type": "discrete", 108 | "values": [ 109 | 0.01, 110 | 1 111 | ] 112 | }, 113 | "mixup": { 114 | "type": "discrete", 115 | "values": [ 116 | 1 117 | ] 118 | }, 119 | "momentum": { 120 | "type": "discrete", 121 | "values": [ 122 | 0.6 123 | ] 124 | }, 125 | "mosaic": { 126 | "type": "discrete", 127 | "values": [ 128 | 0 129 | ] 130 | }, 131 | "obj": { 132 | "type": "discrete", 133 | "values": [ 134 | 0.2 135 | ] 136 | }, 137 | "obj_pw": { 138 | "type": "discrete", 139 | "values": [ 140 | 0.5 141 | ] 142 | }, 143 | "optimizer": { 144 | "type": "categorical", 145 | "values": [ 146 | "SGD", 147 | "Adam", 148 | "AdamW" 149 | ] 150 | }, 151 | "perspective": { 152 | "type": "discrete", 153 | "values": [ 154 | 0 155 | ] 156 | }, 157 | "scale": { 158 | "type": "discrete", 159 | "values": [ 160 | 0 161 | ] 162 | }, 163 | "shear": { 164 | "type": "discrete", 165 | "values": [ 166 | 0 167 | ] 168 | }, 169 | "translate": { 170 | "type": "discrete", 171 | "values": [ 172 | 0 173 | ] 174 | }, 175 | "warmup_bias_lr": { 176 | "type": "discrete", 177 | "values": [ 178 | 0, 179 | 0.2 180 | ] 181 | }, 182 | "warmup_epochs": { 183 | "type": "discrete", 184 | "values": [ 185 | 5 186 | ] 187 | }, 188 | "warmup_momentum": { 
189 | "type": "discrete", 190 | "values": [ 191 | 0, 192 | 0.95 193 | ] 194 | }, 195 | "weight_decay": { 196 | "type": "discrete", 197 | "values": [ 198 | 0, 199 | 0.001 200 | ] 201 | } 202 | }, 203 | "spec": { 204 | "maxCombo": 0, 205 | "metric": "metrics/mAP_0.5", 206 | "objective": "maximize" 207 | }, 208 | "trials": 1 209 | } 210 | -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/wandb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RootKit-Org/AI-Aimbot-Starter-Code/53a6285a956fe335710477f5b8cb5333724beae2/example_aimbot/utils/loggers/wandb/__init__.py -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/wandb/log_dataset.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from wandb_utils import WandbLogger 4 | 5 | from utils.general import LOGGER 6 | 7 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' 8 | 9 | 10 | def create_dataset_artifact(opt): 11 | logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused 12 | if not logger.wandb: 13 | LOGGER.info("install wandb using `pip install wandb` to log the dataset") 14 | 15 | 16 | if __name__ == '__main__': 17 | parser = argparse.ArgumentParser() 18 | parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') 19 | parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') 20 | parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') 21 | parser.add_argument('--entity', default=None, help='W&B entity') 22 | parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') 23 | 24 | opt = parser.parse_args() 25 | opt.resume = False # Explicitly disallow resume check for dataset upload job 26 | 27 | create_dataset_artifact(opt) 28 | -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/wandb/sweep.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | import wandb 5 | 6 | FILE = Path(__file__).resolve() 7 | ROOT = FILE.parents[3] # YOLOv5 root directory 8 | if str(ROOT) not in sys.path: 9 | sys.path.append(str(ROOT)) # add ROOT to PATH 10 | 11 | from train import parse_opt, train 12 | from utils.callbacks import Callbacks 13 | from utils.general import increment_path 14 | from utils.torch_utils import select_device 15 | 16 | 17 | def sweep(): 18 | wandb.init() 19 | # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. 
20 | hyp_dict = vars(wandb.config).get("_items").copy() 21 | 22 | # Workaround: get necessary opt args 23 | opt = parse_opt(known=True) 24 | opt.batch_size = hyp_dict.get("batch_size") 25 | opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) 26 | opt.epochs = hyp_dict.get("epochs") 27 | opt.nosave = True 28 | opt.data = hyp_dict.get("data") 29 | opt.weights = str(opt.weights) 30 | opt.cfg = str(opt.cfg) 31 | opt.data = str(opt.data) 32 | opt.hyp = str(opt.hyp) 33 | opt.project = str(opt.project) 34 | device = select_device(opt.device, batch_size=opt.batch_size) 35 | 36 | # train 37 | train(hyp_dict, opt, device, callbacks=Callbacks()) 38 | 39 | 40 | if __name__ == "__main__": 41 | sweep() 42 | -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/wandb/sweep.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for training 2 | # To set range- 3 | # Provide min and max values as: 4 | # parameter: 5 | # 6 | # min: scalar 7 | # max: scalar 8 | # OR 9 | # 10 | # Set a specific list of search space- 11 | # parameter: 12 | # values: [scalar1, scalar2, scalar3...] 13 | # 14 | # You can use grid, bayesian and hyperopt search strategy 15 | # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration 16 | 17 | program: utils/loggers/wandb/sweep.py 18 | method: random 19 | metric: 20 | name: metrics/mAP_0.5 21 | goal: maximize 22 | 23 | parameters: 24 | # hyperparameters: set either min, max range or values list 25 | data: 26 | value: "data/coco128.yaml" 27 | batch_size: 28 | values: [64] 29 | epochs: 30 | values: [10] 31 | 32 | lr0: 33 | distribution: uniform 34 | min: 1e-5 35 | max: 1e-1 36 | lrf: 37 | distribution: uniform 38 | min: 0.01 39 | max: 1.0 40 | momentum: 41 | distribution: uniform 42 | min: 0.6 43 | max: 0.98 44 | weight_decay: 45 | distribution: uniform 46 | min: 0.0 47 | max: 0.001 48 | warmup_epochs: 49 | distribution: uniform 50 | min: 0.0 51 | max: 5.0 52 | warmup_momentum: 53 | distribution: uniform 54 | min: 0.0 55 | max: 0.95 56 | warmup_bias_lr: 57 | distribution: uniform 58 | min: 0.0 59 | max: 0.2 60 | box: 61 | distribution: uniform 62 | min: 0.02 63 | max: 0.2 64 | cls: 65 | distribution: uniform 66 | min: 0.2 67 | max: 4.0 68 | cls_pw: 69 | distribution: uniform 70 | min: 0.5 71 | max: 2.0 72 | obj: 73 | distribution: uniform 74 | min: 0.2 75 | max: 4.0 76 | obj_pw: 77 | distribution: uniform 78 | min: 0.5 79 | max: 2.0 80 | iou_t: 81 | distribution: uniform 82 | min: 0.1 83 | max: 0.7 84 | anchor_t: 85 | distribution: uniform 86 | min: 2.0 87 | max: 8.0 88 | fl_gamma: 89 | distribution: uniform 90 | min: 0.0 91 | max: 4.0 92 | hsv_h: 93 | distribution: uniform 94 | min: 0.0 95 | max: 0.1 96 | hsv_s: 97 | distribution: uniform 98 | min: 0.0 99 | max: 0.9 100 | hsv_v: 101 | distribution: uniform 102 | min: 0.0 103 | max: 0.9 104 | degrees: 105 | distribution: uniform 106 | min: 0.0 107 | max: 45.0 108 | translate: 109 | distribution: uniform 110 | min: 0.0 111 | max: 0.9 112 | scale: 113 | distribution: uniform 114 | min: 0.0 115 | max: 0.9 116 | shear: 117 | distribution: uniform 118 | min: 0.0 119 | max: 10.0 120 | perspective: 121 | distribution: uniform 122 | min: 0.0 123 | max: 0.001 124 | flipud: 125 | distribution: uniform 126 | min: 0.0 127 | max: 1.0 128 | fliplr: 129 | distribution: uniform 130 | min: 0.0 131 | max: 1.0 132 | mosaic: 133 | distribution: uniform 134 | 
min: 0.0 135 | max: 1.0 136 | mixup: 137 | distribution: uniform 138 | min: 0.0 139 | max: 1.0 140 | copy_paste: 141 | distribution: uniform 142 | min: 0.0 143 | max: 1.0 144 | -------------------------------------------------------------------------------- /example_aimbot/utils/loggers/wandb/wandb_utils.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | 3 | # WARNING ⚠️ wandb is deprecated and will be removed in future release. 4 | # See supported integrations at https://github.com/ultralytics/yolov5#integrations 5 | 6 | import logging 7 | import os 8 | import sys 9 | from contextlib import contextmanager 10 | from pathlib import Path 11 | 12 | from utils.general import LOGGER, colorstr 13 | 14 | FILE = Path(__file__).resolve() 15 | ROOT = FILE.parents[3] # YOLOv5 root directory 16 | if str(ROOT) not in sys.path: 17 | sys.path.append(str(ROOT)) # add ROOT to PATH 18 | RANK = int(os.getenv('RANK', -1)) 19 | DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \ 20 | f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' 21 | 22 | try: 23 | import wandb 24 | 25 | assert hasattr(wandb, '__version__') # verify package import not local dir 26 | LOGGER.warning(DEPRECATION_WARNING) 27 | except (ImportError, AssertionError): 28 | wandb = None 29 | 30 | 31 | class WandbLogger(): 32 | """Log training runs, datasets, models, and predictions to Weights & Biases. 33 | 34 | This logger sends information to W&B at wandb.ai. By default, this information 35 | includes hyperparameters, system configuration and metrics, model metrics, 36 | and basic data metrics and analyses. 37 | 38 | By providing additional command line arguments to train.py, datasets, 39 | models and predictions can also be logged. 40 | 41 | For more on how this logger is used, see the Weights & Biases documentation: 42 | https://docs.wandb.com/guides/integrations/yolov5 43 | """ 44 | 45 | def __init__(self, opt, run_id=None, job_type='Training'): 46 | """ 47 | - Initialize WandbLogger instance 48 | - Upload dataset if opt.upload_dataset is True 49 | - Setup training processes if job_type is 'Training' 50 | 51 | arguments: 52 | opt (namespace) -- Commandline arguments for this run 53 | run_id (str) -- Run ID of W&B run to be resumed 54 | job_type (str) -- To set the job_type for this run 55 | 56 | """ 57 | # Pre-training routine -- 58 | self.job_type = job_type 59 | self.wandb, self.wandb_run = wandb, wandb.run if wandb else None 60 | self.val_artifact, self.train_artifact = None, None 61 | self.train_artifact_path, self.val_artifact_path = None, None 62 | self.result_artifact = None 63 | self.val_table, self.result_table = None, None 64 | self.max_imgs_to_log = 16 65 | self.data_dict = None 66 | if self.wandb: 67 | self.wandb_run = wandb.init(config=opt, 68 | resume='allow', 69 | project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, 70 | entity=opt.entity, 71 | name=opt.name if opt.name != 'exp' else None, 72 | job_type=job_type, 73 | id=run_id, 74 | allow_val_change=True) if not wandb.run else wandb.run 75 | 76 | if self.wandb_run: 77 | if self.job_type == 'Training': 78 | if isinstance(opt.data, dict): 79 | # This means another dataset manager has already processed the dataset info (e.g. 
ClearML) 80 | # and they will have stored the already processed dict in opt.data 81 | self.data_dict = opt.data 82 | self.setup_training(opt) 83 | 84 | def setup_training(self, opt): 85 | """ 86 | Setup the necessary processes for training YOLO models: 87 | - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX 88 | - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded 89 | - Setup log_dict, initialize bbox_interval 90 | 91 | arguments: 92 | opt (namespace) -- commandline arguments for this run 93 | 94 | """ 95 | self.log_dict, self.current_epoch = {}, 0 96 | self.bbox_interval = opt.bbox_interval 97 | if isinstance(opt.resume, str): 98 | model_dir, _ = self.download_model_artifact(opt) 99 | if model_dir: 100 | self.weights = Path(model_dir) / 'last.pt' 101 | config = self.wandb_run.config 102 | opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( 103 | self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ 104 | config.hyp, config.imgsz 105 | 106 | if opt.bbox_interval == -1: 107 | self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 108 | if opt.evolve or opt.noplots: 109 | self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval 110 | 111 | def log_model(self, path, opt, epoch, fitness_score, best_model=False): 112 | """ 113 | Log the model checkpoint as W&B artifact 114 | 115 | arguments: 116 | path (Path) -- Path of directory containing the checkpoints 117 | opt (namespace) -- Command line arguments for this run 118 | epoch (int) -- Current epoch number 119 | fitness_score (float) -- fitness score for current epoch 120 | best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. 121 | """ 122 | model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', 123 | type='model', 124 | metadata={ 125 | 'original_url': str(path), 126 | 'epochs_trained': epoch + 1, 127 | 'save period': opt.save_period, 128 | 'project': opt.project, 129 | 'total_epochs': opt.epochs, 130 | 'fitness_score': fitness_score}) 131 | model_artifact.add_file(str(path / 'last.pt'), name='last.pt') 132 | wandb.log_artifact(model_artifact, 133 | aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) 134 | LOGGER.info(f'Saving model artifact on epoch {epoch + 1}') 135 | 136 | def val_one_image(self, pred, predn, path, names, im): 137 | pass 138 | 139 | def log(self, log_dict): 140 | """ 141 | save the metrics to the logging dictionary 142 | 143 | arguments: 144 | log_dict (Dict) -- metrics/media to be logged in current step 145 | """ 146 | if self.wandb_run: 147 | for key, value in log_dict.items(): 148 | self.log_dict[key] = value 149 | 150 | def end_epoch(self): 151 | """ 152 | commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. 153 | 154 | 155 | 156 | """ 157 | if self.wandb_run: 158 | with all_logging_disabled(): 159 | try: 160 | wandb.log(self.log_dict) 161 | except BaseException as e: 162 | LOGGER.info( 163 | f'An error occurred in wandb logger. The training will proceed without interruption.
More info\n{e}' 164 | ) 165 | self.wandb_run.finish() 166 | self.wandb_run = None 167 | self.log_dict = {} 168 | 169 | def finish_run(self): 170 | """ 171 | Log metrics if any and finish the current W&B run 172 | """ 173 | if self.wandb_run: 174 | if self.log_dict: 175 | with all_logging_disabled(): 176 | wandb.log(self.log_dict) 177 | wandb.run.finish() 178 | LOGGER.warning(DEPRECATION_WARNING) 179 | 180 | 181 | @contextmanager 182 | def all_logging_disabled(highest_level=logging.CRITICAL): 183 | """ source - https://gist.github.com/simon-weber/7853144 184 | A context manager that will prevent any logging messages triggered during the body from being processed. 185 | :param highest_level: the maximum logging level in use. 186 | This would only need to be changed if a custom level greater than CRITICAL is defined. 187 | """ 188 | previous_level = logging.root.manager.disable 189 | logging.disable(highest_level) 190 | try: 191 | yield 192 | finally: 193 | logging.disable(previous_level) 194 | -------------------------------------------------------------------------------- /example_aimbot/utils/loss.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ 3 | Loss functions 4 | """ 5 | 6 | import torch 7 | import torch.nn as nn 8 | 9 | from utils.metrics import bbox_iou 10 | from utils.torch_utils import de_parallel 11 | 12 | 13 | def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 14 | # return positive, negative label smoothing BCE targets 15 | return 1.0 - 0.5 * eps, 0.5 * eps 16 | 17 | 18 | class BCEBlurWithLogitsLoss(nn.Module): 19 | # BCEWithLogitsLoss() with reduced missing label effects. 20 | def __init__(self, alpha=0.05): 21 | super().__init__() 22 | self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() 23 | self.alpha = alpha 24 | 25 | def forward(self, pred, true): 26 | loss = self.loss_fcn(pred, true) 27 | pred = torch.sigmoid(pred) # prob from logits 28 | dx = pred - true # reduce only missing label effects 29 | # dx = (pred - true).abs() # reduce missing label and false label effects 30 | alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) 31 | loss *= alpha_factor 32 | return loss.mean() 33 | 34 | 35 | class FocalLoss(nn.Module): 36 | # Wraps focal loss around existing loss_fcn(), i.e.
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) 37 | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): 38 | super().__init__() 39 | self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() 40 | self.gamma = gamma 41 | self.alpha = alpha 42 | self.reduction = loss_fcn.reduction 43 | self.loss_fcn.reduction = 'none' # required to apply FL to each element 44 | 45 | def forward(self, pred, true): 46 | loss = self.loss_fcn(pred, true) 47 | # p_t = torch.exp(-loss) 48 | # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability 49 | 50 | # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py 51 | pred_prob = torch.sigmoid(pred) # prob from logits 52 | p_t = true * pred_prob + (1 - true) * (1 - pred_prob) 53 | alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) 54 | modulating_factor = (1.0 - p_t) ** self.gamma 55 | loss *= alpha_factor * modulating_factor 56 | 57 | if self.reduction == 'mean': 58 | return loss.mean() 59 | elif self.reduction == 'sum': 60 | return loss.sum() 61 | else: # 'none' 62 | return loss 63 | 64 | 65 | class QFocalLoss(nn.Module): 66 | # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) 67 | def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): 68 | super().__init__() 69 | self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() 70 | self.gamma = gamma 71 | self.alpha = alpha 72 | self.reduction = loss_fcn.reduction 73 | self.loss_fcn.reduction = 'none' # required to apply FL to each element 74 | 75 | def forward(self, pred, true): 76 | loss = self.loss_fcn(pred, true) 77 | 78 | pred_prob = torch.sigmoid(pred) # prob from logits 79 | alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) 80 | modulating_factor = torch.abs(true - pred_prob) ** self.gamma 81 | loss *= alpha_factor * modulating_factor 82 | 83 | if self.reduction == 'mean': 84 | return loss.mean() 85 | elif self.reduction == 'sum': 86 | return loss.sum() 87 | else: # 'none' 88 | return loss 89 | 90 | 91 | class ComputeLoss: 92 | sort_obj_iou = False 93 | 94 | # Compute losses 95 | def __init__(self, model, autobalance=False): 96 | device = next(model.parameters()).device # get model device 97 | h = model.hyp # hyperparameters 98 | 99 | # Define criteria 100 | BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) 101 | BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) 102 | 103 | # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 104 | self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets 105 | 106 | # Focal loss 107 | g = h['fl_gamma'] # focal loss gamma 108 | if g > 0: 109 | BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) 110 | 111 | m = de_parallel(model).model[-1] # Detect() module 112 | self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 113 | self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index 114 | self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance 115 | self.na = m.na # number of anchors 116 | self.nc = m.nc # number of classes 117 | self.nl = m.nl # number of layers 118 | self.anchors = m.anchors 119 | self.device = device 120 | 121 | def __call__(self, p, targets): # predictions, targets 122 | lcls = torch.zeros(1, device=self.device) # class loss 123 | lbox = torch.zeros(1, 
device=self.device) # box loss 124 | lobj = torch.zeros(1, device=self.device) # object loss 125 | tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets 126 | 127 | # Losses 128 | for i, pi in enumerate(p): # layer index, layer predictions 129 | b, a, gj, gi = indices[i] # image, anchor, gridy, gridx 130 | tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj 131 | 132 | n = b.shape[0] # number of targets 133 | if n: 134 | # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 135 | pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions 136 | 137 | # Regression 138 | pxy = pxy.sigmoid() * 2 - 0.5 139 | pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] 140 | pbox = torch.cat((pxy, pwh), 1) # predicted box 141 | iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) 142 | lbox += (1.0 - iou).mean() # iou loss 143 | 144 | # Objectness 145 | iou = iou.detach().clamp(0).type(tobj.dtype) 146 | if self.sort_obj_iou: 147 | j = iou.argsort() 148 | b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] 149 | if self.gr < 1: 150 | iou = (1.0 - self.gr) + self.gr * iou 151 | tobj[b, a, gj, gi] = iou # iou ratio 152 | 153 | # Classification 154 | if self.nc > 1: # cls loss (only if multiple classes) 155 | t = torch.full_like(pcls, self.cn, device=self.device) # targets 156 | t[range(n), tcls[i]] = self.cp 157 | lcls += self.BCEcls(pcls, t) # BCE 158 | 159 | # Append targets to text file 160 | # with open('targets.txt', 'a') as file: 161 | # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] 162 | 163 | obji = self.BCEobj(pi[..., 4], tobj) 164 | lobj += obji * self.balance[i] # obj loss 165 | if self.autobalance: 166 | self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() 167 | 168 | if self.autobalance: 169 | self.balance = [x / self.balance[self.ssi] for x in self.balance] 170 | lbox *= self.hyp['box'] 171 | lobj *= self.hyp['obj'] 172 | lcls *= self.hyp['cls'] 173 | bs = tobj.shape[0] # batch size 174 | 175 | return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() 176 | 177 | def build_targets(self, p, targets): 178 | # Build targets for compute_loss(), input targets(image,class,x,y,w,h) 179 | na, nt = self.na, targets.shape[0] # number of anchors, targets 180 | tcls, tbox, indices, anch = [], [], [], [] 181 | gain = torch.ones(7, device=self.device) # normalized to gridspace gain 182 | ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) 183 | targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices 184 | 185 | g = 0.5 # bias 186 | off = torch.tensor( 187 | [ 188 | [0, 0], 189 | [1, 0], 190 | [0, 1], 191 | [-1, 0], 192 | [0, -1], # j,k,l,m 193 | # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm 194 | ], 195 | device=self.device).float() * g # offsets 196 | 197 | for i in range(self.nl): 198 | anchors, shape = self.anchors[i], p[i].shape 199 | gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain 200 | 201 | # Match targets to anchors 202 | t = targets * gain # shape(3,n,7) 203 | if nt: 204 | # Matches 205 | r = t[..., 4:6] / anchors[:, None] # wh ratio 206 | j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare 207 | # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) 208 | t = t[j] # filter 209 | 210 | # Offsets 211 | gxy = t[:, 
2:4] # grid xy 212 | gxi = gain[[2, 3]] - gxy # inverse 213 | j, k = ((gxy % 1 < g) & (gxy > 1)).T 214 | l, m = ((gxi % 1 < g) & (gxi > 1)).T 215 | j = torch.stack((torch.ones_like(j), j, k, l, m)) 216 | t = t.repeat((5, 1, 1))[j] 217 | offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] 218 | else: 219 | t = targets[0] 220 | offsets = 0 221 | 222 | # Define 223 | bc, gxy, gwh, a = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors 224 | a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class 225 | gij = (gxy - offsets).long() 226 | gi, gj = gij.T # grid indices 227 | 228 | # Append 229 | indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid 230 | tbox.append(torch.cat((gxy - gij, gwh), 1)) # box 231 | anch.append(anchors[a]) # anchors 232 | tcls.append(c) # class 233 | 234 | return tcls, tbox, indices, anch 235 | -------------------------------------------------------------------------------- /example_aimbot/utils/segment/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RootKit-Org/AI-Aimbot-Starter-Code/53a6285a956fe335710477f5b8cb5333724beae2/example_aimbot/utils/segment/__init__.py -------------------------------------------------------------------------------- /example_aimbot/utils/segment/augmentations.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ 3 | Image augmentation functions 4 | """ 5 | 6 | import math 7 | import random 8 | 9 | import cv2 10 | import numpy as np 11 | 12 | from ..augmentations import box_candidates 13 | from ..general import resample_segments, segment2box 14 | 15 | 16 | def mixup(im, labels, segments, im2, labels2, segments2): 17 | # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf 18 | r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 19 | im = (im * r + im2 * (1 - r)).astype(np.uint8) 20 | labels = np.concatenate((labels, labels2), 0) 21 | segments = np.concatenate((segments, segments2), 0) 22 | return im, labels, segments 23 | 24 | 25 | def random_perspective(im, 26 | targets=(), 27 | segments=(), 28 | degrees=10, 29 | translate=.1, 30 | scale=.1, 31 | shear=10, 32 | perspective=0.0, 33 | border=(0, 0)): 34 | # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) 35 | # targets = [cls, xyxy] 36 | 37 | height = im.shape[0] + border[0] * 2 # shape(h,w,c) 38 | width = im.shape[1] + border[1] * 2 39 | 40 | # Center 41 | C = np.eye(3) 42 | C[0, 2] = -im.shape[1] / 2 # x translation (pixels) 43 | C[1, 2] = -im.shape[0] / 2 # y translation (pixels) 44 | 45 | # Perspective 46 | P = np.eye(3) 47 | P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) 48 | P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) 49 | 50 | # Rotation and Scale 51 | R = np.eye(3) 52 | a = random.uniform(-degrees, degrees) 53 | # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations 54 | s = random.uniform(1 - scale, 1 + scale) 55 | # s = 2 ** random.uniform(-scale, scale) 56 | R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) 57 | 58 | # Shear 59 | S = np.eye(3) 60 | S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) 61 | S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) 62 | 63 | # Translation 64 | T = np.eye(3) 65 | 
T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width)  # x translation (pixels)
66 |     T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height)  # y translation (pixels)
67 | 
68 |     # Combined rotation matrix
69 |     M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
70 |     if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
71 |         if perspective:
72 |             im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
73 |         else:  # affine
74 |             im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
75 | 
76 |     # Visualize
77 |     # import matplotlib.pyplot as plt
78 |     # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
79 |     # ax[0].imshow(im[:, :, ::-1])  # base
80 |     # ax[1].imshow(im2[:, :, ::-1])  # warped
81 | 
82 |     # Transform label coordinates
83 |     n = len(targets)
84 |     new_segments = []
85 |     if n:
86 |         new = np.zeros((n, 4))
87 |         segments = resample_segments(segments)  # upsample
88 |         for i, segment in enumerate(segments):
89 |             xy = np.ones((len(segment), 3))
90 |             xy[:, :2] = segment
91 |             xy = xy @ M.T  # transform
92 |             xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2])  # perspective rescale or affine
93 | 
94 |             # clip
95 |             new[i] = segment2box(xy, width, height)
96 |             new_segments.append(xy)
97 | 
98 |         # filter candidates
99 |         i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
100 |         targets = targets[i]
101 |         targets[:, 1:5] = new[i]
102 |         new_segments = np.array(new_segments)[i]
103 | 
104 |     return im, targets, new_segments
105 | 
--------------------------------------------------------------------------------
/example_aimbot/utils/segment/general.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import torch
4 | import torch.nn.functional as F
5 | 
6 | 
7 | def crop_mask(masks, boxes):
8 |     """
9 |     "Crop" predicted masks by zeroing out everything not in the predicted bbox.
10 |     Vectorized by Chong (thanks Chong).
11 | 
12 |     Args:
13 |         - masks should be a size [n, h, w] tensor of masks
14 |         - boxes should be a size [n, 4] tensor of bbox coords in relative point form
15 |     """
16 | 
17 |     n, h, w = masks.shape
18 |     x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # each of shape(n,1,1)
19 |     r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # x indices, shape(1,1,w)
20 |     c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # y indices, shape(1,h,1)
21 | 
22 |     return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
23 | 
24 | 
25 | def process_mask_upsample(protos, masks_in, bboxes, shape):
26 |     """
27 |     Crop after upsample.
28 |     protos: [mask_dim, mask_h, mask_w]
29 |     masks_in: [n, mask_dim], n is number of masks after nms
30 |     bboxes: [n, 4], n is number of masks after nms
31 |     shape: input_image_size, (h, w)
32 | 
33 |     return: h, w, n
34 |     """
35 | 
36 |     c, mh, mw = protos.shape  # CHW
37 |     masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
38 |     masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
39 |     masks = crop_mask(masks, bboxes)  # CHW
40 |     return masks.gt_(0.5)
41 | 
42 | 
43 | def process_mask(protos, masks_in, bboxes, shape, upsample=False):
44 |     """
45 |     Crop before upsample.
46 | proto_out: [mask_dim, mask_h, mask_w] 47 | out_masks: [n, mask_dim], n is number of masks after nms 48 | bboxes: [n, 4], n is number of masks after nms 49 | shape:input_image_size, (h, w) 50 | 51 | return: h, w, n 52 | """ 53 | 54 | c, mh, mw = protos.shape # CHW 55 | ih, iw = shape 56 | masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW 57 | 58 | downsampled_bboxes = bboxes.clone() 59 | downsampled_bboxes[:, 0] *= mw / iw 60 | downsampled_bboxes[:, 2] *= mw / iw 61 | downsampled_bboxes[:, 3] *= mh / ih 62 | downsampled_bboxes[:, 1] *= mh / ih 63 | 64 | masks = crop_mask(masks, downsampled_bboxes) # CHW 65 | if upsample: 66 | masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW 67 | return masks.gt_(0.5) 68 | 69 | 70 | def process_mask_native(protos, masks_in, bboxes, shape): 71 | """ 72 | Crop after upsample. 73 | protos: [mask_dim, mask_h, mask_w] 74 | masks_in: [n, mask_dim], n is number of masks after nms 75 | bboxes: [n, 4], n is number of masks after nms 76 | shape: input_image_size, (h, w) 77 | 78 | return: h, w, n 79 | """ 80 | c, mh, mw = protos.shape # CHW 81 | masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) 82 | gain = min(mh / shape[0], mw / shape[1]) # gain = old / new 83 | pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding 84 | top, left = int(pad[1]), int(pad[0]) # y, x 85 | bottom, right = int(mh - pad[1]), int(mw - pad[0]) 86 | masks = masks[:, top:bottom, left:right] 87 | 88 | masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW 89 | masks = crop_mask(masks, bboxes) # CHW 90 | return masks.gt_(0.5) 91 | 92 | 93 | def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): 94 | """ 95 | img1_shape: model input shape, [h, w] 96 | img0_shape: origin pic shape, [h, w, 3] 97 | masks: [h, w, num] 98 | """ 99 | # Rescale coordinates (xyxy) from im1_shape to im0_shape 100 | if ratio_pad is None: # calculate from im0_shape 101 | gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new 102 | pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding 103 | else: 104 | pad = ratio_pad[1] 105 | top, left = int(pad[1]), int(pad[0]) # y, x 106 | bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) 107 | 108 | if len(masks.shape) < 2: 109 | raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') 110 | masks = masks[top:bottom, left:right] 111 | # masks = masks.permute(2, 0, 1).contiguous() 112 | # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] 113 | # masks = masks.permute(1, 2, 0).contiguous() 114 | masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) 115 | 116 | if len(masks.shape) == 2: 117 | masks = masks[:, :, None] 118 | return masks 119 | 120 | 121 | def mask_iou(mask1, mask2, eps=1e-7): 122 | """ 123 | mask1: [N, n] m1 means number of predicted objects 124 | mask2: [M, n] m2 means number of gt objects 125 | Note: n means image_w x image_h 126 | 127 | return: masks iou, [N, M] 128 | """ 129 | intersection = torch.matmul(mask1, mask2.t()).clamp(0) 130 | union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection 131 | return intersection / (union + eps) 132 | 133 | 134 | def masks_iou(mask1, mask2, eps=1e-7): 135 | """ 136 | mask1: [N, n] m1 means number of predicted objects 137 | mask2: [N, n] m2 means number of gt 
objects 138 | Note: n means image_w x image_h 139 | 140 | return: masks iou, (N, ) 141 | """ 142 | intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) 143 | union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection 144 | return intersection / (union + eps) 145 | 146 | 147 | def masks2segments(masks, strategy='largest'): 148 | # Convert masks(n,160,160) into segments(n,xy) 149 | segments = [] 150 | for x in masks.int().cpu().numpy().astype('uint8'): 151 | c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] 152 | if c: 153 | if strategy == 'concat': # concatenate all segments 154 | c = np.concatenate([x.reshape(-1, 2) for x in c]) 155 | elif strategy == 'largest': # select largest segment 156 | c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) 157 | else: 158 | c = np.zeros((0, 2)) # no segments found 159 | segments.append(c.astype('float32')) 160 | return segments 161 | -------------------------------------------------------------------------------- /example_aimbot/utils/segment/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from ..general import xywh2xyxy 6 | from ..loss import FocalLoss, smooth_BCE 7 | from ..metrics import bbox_iou 8 | from ..torch_utils import de_parallel 9 | from .general import crop_mask 10 | 11 | 12 | class ComputeLoss: 13 | # Compute losses 14 | def __init__(self, model, autobalance=False, overlap=False): 15 | self.sort_obj_iou = False 16 | self.overlap = overlap 17 | device = next(model.parameters()).device # get model device 18 | h = model.hyp # hyperparameters 19 | 20 | # Define criteria 21 | BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) 22 | BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) 23 | 24 | # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 25 | self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets 26 | 27 | # Focal loss 28 | g = h['fl_gamma'] # focal loss gamma 29 | if g > 0: 30 | BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) 31 | 32 | m = de_parallel(model).model[-1] # Detect() module 33 | self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 34 | self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index 35 | self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance 36 | self.na = m.na # number of anchors 37 | self.nc = m.nc # number of classes 38 | self.nl = m.nl # number of layers 39 | self.nm = m.nm # number of masks 40 | self.anchors = m.anchors 41 | self.device = device 42 | 43 | def __call__(self, preds, targets, masks): # predictions, targets, model 44 | p, proto = preds 45 | bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width 46 | lcls = torch.zeros(1, device=self.device) 47 | lbox = torch.zeros(1, device=self.device) 48 | lobj = torch.zeros(1, device=self.device) 49 | lseg = torch.zeros(1, device=self.device) 50 | tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets 51 | 52 | # Losses 53 | for i, pi in enumerate(p): # layer index, layer predictions 54 | b, a, gj, gi = indices[i] # image, anchor, gridy, gridx 55 | tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj 56 | 57 | n = b.shape[0] # number of 
targets 58 | if n: 59 | pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions 60 | 61 | # Box regression 62 | pxy = pxy.sigmoid() * 2 - 0.5 63 | pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] 64 | pbox = torch.cat((pxy, pwh), 1) # predicted box 65 | iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) 66 | lbox += (1.0 - iou).mean() # iou loss 67 | 68 | # Objectness 69 | iou = iou.detach().clamp(0).type(tobj.dtype) 70 | if self.sort_obj_iou: 71 | j = iou.argsort() 72 | b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] 73 | if self.gr < 1: 74 | iou = (1.0 - self.gr) + self.gr * iou 75 | tobj[b, a, gj, gi] = iou # iou ratio 76 | 77 | # Classification 78 | if self.nc > 1: # cls loss (only if multiple classes) 79 | t = torch.full_like(pcls, self.cn, device=self.device) # targets 80 | t[range(n), tcls[i]] = self.cp 81 | lcls += self.BCEcls(pcls, t) # BCE 82 | 83 | # Mask regression 84 | if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample 85 | masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] 86 | marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized 87 | mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) 88 | for bi in b.unique(): 89 | j = b == bi # matching index 90 | if self.overlap: 91 | mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0) 92 | else: 93 | mask_gti = masks[tidxs[i]][j] 94 | lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j]) 95 | 96 | obji = self.BCEobj(pi[..., 4], tobj) 97 | lobj += obji * self.balance[i] # obj loss 98 | if self.autobalance: 99 | self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() 100 | 101 | if self.autobalance: 102 | self.balance = [x / self.balance[self.ssi] for x in self.balance] 103 | lbox *= self.hyp['box'] 104 | lobj *= self.hyp['obj'] 105 | lcls *= self.hyp['cls'] 106 | lseg *= self.hyp['box'] / bs 107 | 108 | loss = lbox + lobj + lcls + lseg 109 | return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() 110 | 111 | def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): 112 | # Mask loss for one image 113 | pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) 114 | loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') 115 | return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() 116 | 117 | def build_targets(self, p, targets): 118 | # Build targets for compute_loss(), input targets(image,class,x,y,w,h) 119 | na, nt = self.na, targets.shape[0] # number of anchors, targets 120 | tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] 121 | gain = torch.ones(8, device=self.device) # normalized to gridspace gain 122 | ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) 123 | if self.overlap: 124 | batch = p[0].shape[0] 125 | ti = [] 126 | for i in range(batch): 127 | num = (targets[:, 0] == i).sum() # find number of targets of each image 128 | ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) 129 | ti = torch.cat(ti, 1) # (na, nt) 130 | else: 131 | ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) 132 | targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices 133 | 134 | g = 0.5 # bias 135 | off = torch.tensor( 136 | [ 137 | [0, 0], 138 | 
[1, 0], 139 | [0, 1], 140 | [-1, 0], 141 | [0, -1], # j,k,l,m 142 | # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm 143 | ], 144 | device=self.device).float() * g # offsets 145 | 146 | for i in range(self.nl): 147 | anchors, shape = self.anchors[i], p[i].shape 148 | gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain 149 | 150 | # Match targets to anchors 151 | t = targets * gain # shape(3,n,7) 152 | if nt: 153 | # Matches 154 | r = t[..., 4:6] / anchors[:, None] # wh ratio 155 | j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare 156 | # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) 157 | t = t[j] # filter 158 | 159 | # Offsets 160 | gxy = t[:, 2:4] # grid xy 161 | gxi = gain[[2, 3]] - gxy # inverse 162 | j, k = ((gxy % 1 < g) & (gxy > 1)).T 163 | l, m = ((gxi % 1 < g) & (gxi > 1)).T 164 | j = torch.stack((torch.ones_like(j), j, k, l, m)) 165 | t = t.repeat((5, 1, 1))[j] 166 | offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] 167 | else: 168 | t = targets[0] 169 | offsets = 0 170 | 171 | # Define 172 | bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors 173 | (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class 174 | gij = (gxy - offsets).long() 175 | gi, gj = gij.T # grid indices 176 | 177 | # Append 178 | indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid 179 | tbox.append(torch.cat((gxy - gij, gwh), 1)) # box 180 | anch.append(anchors[a]) # anchors 181 | tcls.append(c) # class 182 | tidxs.append(tidx) 183 | xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized 184 | 185 | return tcls, tbox, indices, anch, tidxs, xywhn 186 | -------------------------------------------------------------------------------- /example_aimbot/utils/segment/metrics.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ 3 | Model validation metrics 4 | """ 5 | 6 | import numpy as np 7 | 8 | from ..metrics import ap_per_class 9 | 10 | 11 | def fitness(x): 12 | # Model fitness as a weighted combination of metrics 13 | w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] 14 | return (x[:, :8] * w).sum(1) 15 | 16 | 17 | def ap_per_class_box_and_mask( 18 | tp_m, 19 | tp_b, 20 | conf, 21 | pred_cls, 22 | target_cls, 23 | plot=False, 24 | save_dir='.', 25 | names=(), 26 | ): 27 | """ 28 | Args: 29 | tp_b: tp of boxes. 30 | tp_m: tp of masks. 31 | other arguments see `func: ap_per_class`. 
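    Returns:
        dict with 'boxes' and 'masks' entries, each holding p, r, ap, f1, ap_class.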
32 | """ 33 | results_boxes = ap_per_class(tp_b, 34 | conf, 35 | pred_cls, 36 | target_cls, 37 | plot=plot, 38 | save_dir=save_dir, 39 | names=names, 40 | prefix='Box')[2:] 41 | results_masks = ap_per_class(tp_m, 42 | conf, 43 | pred_cls, 44 | target_cls, 45 | plot=plot, 46 | save_dir=save_dir, 47 | names=names, 48 | prefix='Mask')[2:] 49 | 50 | results = { 51 | 'boxes': { 52 | 'p': results_boxes[0], 53 | 'r': results_boxes[1], 54 | 'ap': results_boxes[3], 55 | 'f1': results_boxes[2], 56 | 'ap_class': results_boxes[4]}, 57 | 'masks': { 58 | 'p': results_masks[0], 59 | 'r': results_masks[1], 60 | 'ap': results_masks[3], 61 | 'f1': results_masks[2], 62 | 'ap_class': results_masks[4]}} 63 | return results 64 | 65 | 66 | class Metric: 67 | 68 | def __init__(self) -> None: 69 | self.p = [] # (nc, ) 70 | self.r = [] # (nc, ) 71 | self.f1 = [] # (nc, ) 72 | self.all_ap = [] # (nc, 10) 73 | self.ap_class_index = [] # (nc, ) 74 | 75 | @property 76 | def ap50(self): 77 | """AP@0.5 of all classes. 78 | Return: 79 | (nc, ) or []. 80 | """ 81 | return self.all_ap[:, 0] if len(self.all_ap) else [] 82 | 83 | @property 84 | def ap(self): 85 | """AP@0.5:0.95 86 | Return: 87 | (nc, ) or []. 88 | """ 89 | return self.all_ap.mean(1) if len(self.all_ap) else [] 90 | 91 | @property 92 | def mp(self): 93 | """mean precision of all classes. 94 | Return: 95 | float. 96 | """ 97 | return self.p.mean() if len(self.p) else 0.0 98 | 99 | @property 100 | def mr(self): 101 | """mean recall of all classes. 102 | Return: 103 | float. 104 | """ 105 | return self.r.mean() if len(self.r) else 0.0 106 | 107 | @property 108 | def map50(self): 109 | """Mean AP@0.5 of all classes. 110 | Return: 111 | float. 112 | """ 113 | return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 114 | 115 | @property 116 | def map(self): 117 | """Mean AP@0.5:0.95 of all classes. 118 | Return: 119 | float. 
120 | """ 121 | return self.all_ap.mean() if len(self.all_ap) else 0.0 122 | 123 | def mean_results(self): 124 | """Mean of results, return mp, mr, map50, map""" 125 | return (self.mp, self.mr, self.map50, self.map) 126 | 127 | def class_result(self, i): 128 | """class-aware result, return p[i], r[i], ap50[i], ap[i]""" 129 | return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) 130 | 131 | def get_maps(self, nc): 132 | maps = np.zeros(nc) + self.map 133 | for i, c in enumerate(self.ap_class_index): 134 | maps[c] = self.ap[i] 135 | return maps 136 | 137 | def update(self, results): 138 | """ 139 | Args: 140 | results: tuple(p, r, ap, f1, ap_class) 141 | """ 142 | p, r, all_ap, f1, ap_class_index = results 143 | self.p = p 144 | self.r = r 145 | self.all_ap = all_ap 146 | self.f1 = f1 147 | self.ap_class_index = ap_class_index 148 | 149 | 150 | class Metrics: 151 | """Metric for boxes and masks.""" 152 | 153 | def __init__(self) -> None: 154 | self.metric_box = Metric() 155 | self.metric_mask = Metric() 156 | 157 | def update(self, results): 158 | """ 159 | Args: 160 | results: Dict{'boxes': Dict{}, 'masks': Dict{}} 161 | """ 162 | self.metric_box.update(list(results['boxes'].values())) 163 | self.metric_mask.update(list(results['masks'].values())) 164 | 165 | def mean_results(self): 166 | return self.metric_box.mean_results() + self.metric_mask.mean_results() 167 | 168 | def class_result(self, i): 169 | return self.metric_box.class_result(i) + self.metric_mask.class_result(i) 170 | 171 | def get_maps(self, nc): 172 | return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) 173 | 174 | @property 175 | def ap_class_index(self): 176 | # boxes and masks have the same ap_class_index 177 | return self.metric_box.ap_class_index 178 | 179 | 180 | KEYS = [ 181 | 'train/box_loss', 182 | 'train/seg_loss', # train loss 183 | 'train/obj_loss', 184 | 'train/cls_loss', 185 | 'metrics/precision(B)', 186 | 'metrics/recall(B)', 187 | 'metrics/mAP_0.5(B)', 188 | 'metrics/mAP_0.5:0.95(B)', # metrics 189 | 'metrics/precision(M)', 190 | 'metrics/recall(M)', 191 | 'metrics/mAP_0.5(M)', 192 | 'metrics/mAP_0.5:0.95(M)', # metrics 193 | 'val/box_loss', 194 | 'val/seg_loss', # val loss 195 | 'val/obj_loss', 196 | 'val/cls_loss', 197 | 'x/lr0', 198 | 'x/lr1', 199 | 'x/lr2', ] 200 | 201 | BEST_KEYS = [ 202 | 'best/epoch', 203 | 'best/precision(B)', 204 | 'best/recall(B)', 205 | 'best/mAP_0.5(B)', 206 | 'best/mAP_0.5:0.95(B)', 207 | 'best/precision(M)', 208 | 'best/recall(M)', 209 | 'best/mAP_0.5(M)', 210 | 'best/mAP_0.5:0.95(M)', ] 211 | -------------------------------------------------------------------------------- /example_aimbot/utils/segment/plots.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import math 3 | from pathlib import Path 4 | 5 | import cv2 6 | import matplotlib.pyplot as plt 7 | import numpy as np 8 | import pandas as pd 9 | import torch 10 | 11 | from .. 
import threaded 12 | from ..general import xywh2xyxy 13 | from ..plots import Annotator, colors 14 | 15 | 16 | @threaded 17 | def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): 18 | # Plot image grid with labels 19 | if isinstance(images, torch.Tensor): 20 | images = images.cpu().float().numpy() 21 | if isinstance(targets, torch.Tensor): 22 | targets = targets.cpu().numpy() 23 | if isinstance(masks, torch.Tensor): 24 | masks = masks.cpu().numpy().astype(int) 25 | 26 | max_size = 1920 # max image size 27 | max_subplots = 16 # max image subplots, i.e. 4x4 28 | bs, _, h, w = images.shape # batch size, _, height, width 29 | bs = min(bs, max_subplots) # limit plot images 30 | ns = np.ceil(bs ** 0.5) # number of subplots (square) 31 | if np.max(images[0]) <= 1: 32 | images *= 255 # de-normalise (optional) 33 | 34 | # Build Image 35 | mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init 36 | for i, im in enumerate(images): 37 | if i == max_subplots: # if last batch has fewer images than we expect 38 | break 39 | x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin 40 | im = im.transpose(1, 2, 0) 41 | mosaic[y:y + h, x:x + w, :] = im 42 | 43 | # Resize (optional) 44 | scale = max_size / ns / max(h, w) 45 | if scale < 1: 46 | h = math.ceil(scale * h) 47 | w = math.ceil(scale * w) 48 | mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) 49 | 50 | # Annotate 51 | fs = int((h + w) * ns * 0.01) # font size 52 | annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) 53 | for i in range(i + 1): 54 | x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin 55 | annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders 56 | if paths: 57 | annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames 58 | if len(targets) > 0: 59 | idx = targets[:, 0] == i 60 | ti = targets[idx] # image targets 61 | 62 | boxes = xywh2xyxy(ti[:, 2:6]).T 63 | classes = ti[:, 1].astype('int') 64 | labels = ti.shape[1] == 6 # labels if no conf column 65 | conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) 66 | 67 | if boxes.shape[1]: 68 | if boxes.max() <= 1.01: # if normalized with tolerance 0.01 69 | boxes[[0, 2]] *= w # scale to pixels 70 | boxes[[1, 3]] *= h 71 | elif scale < 1: # absolute coords need scale if image scales 72 | boxes *= scale 73 | boxes[[0, 2]] += x 74 | boxes[[1, 3]] += y 75 | for j, box in enumerate(boxes.T.tolist()): 76 | cls = classes[j] 77 | color = colors(cls) 78 | cls = names[cls] if names else cls 79 | if labels or conf[j] > 0.25: # 0.25 conf thresh 80 | label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' 81 | annotator.box_label(box, label, color=color) 82 | 83 | # Plot masks 84 | if len(masks): 85 | if masks.max() > 1.0: # mean that masks are overlap 86 | image_masks = masks[[i]] # (1, 640, 640) 87 | nl = len(ti) 88 | index = np.arange(nl).reshape(nl, 1, 1) + 1 89 | image_masks = np.repeat(image_masks, nl, axis=0) 90 | image_masks = np.where(image_masks == index, 1.0, 0.0) 91 | else: 92 | image_masks = masks[idx] 93 | 94 | im = np.asarray(annotator.im).copy() 95 | for j, box in enumerate(boxes.T.tolist()): 96 | if labels or conf[j] > 0.25: # 0.25 conf thresh 97 | color = colors(classes[j]) 98 | mh, mw = image_masks[j].shape 99 | if mh != h or mw != w: 100 | mask = image_masks[j].astype(np.uint8) 101 | mask = cv2.resize(mask, (w, h)) 102 | mask = mask.astype(bool) 103 | 
else: 104 | mask = image_masks[j].astype(bool) 105 | with contextlib.suppress(Exception): 106 | im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 107 | annotator.fromarray(im) 108 | annotator.im.save(fname) # save 109 | 110 | 111 | def plot_results_with_masks(file='path/to/results.csv', dir='', best=True): 112 | # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') 113 | save_dir = Path(file).parent if file else Path(dir) 114 | fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) 115 | ax = ax.ravel() 116 | files = list(save_dir.glob('results*.csv')) 117 | assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 118 | for f in files: 119 | try: 120 | data = pd.read_csv(f) 121 | index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + 122 | 0.1 * data.values[:, 11]) 123 | s = [x.strip() for x in data.columns] 124 | x = data.values[:, 0] 125 | for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): 126 | y = data.values[:, j] 127 | # y[y == 0] = np.nan # don't show zero values 128 | ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=2) 129 | if best: 130 | # best 131 | ax[i].scatter(index, y[index], color='r', label=f'best:{index}', marker='*', linewidth=3) 132 | ax[i].set_title(s[j] + f'\n{round(y[index], 5)}') 133 | else: 134 | # last 135 | ax[i].scatter(x[-1], y[-1], color='r', label='last', marker='*', linewidth=3) 136 | ax[i].set_title(s[j] + f'\n{round(y[-1], 5)}') 137 | # if j in [8, 9, 10]: # share train and val loss y axes 138 | # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) 139 | except Exception as e: 140 | print(f'Warning: Plotting error for {f}: {e}') 141 | ax[1].legend() 142 | fig.savefig(save_dir / 'results.png', dpi=200) 143 | plt.close() 144 | -------------------------------------------------------------------------------- /example_aimbot/utils/triton.py: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license 2 | """ Utils to interact with the Triton Inference Server 3 | """ 4 | 5 | import typing 6 | from urllib.parse import urlparse 7 | 8 | import torch 9 | 10 | 11 | class TritonRemoteModel: 12 | """ A wrapper over a model served by the Triton Inference Server. It can 13 | be configured to communicate over GRPC or HTTP. It accepts Torch Tensors 14 | as input and returns them as outputs. 15 | """ 16 | 17 | def __init__(self, url: str): 18 | """ 19 | Keyword arguments: 20 | url: Fully qualified address of the Triton server - for e.g. 
grpc://localhost:8000 21 | """ 22 | 23 | parsed_url = urlparse(url) 24 | if parsed_url.scheme == 'grpc': 25 | from tritonclient.grpc import InferenceServerClient, InferInput 26 | 27 | self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client 28 | model_repository = self.client.get_model_repository_index() 29 | self.model_name = model_repository.models[0].name 30 | self.metadata = self.client.get_model_metadata(self.model_name, as_json=True) 31 | 32 | def create_input_placeholders() -> typing.List[InferInput]: 33 | return [ 34 | InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] 35 | 36 | else: 37 | from tritonclient.http import InferenceServerClient, InferInput 38 | 39 | self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client 40 | model_repository = self.client.get_model_repository_index() 41 | self.model_name = model_repository[0]['name'] 42 | self.metadata = self.client.get_model_metadata(self.model_name) 43 | 44 | def create_input_placeholders() -> typing.List[InferInput]: 45 | return [ 46 | InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] 47 | 48 | self._create_input_placeholders_fn = create_input_placeholders 49 | 50 | @property 51 | def runtime(self): 52 | """Returns the model runtime""" 53 | return self.metadata.get('backend', self.metadata.get('platform')) 54 | 55 | def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: 56 | """ Invokes the model. Parameters can be provided via args or kwargs. 57 | args, if provided, are assumed to match the order of inputs of the model. 58 | kwargs are matched with the model input names. 59 | """ 60 | inputs = self._create_inputs(*args, **kwargs) 61 | response = self.client.infer(model_name=self.model_name, inputs=inputs) 62 | result = [] 63 | for output in self.metadata['outputs']: 64 | tensor = torch.as_tensor(response.as_numpy(output['name'])) 65 | result.append(tensor) 66 | return result[0] if len(result) == 1 else result 67 | 68 | def _create_inputs(self, *args, **kwargs): 69 | args_len, kwargs_len = len(args), len(kwargs) 70 | if not args_len and not kwargs_len: 71 | raise RuntimeError('No inputs provided.') 72 | if args_len and kwargs_len: 73 | raise RuntimeError('Cannot specify args and kwargs at the same time') 74 | 75 | placeholders = self._create_input_placeholders_fn() 76 | if args_len: 77 | if args_len != len(placeholders): 78 | raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.') 79 | for input, value in zip(placeholders, args): 80 | input.set_data_from_numpy(value.cpu().numpy()) 81 | else: 82 | for input in placeholders: 83 | value = kwargs[input.name] 84 | input.set_data_from_numpy(value.cpu().numpy()) 85 | return placeholders 86 | -------------------------------------------------------------------------------- /example_bare/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RootKit-Org/AI-Aimbot-Starter-Code/53a6285a956fe335710477f5b8cb5333724beae2/example_bare/__init__.py -------------------------------------------------------------------------------- /example_bare/main.py: -------------------------------------------------------------------------------- 1 | from .schema.settings import Settings 2 | import json 3 | import os 4 | 5 | def main( 6 | version: int = 0, 7 | settingsProfile: str = "", 8 | paidTier: int = 0, 9 | yoloVersion: int = 0, 10 | 
modelFileName: str = "" 11 | ): 12 | 13 | # getting %appdata% 14 | appdataLocation = os.getenv("APPDATA") 15 | settingsPath = os.path.join(appdataLocation, "ai-aimbot-launcher", "aimbotSettings", f"{settingsProfile.lower()}.json") 16 | 17 | # loading settings 18 | with open(settingsPath, "r") as f: 19 | settings = json.load(f) 20 | settings = Settings(**settings) 21 | 22 | 23 | print("BASE CODE") 24 | 25 | # getting model path 26 | modelPath = os.path.join(appdataLocation, "ai-aimbot-launcher", "models", modelFileName) 27 | 28 | -------------------------------------------------------------------------------- /example_bare/schema/settings.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, Field, validator 2 | import win32con 3 | 4 | KEY_MAP = { 5 | "F1": win32con.VK_F1, 6 | "F2": win32con.VK_F2, 7 | "F3": win32con.VK_F3, 8 | "F4": win32con.VK_F4, 9 | "F5": win32con.VK_F5, 10 | "F6": win32con.VK_F6, 11 | "F7": win32con.VK_F7, 12 | "F8": win32con.VK_F8, 13 | "F9": win32con.VK_F9, 14 | "F10": win32con.VK_F10, 15 | "F11": win32con.VK_F11, 16 | "F12": win32con.VK_F12, 17 | "Escape": win32con.VK_ESCAPE, 18 | "Tab": win32con.VK_TAB, 19 | "CapsLock": win32con.VK_CAPITAL, 20 | "LeftShift": win32con.VK_LSHIFT, 21 | "Shift": win32con.VK_LSHIFT, 22 | "RightShift": win32con.VK_RSHIFT, 23 | "LeftControl": win32con.VK_LCONTROL, 24 | "Control": win32con.VK_LCONTROL, 25 | "RightControl": win32con.VK_RCONTROL, 26 | "LeftAlt": win32con.VK_LMENU, 27 | "Alt": win32con.VK_LMENU, 28 | "RightAlt": win32con.VK_RMENU, 29 | "Enter": win32con.VK_RETURN, 30 | "Backspace": win32con.VK_BACK, 31 | "Delete": win32con.VK_DELETE, 32 | "Insert": win32con.VK_INSERT, 33 | "Home": win32con.VK_HOME, 34 | "End": win32con.VK_END, 35 | "PageUp": win32con.VK_PRIOR, 36 | "PageDown": win32con.VK_NEXT, 37 | "LeftMouseButtonDown": win32con.VK_LBUTTON, 38 | "RightMouseButtonDown": win32con.VK_RBUTTON, 39 | "MiddleMouseButtonDown": win32con.VK_MBUTTON, 40 | } 41 | 42 | class Settings(BaseModel): 43 | movementAmp: float = Field(default=0.3) 44 | useMask: bool = Field(default=False) 45 | maskLeft: bool = Field(default=True) 46 | maskWidth: int = Field(default=80) 47 | maskHeight: int = Field(default=200) 48 | quitKey: int = Field(default=ord("Q")) 49 | screenShotHeight: int = Field(default=320) 50 | screenShotWidth: int = Field(default=320) 51 | confidence: float = Field(default=0.5) 52 | headshotMode: bool = Field(default=True) 53 | headshotDistanceModifier: float = Field(default=0.38) 54 | displayCPS: bool = Field(default=True) 55 | visuals: bool = Field(default=False) 56 | centerOfScreen: bool = Field(default=True) 57 | activationKey: int = Field(default=win32con.VK_CAPITAL) 58 | autoFire: bool = Field(default=False) 59 | autoFireActivationDistance: int = Field(default=50) 60 | onnxChoice: int = Field(default=2) 61 | fovCircle: bool = Field(default=False) 62 | fovCircleRadius: int = Field(default=160) 63 | fovCircleRadiusDetectionModifier: float = Field(default=1.0) 64 | aimShakey: bool = Field(default=False) 65 | aimShakeyStrength: int = Field(default=10) 66 | toggleable: bool = Field(default=True) 67 | gameTitle: str = Field(default=None) 68 | 69 | @validator('activationKey', 'quitKey', pre=True) 70 | def mapKey(cls, key): 71 | 72 | if len(key) == 1: 73 | try: 74 | return ord(key.upper()) 75 | except Exception as e: 76 | print("Invalid activation key") 77 | print("Defaulting to CapsLock") 78 | return KEY_MAP["CapsLock"] 79 | 80 | return KEY_MAP[key] 81 | 82 | class 
Config:
83 |         extra = "allow"
--------------------------------------------------------------------------------
/imgs/banner.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/RootKit-Org/AI-Aimbot-Starter-Code/53a6285a956fe335710477f5b8cb5333724beae2/imgs/banner.png
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import requests
4 | import json
5 | from colorama import Fore, Back, Style
6 | import time
7 | import importlib
8 | import sys
9 | 
10 | def prRed(skk): print(Fore.RED, skk, Style.RESET_ALL)
11 | def prGreen(skk): print(Fore.GREEN + skk + Style.RESET_ALL)
12 | def prYellow(skk): print(Fore.YELLOW + skk + Style.RESET_ALL)
13 | def prBlue(skk): print(Fore.BLUE + skk + Style.RESET_ALL)
14 | def prPurple(skk): print(Fore.MAGENTA, skk + Style.RESET_ALL)
15 | def prCyan(skk): print(Fore.CYAN + skk + Style.RESET_ALL)
16 | def prLightGray(skk): print(Fore.WHITE + skk + Style.RESET_ALL)
17 | def prBlack(skk): print(Fore.BLACK + skk + Style.RESET_ALL)
18 | 
19 | appdataLocation = os.getenv("LOCALAPPDATA")
20 | appdata = os.getenv("APPDATA")
21 | currentDirectory = os.path.dirname(os.path.realpath(__file__))
22 | os.environ['Path'] += f';{appdataLocation}\\Programs\\Python\\Python311\\Scripts'
23 | os.environ['Path'] += f';{appdataLocation}\\Programs\\Python\\Python311\\'
24 | os.environ['Path'] += ';C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.8'
25 | os.environ['Path'] += ';C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.8\\bin'
26 | os.environ['Path'] += ';C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.8\\libnvvp'
27 | os.environ['Path'] += ';C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.8\\lib'
28 | os.environ['CUDA_PATH'] = 'C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.8'
29 | os.environ['CUDA_PATH_V11_8'] = 'C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.8'
30 | 
31 | TEST = True
32 | 
33 | def main():
34 |     customCode = None
35 |     if len(sys.argv) > 3:
36 |         settingsProfile = sys.argv[1]
37 |         yoloVersion = int(sys.argv[2])
38 |         modelFileName = sys.argv[3]
39 |     else:
40 |         print("That's not how you run this. Tsk, tsk, try again.")
41 |         prRed("python main.py <settingsProfile> <yoloVersion> <modelFileName> <customCode (optional)>")
42 |         return
43 | 
44 |     if len(sys.argv) > 4:
45 |         try:
46 |             sys.path.append(f"{currentDirectory}\\{sys.argv[4].split('.')[0]}\\")
47 | 
48 |             customCode = importlib.import_module(sys.argv[4])
49 |         except Exception as err:
50 |             raise Exception("Failed to import custom code")
51 | 
52 |         prBlue(f"Custom Code: {sys.argv[4]}")
53 | 
54 |     botVersion = 0
55 | 
56 |     if yoloVersion not in [5, 8]:
57 |         prRed("Invalid YOLO version. Please use 5 or 8")
58 |         return
59 | 
60 |     versionText = ""
61 |     _, fileExtension = os.path.splitext(modelFileName)
62 |     if fileExtension == ".pt":
63 |         botVersion = 0
64 |         versionText = "Fast (PyTorch)"
65 |     elif fileExtension == ".onnx":
66 |         botVersion = 1
67 |         versionText = "Faster (ONNX)"
68 |     elif fileExtension == ".engine":
69 |         botVersion = 2
70 |         versionText = "Fastest (TensorRT)"
71 |     else:
72 |         prRed("Invalid model file extension. Please use .pt, .onnx, or .engine")
73 |         return
74 | 
75 |     prGreen(f"Version: {versionText} YOLOv{yoloVersion} from {modelFileName}")
76 | 
77 |     paidTier = 0
78 | 
79 |     if TEST:
80 |         paidTier = 1
81 | 
82 |     prGreen(f"Settings Profile: {settingsProfile}")
83 | 
84 |     if customCode is not None:
85 |         customCode.main(
86 |             version=botVersion,
87 |             settingsProfile=settingsProfile,
88 |             paidTier=paidTier,
89 |             yoloVersion=yoloVersion,
90 |             modelFileName=modelFileName
91 |         )
92 |     else:
93 |         prBlue("RUNNING MAIN BOT")
94 | 
95 | if __name__ == "__main__":
96 |     try:
97 |         main()
98 |     except Exception as e:
99 |         import traceback
100 |         prRed(traceback.format_exc())
101 |         prRed(e)
102 |         prYellow("Ask @Wonder for help in our Discord: https://discord.gg/rootkitorg")
103 | 
104 |     for i in range(3, 0, -1):
105 |         prYellow(f"Bot will close in {i} seconds")
106 |         time.sleep(1)
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | ![World's Best AI Aimbot Banner](imgs/banner.png)
2 | 
3 | [![Pull Requests Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat)](http://makeapullrequest.com)
4 | 
5 | # ✨Make sure you have the Launcher before starting✨
6 | ### Over 10,000 users use the Launcher
7 | 
8 | ### 🔴 LIVE: Aimbot International 2024 - Win $1,000s
9 | Prepare your custom code and submit it for the Aimbot International, which ends later this year. Work alone or with a team. Check out the following videos to learn more.
10 | 
11 | All custom code must be submitted through the launcher.
12 | https://youtube.com/live/mtV6w2qhaNs?feature=share
13 | 
14 | Download the [RootKit Launcher](https://github.com/RootKit-Org/Launcher). It is FREE. **No coding required.**
15 | 
16 | ## Pros
17 | Using the starter kit is the best way to make your own bot, and it is the required way to submit code to us for competitions. Here are more reasons:
18 | - Official Competitive Standard
19 | - Access to models in the Store
20 | - Models will be auto-converted for you
21 | - Settings profiles can be used in your code
22 | - You can publish your code on our store
23 | - Everyone has the launcher (Over 4,600 users)
24 | 
25 | ## Cons
26 | - You have to learn how to use an API/SDK
27 | 
28 | ## What's in the box?
29 | We included 2 different examples for you.
30 | 
31 | - A bare example, which has the bare minimum for most projects you would want to start with.
32 | 
33 | - An example using the Open Source Aimbot
34 | 
35 | ## Running
36 | Place main.py anywhere. To run it, the syntax is
37 | `python main.py <settingsProfile> <yoloVersion> <modelFileName> <customCode (optional)>`
38 | 
39 | Treat the `<customCode>` argument as if you were going to import your code. Here is an example of what it would look like.
40 | `python main.py Default 5 v5_base_s.pt example_bare.main`
41 | 
42 | ## Deploying
43 | Move your custom code folder into `%APPDATA%\ai-aimbot-launcher\customCode` (see the sketch below for an example layout).
44 | 
45 | If you want to post it on the store, `@Technical Champions` in the Discord.
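Your custom code folder is just an importable Python package. As a sketch, a deployed folder could look like this (`my_custom_bot` is a placeholder name; the layout mirrors `example_bare`):
```
%APPDATA%\ai-aimbot-launcher\customCode\
└── my_custom_bot\
    ├── __init__.py
    ├── main.py        # exposes main(**argv)
    └── schema\
        └── settings.py
```
The module path you pass on the command line (here `my_custom_bot.main`) must expose a `main()` that accepts the keyword arguments listed under Starter Function below.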
46 | 
47 | ## Starter Function
48 | ### What the Launcher Sends you
49 | ```python
50 | version: int # 0-2 (pytorch, onnx, engine)
51 | settingsProfile: str # file name of settings located in %APPDATA%\ai-aimbot-launcher\aimbotSettings
52 | paidTier: int # 0-3 (free, supporter t1, t2, t3)
53 | yoloVersion: int # 5 or 8 (yolov5 or yolov8)
54 | modelFileName: str # file name of model located in %APPDATA%\ai-aimbot-launcher\models
55 | ```
56 | 
57 | ### Example 1
58 | ```python
59 | def main(**argv):
60 |     print("My custom bot")
61 |     print(argv)
62 | ```
63 | 
64 | ### Example 2
65 | ```python
66 | def main(
67 |     version,
68 |     settingsProfile,
69 |     paidTier,
70 |     yoloVersion,
71 |     modelFileName
72 | ):
73 |     print("My custom bot")
74 | ```
75 | 
76 | ### Example 3
77 | ```python
78 | from .schema.settings import Settings # Include the schema folder
79 | import json
80 | import os
81 | 
82 | def main(
83 |     version: int = 0,
84 |     settingsProfile: str = "",
85 |     paidTier: int = 0,
86 |     yoloVersion: int = 0,
87 |     modelFileName: str = ""
88 | ):
89 | 
90 |     appdataLocation = os.getenv("APPDATA")
91 |     settingsPath = os.path.join(appdataLocation, "ai-aimbot-launcher", "aimbotSettings", f"{settingsProfile.lower()}.json")
92 | 
93 |     # loading settings
94 |     with open(settingsPath, "r") as f:
95 |         settings = json.load(f)
96 |         settings = Settings(**settings)
97 | 
98 |     # getting model path
99 |     modelPath = os.path.join(appdataLocation, "ai-aimbot-launcher", "models", modelFileName)
100 | ```
101 | 
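### Example 4
A minimal sketch building on Example 3: a reusable settings loader. The `loadSettings` helper name and the fallback to the schema defaults when the profile file is missing are our own assumptions here, not documented launcher behavior.
```python
from .schema.settings import Settings # Include the schema folder
import json
import os

def loadSettings(settingsProfile: str = "Default") -> Settings:
    appdataLocation = os.getenv("APPDATA")
    settingsPath = os.path.join(appdataLocation, "ai-aimbot-launcher", "aimbotSettings", f"{settingsProfile.lower()}.json")

    # Assumption: fall back to the schema defaults if the profile does not exist
    if not os.path.isfile(settingsPath):
        return Settings()

    with open(settingsPath, "r") as f:
        return Settings(**json.load(f))

def main(**argv):
    settings = loadSettings(argv.get("settingsProfile", "Default"))
    print(f"Movement amp: {settings.movementAmp}, confidence: {settings.confidence}")
```
--------------------------------------------------------------------------------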