├── Process
│   ├── Train
│   │   ├── Introduce.md
│   │   ├── trainV7.py
│   │   ├── trainV10.py
│   │   ├── trainV11.py
│   │   ├── trainV5.py
│   │   └── trainV8.py
│   └── eval
│       ├── Introduce.md
│       └── eval_SML2.py
├── ODverse33.pdf
├── README.md
└── Drawing.py

/Process/Train/Introduce.md:
--------------------------------------------------------------------------------
## Data Training
Training scripts for the YOLO versions benchmarked in ODverse33.
--------------------------------------------------------------------------------
/ODverse33.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyCol/ODverse33/HEAD/ODverse33.pdf
--------------------------------------------------------------------------------
/Process/eval/Introduce.md:
--------------------------------------------------------------------------------
## Data Evaluation
For evaluation, the `eval_SML2.py` script first generates predictions in JSON format, which are compared against the ground-truth annotations of the test set. The `pycocotools` library is then used to perform a standard COCO-style evaluation. Reported metrics include `mAP_0.50`, `mAP_0.50:0.95`, and the scale-specific `mAP_small`, `mAP_medium`, and `mAP_large`.
--------------------------------------------------------------------------------
/Process/Train/trainV7.py:
--------------------------------------------------------------------------------
import os
import subprocess
import random
import numpy as np
import torch

def set_seed(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed(42)

# Root dataset directories
root_dirs = [
    r"D:\YOLO_Benchmark\medical\brain-tumor",
    # Add more if needed
]

# YOLOv7 training script path
yolov7_dir = r"D:\YOLO_Benchmark\yolov7"  # path to cloned yolov7 repo
train_script = os.path.join(yolov7_dir, "train.py")

# Loop over each dataset
for root_dir in root_dirs:
    data_yaml = os.path.join(root_dir, "data.yaml")
    last_folder = os.path.basename(root_dir)
    project_name = f"{last_folder}_yolo7"
    save_dir = f"D:/YOLO_Benchmark/saved_models/{last_folder}"

    command = [
        "python", train_script,
        "--weights", "yolov7.pt",
        "--cfg", "cfg/training/yolov7.yaml",
        "--data", data_yaml,
        "--device", "0",
        "--batch-size", "32",
        "--epochs", "300",
        "--img", "640",
        "--project", save_dir,
        "--name", project_name,
        "--exist-ok"
    ]

    print(f"Training: {project_name}")
    subprocess.run(command, cwd=yolov7_dir)
    print(f"Completed: {project_name}")
--------------------------------------------------------------------------------
/Process/Train/trainV10.py:
--------------------------------------------------------------------------------
import torch
import random
import numpy as np
from ultralytics import YOLO
import os

# Set random seed for reproducibility
def set_seed(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed(42)

# List of dataset root directories
root_dirs = [
    r"D:\YOLO_Benchmark\medical\brain-tumor",
    # Add more dataset directories if needed
]

# Loop through each dataset root
for root_dir in root_dirs:
    # Path to dataset YAML
    data_yaml_path = os.path.join(root_dir, "data.yaml")

    # Extract dataset name
    last_folder = os.path.basename(root_dir)

    # Set project directory and experiment name
    project_root = f"D:/YOLO_Benchmark/saved_models/{last_folder}"
    project_name = f"{last_folder}_yolo10m"

    # Load model architecture and pretrained weights.
    # Note: the second YOLO() call replaces the first, so training runs from
    # the pretrained yolov10m.pt checkpoint rather than the config file above.
    model = YOLO(r"D:\YOLO_Benchmark\ultralytics\ultralytics\cfg\models\v10\yolov10m.yaml")
    model = YOLO("yolov10m.pt")  # Load pretrained model

    # Train the model
    train_results = model.train(
        data=data_yaml_path,
        epochs=300,
        imgsz=640,
        batch=32,
        device="0",  # GPU index
        lr0=0.01,
        augment=True,
        translate=0.1,
        scale=0.5,
        fliplr=0.5,
        hsv_h=0.015,
        hsv_s=0.7,
        hsv_v=0.4,
        mosaic=True,
        project=project_root,
        name=project_name,
        workers=0,
        save=True,
        save_period=0
    )

    print(f"Training completed: {project_name}")

# Other version training scripts:
# python ultralytics/trainV5.py
# python ultralytics/trainV8.py
# python ultralytics/trainV9.py
# python ultralytics/trainV10.py
# python ultralytics/trainV11.py
--------------------------------------------------------------------------------
/Process/Train/trainV11.py:
--------------------------------------------------------------------------------
import torch
import random
import numpy as np
from ultralytics import YOLO
import os

# Set random seed for reproducibility
def set_seed(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Apply seed
set_seed(42)

# List of dataset root directories
root_dirs = [
    r"D:\YOLO_Benchmark\medical\brain-tumor",
    # Add more dataset root paths if needed
]

# Iterate through each dataset and train the model
for root_dir in root_dirs:
    # Path to the dataset YAML
    data_yaml_path = os.path.join(root_dir, "data.yaml")

    # Extract folder name for naming the project
    last_folder = os.path.basename(root_dir)

    # Define the save directory and experiment name
    project_root = f"D:/YOLO_Benchmark/saved_models/{last_folder}"
    project_name = f"{last_folder}_yolo11m"

    # Load the model config and pretrained weights.
    # Note: the second YOLO() call replaces the first, so training runs from
    # the pretrained yolo11m.pt checkpoint rather than the config file above.
    model = YOLO(r"D:\YOLO_Benchmark\ultralytics\ultralytics\cfg\models\11\yolo11.yaml")
    model = YOLO("yolo11m.pt")

    # Train the model
    train_results = model.train(
        data=data_yaml_path,
        epochs=300,
        imgsz=640,
        batch=32,
        device="0",  # Use GPU 0
        lr0=0.01,
        augment=True,
        translate=0.1,
        scale=0.5,
        fliplr=0.5,
        hsv_h=0.015,
        hsv_s=0.7,
        hsv_v=0.4,
        mosaic=True,
        project=project_root,
        name=project_name,
        workers=0,
        save=True,
        save_period=0
    )

    print(f"Training completed: {project_name}")

# Training script reference for different YOLO versions:
# python ultralytics/trainV5.py
# python ultralytics/trainV8.py
# python ultralytics/trainV9.py
# python ultralytics/trainV10.py
# python ultralytics/trainV11.py
--------------------------------------------------------------------------------
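A note on the Ultralytics-based scripts (`trainV5.py`, `trainV10.py`, `trainV11.py`): each builds a model from a config YAML and then immediately re-instantiates `YOLO()` from a pretrained checkpoint, so the config file is effectively discarded and training starts from the checkpoint alone. If the intent were to build the architecture from the YAML *and* transfer pretrained weights into it, Ultralytics supports chaining `.load()`. A minimal sketch under that assumption (paths as in `trainV11.py`; the `data.yaml` path is hypothetical):

```python
from ultralytics import YOLO

# Build the architecture from the config file, then transfer matching
# weights from the pretrained checkpoint into it.
model = YOLO(r"D:\YOLO_Benchmark\ultralytics\ultralytics\cfg\models\11\yolo11.yaml").load("yolo11m.pt")
model.train(data="data.yaml", epochs=300, imgsz=640)  # hypothetical dataset path
```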
/Process/Train/trainV5.py:
--------------------------------------------------------------------------------
import torch
import random
import numpy as np
from ultralytics import YOLO
import os

# Set random seed for reproducibility
def set_seed(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Apply the seed
set_seed(42)

# List of dataset root directories
root_dirs = [
    r"D:\YOLO_Benchmark\medical\brain-tumor",
    # Add more root directories here
]

# Iterate through each dataset root directory and train YOLO
for root_dir in root_dirs:
    # Path to the dataset YAML configuration file
    data_yaml_path = os.path.join(root_dir, "data.yaml")

    # Get the last folder name as project identifier
    last_folder = os.path.basename(root_dir)

    # Define paths for saving model weights and logs
    project_root = f"D:/YOLO_Benchmark/saved_models/{last_folder}"
    project_name = f"{last_folder}_yolo5m"

    # Load YOLO model configuration and pretrained weights
    # (as above, the second call supersedes the first; training uses yolov5mu.pt)
    model = YOLO(r"D:\YOLO_Benchmark\ultralytics\ultralytics\cfg\models\v5\yolov5.yaml")  # Config file
    model = YOLO("yolov5mu.pt")  # Pretrained weights

    # Start training
    train_results = model.train(
        data=data_yaml_path,
        epochs=300,
        imgsz=640,
        batch=32,
        device="0",  # GPU index (0 for first GPU)
        lr0=0.01,
        augment=True,
        translate=0.1,
        scale=0.5,
        fliplr=0.5,
        hsv_h=0.015,
        hsv_s=0.7,
        hsv_v=0.4,
        mosaic=True,
        project=project_root,
        name=project_name,
        workers=0,
        save=True,
        save_period=0
    )

    print(f"Training completed: {project_name}")

# Script variants for different model versions:
# python ultralytics/trainV5.py
# python ultralytics/trainV8.py
# python ultralytics/trainV9.py
# python ultralytics/trainV10.py
# python ultralytics/trainV11.py
--------------------------------------------------------------------------------
/Process/Train/trainV8.py:
--------------------------------------------------------------------------------
import torch
import random
import numpy as np
from ultralytics import YOLO
import os

# Set random seed for reproducibility
def set_seed(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Apply the seed
set_seed(42)

# List of dataset root directories
root_dirs = [
    r"D:\YOLO_Benchmark\medical\bone-fracture\bone-fracture",
    # Add more dataset root directories if needed
]

# Loop through each dataset directory and start training
for root_dir in root_dirs:
    # Path to the dataset configuration (data.yaml)
    data_yaml_path = os.path.join(root_dir, "data.yaml")

    # Get the last folder name to use as an identifier
    last_folder = os.path.basename(root_dir)

    # Define where to save the model and logs
    project_root = f"D:/YOLO_Benchmark/saved_models/{last_folder}"
    project_name = f"{last_folder}_yolo8m"

    # Load the pretrained YOLOv8 model (medium version)
    model = YOLO("yolov8m.pt")

    # Start training the model
    train_results = model.train(
        data=data_yaml_path,
        epochs=300,           # Number of training epochs
        imgsz=640,            # Input image size
        batch=32,             # Batch size
        device="0",           # GPU index (0 for the first GPU)
        lr0=0.01,             # Initial learning rate
        augment=True,         # Enable data augmentation
        translate=0.1,        # Random translation
        scale=0.5,            # Random scaling
        fliplr=0.5,           # Horizontal flip probability
        hsv_h=0.015,          # HSV hue augmentation
        hsv_s=0.7,            # HSV saturation augmentation
        hsv_v=0.4,            # HSV value augmentation
        mosaic=True,          # Enable mosaic augmentation
        project=project_root, # Directory to save results
        name=project_name,    # Project name
        workers=0,            # Number of dataloader workers
        save=True,            # Save checkpoints
        save_period=0         # No periodic checkpoints; only best and last are kept
    )

    print(f"Training completed: {project_name}")
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# ODverse33
**Newer YOLO versions are not always better!**

**ODverse33** is a comprehensive benchmark of **33 high-quality datasets** spanning **11 diverse domains**. It provides a **multi-domain evaluation** of YOLO models ranging from **YOLOv5 to YOLOv11**.

The paper, **"ODVerse33: Is the New YOLO Version Always Better? A Multi-Domain Benchmark from YOLO v5 to v11"**, is now available on [*arXiv*](http://arxiv.org/abs/2502.14314).


### 🌐 Covered Domains

<table>
  <tr>
    <td align="center">Autonomous Driving</td>
    <td align="center">Agricultural</td>
    <td align="center">Underwater</td>
  </tr>
  <tr>
    <td align="center">Medical</td>
    <td align="center">Videogame</td>
    <td align="center">Industrial</td>
  </tr>
  <tr>
    <td align="center">Aerial</td>
    <td align="center">Wildlife</td>
    <td align="center">Retail</td>
  </tr>
  <tr>
    <td align="center">Microscopic</td>
    <td align="center">Security</td>
  </tr>
</table>
The datasets, grouped into the 11 domains below, can be downloaded from *Kaggle*; each is published under the title *"XX Domain in ODverse33"*.
- 🚗 [Autonomous Driving](https://www.kaggle.com/datasets/skycol/autonomous-driving-domain-in-odverse33)
- 🌾 [Agricultural](https://www.kaggle.com/datasets/skycol/agricultural-domain-in-odverse33)
- 🌊 [Underwater](https://www.kaggle.com/datasets/skycol/underwater-domain-in-odverse33)
- 🏥 [Medical](https://www.kaggle.com/datasets/skycol/medical-domain-in-odverse33)
- 🎮 [Video Game](https://www.kaggle.com/datasets/skycol/videogame-domain-in-odverse33)
- 🏭 [Industrial](https://www.kaggle.com/datasets/skycol/industrial-domain-in-odverse33)
- 🛰️ [Aerial](https://www.kaggle.com/datasets/skycol/aerial-domain-in-odverse33)
- 🐾 [Wildlife](https://www.kaggle.com/datasets/skycol/wildlife-domain-in-odverse33)
- 🛒 [Retail](https://www.kaggle.com/datasets/skycol/retail-domain-in-odverse33)
- 🔬 [Microscopic](https://www.kaggle.com/datasets/skycol/microscopic-domain-in-odverse33)
- 🛡️ [Security](https://www.kaggle.com/datasets/skycol/security-domain-in-odverse33)

**April 6, 2025 Update:** Fixed minor errors in the manuscript and re-typeset it and its figures in LaTeX for easier reading.

---

### A Timeline of YOLO series detectors from v1 to v11

55 | 56 |

---

### Results on ODverse33 test sets and the COCO validation set
![Image](https://github.com/user-attachments/assets/ad4dbdee-dcdc-4d71-9d4a-6b26fe9d7878)

---

### 📊 Overall Performance on ODverse33 Test Sets (mAP)

| Metric | YOLOv5 | YOLOv6 | YOLOv7 | YOLOv8 | YOLOv9 | YOLOv10 | YOLOv11 |
|----------------------|--------|--------|--------|--------|--------|---------|---------|
| **mAP50** | 0.7846 | 0.7675 | 0.7826 | 0.7812 | 0.7913 | 0.7761 | **0.7927** |
| **mAP50–95** | 0.5862 | 0.5498 | 0.5699 | 0.5829 | 0.5902 | 0.5782 | **0.5931** |
| **mAPsmall** | 0.3722 | 0.3243 | 0.3612 | 0.3735 | **0.3877** | 0.3609 | 0.3855 |
| **mAPmedium** | 0.5290 | 0.4822 | 0.5269 | 0.5256 | 0.5357 | 0.5289 | **0.5374** |
| **mAPlarge** | 0.6487 | 0.6106 | 0.6463 | 0.6481 | 0.6546 | 0.6480 | **0.6559** |

---

Test results follow the COCO standard: metrics are aggregated over confidence thresholds from 0.0 to 1.0 in 0.01 increments.
--------------------------------------------------------------------------------
/Process/eval/eval_SML2.py:
--------------------------------------------------------------------------------
import os
import argparse
from ultralytics import YOLO
import json
import cv2  # Currently unused; the size check below is based on file size

def parse_args():
    """
    Parse command-line arguments
    """
    parser = argparse.ArgumentParser(description='YOLO prediction with configurable paths')
    parser.add_argument('--root_dir', type=str, required=True,
                        help='Root directory containing model weights (e.g., D:/YOLO_Benchmark/saved_models/UWD)')
    parser.add_argument('--data_yaml', type=str, required=True,
                        help='Path to data.yaml file (e.g., D:/YOLO_Benchmark/datasets2/UWD/data.yaml)')
    parser.add_argument('--output_dir', type=str, required=True,
                        help='Directory to save prediction results (e.g., D:/YOLO_Benchmark/model_result/BDD100k)')
    parser.add_argument('--max_size', type=int, default=10,
                        help='Maximum image size in MB to process (default: 10MB)')  # Threshold size

    return parser.parse_args()

def is_image_too_large(image_path, max_size_mb):
    """
    Check if the image is too large (based on file size).
    """
    image_size = os.path.getsize(image_path) / (1024 * 1024)  # Size in MB
    return image_size > max_size_mb

def main():
    # Clear CUDA cache
    import torch
    torch.cuda.empty_cache()

    args = parse_args()

    # Walk through the directory to find models
    for subdir, _, files in os.walk(args.root_dir):
        if 'weights' in subdir and 'best.pt' in files:
            model_path = os.path.join(subdir, 'best.pt')
            model_name = os.path.basename(os.path.dirname(subdir))

            # Load model
            model = YOLO(model_path)

            # Prediction
            image_dir = os.path.join(os.path.dirname(args.data_yaml), "test/images")
            results = []

            for image_name in os.listdir(image_dir):
                image_path = os.path.join(image_dir, image_name)

                # Skip large images
                if is_image_too_large(image_path, args.max_size):
                    print(f"Skipping {image_name}, image is too large.")
                    continue

                # Perform prediction for the valid image
                result = model.predict(
                    source=image_path,
                    imgsz=640,
                    device="cuda",
                    batch=1,
                    save=False
                )

                # Process prediction results
                for r in result:
                    image_id = os.path.splitext(image_name)[0]
                    if len(r.boxes):
                        for box in r.boxes:
                            x1, y1, x2, y2 = box.xyxy[0].tolist()
                            w = x2 - x1
                            h = y2 - y1
                            pred_dict = {
                                "image_id": image_id,
                                "category_id": int(box.cls[0]),
                                "bbox": [float(x1), float(y1), float(w), float(h)],
                                "score": float(box.conf[0])
                            }
                            results.append(pred_dict)

            # Save prediction results
            os.makedirs(args.output_dir, exist_ok=True)
            pred_save_path = os.path.join(args.output_dir, f"{model_name}.json")
            with open(pred_save_path, 'w') as f:
                json.dump(results, f, indent=2)

            print(f"Predictions saved for model {model_name} at {pred_save_path}")

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
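`Process/eval/Introduce.md` says the JSON predictions above are then scored with `pycocotools`, but that step is not included in the repository. The sketch below shows roughly what it looks like; both file paths are placeholders (assumptions), and the `image_id` and `category_id` values written by `eval_SML2.py` must match the ids in the ground-truth COCO annotation file for `loadRes` to accept them. The script itself is invoked via its argparse flags, e.g. `python eval_SML2.py --root_dir <saved_models/...> --data_yaml <dataset>/data.yaml --output_dir <model_result/...>`.

```python
# Minimal sketch of the COCO-style evaluation described in
# Process/eval/Introduce.md; the paths are placeholders (assumptions).
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

gt_json = "test/_annotations.coco.json"              # hypothetical ground-truth annotations
pred_json = "model_result/brain-tumor_yolo11m.json"  # JSON produced by eval_SML2.py

coco_gt = COCO(gt_json)
coco_dt = coco_gt.loadRes(pred_json)  # prediction ids must match the ground truth

coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
# coco_eval.stats[0] = mAP_0.50:0.95, stats[1] = mAP_0.50,
# stats[3] = mAP_small, stats[4] = mAP_medium, stats[5] = mAP_large
```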
/Drawing.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties
import seaborn as sns

plt.rcParams["font.family"] = "Arial"
plt.rcParams["font.size"] = 27
plt.rcParams["axes.labelsize"] = 30
plt.rcParams["xtick.labelsize"] = 40
plt.rcParams["ytick.labelsize"] = 40

plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.rm'] = 'Arial'
plt.rcParams['mathtext.it'] = 'Arial:italic'
plt.rcParams['mathtext.bf'] = 'Arial:bold'
plt.rcParams['xtick.major.pad'] = 10
plt.rcParams['ytick.major.pad'] = 10

fig, axes = plt.subplots(1, 3, figsize=(48, 13))
plt.tight_layout(pad=4)

models1 = ["v5", "v6", "v7", "v8", "v9", "v10", "v11"]
coco_val = [45.4, 50.0, 51.4, 50.2, 51.4, 51.1, 51.5]
ours_val = [61.48, 59.35, 61.34, 61.89, 62.13, 61.95, 62.53]
ours_test = [59.78, 56.55, 58.34, 59.69, 59.53, 59.10, 60.53]

colors_line = sns.color_palette("colorblind", 3)

ax1 = axes[0]
ax1.plot(models1, coco_val, marker='o', markersize=28, label=r"$\mathbf{COCO_{ val}}$",
         linestyle='-', linewidth=5, color=colors_line[0])
ax1.plot(models1, ours_val, marker='o', markersize=28, label=r"$\mathbf{OURS_{ val}}$",
         linestyle='--', linewidth=5, color=colors_line[1])
ax1.plot(models1, ours_test, marker='o', markersize=28, label=r"$\mathbf{OURS_{ test}}$",
         linestyle='-.', linewidth=5, color=colors_line[2])

ax1.set_ylabel(r"$\mathbf{mAP_{50-95} (\%)}$", fontname='Arial', fontsize=42, fontweight='bold')

font_prop = FontProperties()
font_prop.set_weight('bold')

ax1.legend(fontsize=30, loc='lower right', bbox_to_anchor=(0.98, 0.02), frameon=True,
           title_fontsize=30, prop=font_prop, markerscale=1, borderpad=1.1, labelspacing=1.1)
ax1.set_ylim(40, 65)
ax1.set_yticks(np.arange(40, 66, 5))

ax1.grid(False)
ax1.minorticks_off()

ax1.tick_params(axis='x', length=10, width=4, which='major', direction='in')
ax1.tick_params(axis='y', length=10, width=4, which='major', direction='in')

for spine in ax1.spines.values():
    spine.set_edgecolor('black')
    spine.set_linewidth(4)

models2 = ["v5", "v6", "v7", "v8", "v9", "v10", "v11"]
ours_small = np.array([0.368352778, 0.311180556, 0.356002778, 0.368931019, 0.381449074, 0.355515741, 0.37935463])
ours_medium = np.array([0.551184553, 0.500733333, 0.546139837, 0.545868293, 0.556832927, 0.547228049, 0.558750407])
ours_large = np.array([0.670845635, 0.627299206, 0.66867619, 0.673465079, 0.677045238, 0.668137302, 0.676865873])

ours_small = ours_small * 100
ours_medium = ours_medium * 100
ours_large = ours_large * 100

x = np.arange(len(models2))

ax2 = axes[1]
ax2.plot(x, ours_small, marker='o', markersize=28, label=r"$\mathbf{OURS_{ small}}$",
         linestyle='-', linewidth=5, color=colors_line[0])
ax2.plot(x, ours_medium, marker='o', markersize=28, label=r"$\mathbf{OURS_{ medium}}$",
         linestyle='--', linewidth=5, color=colors_line[1])
ax2.plot(x, ours_large, marker='o', markersize=28, label=r"$\mathbf{OURS_{ large}}$",
         linestyle='-.', linewidth=5, color=colors_line[2])

ax2.set_ylabel(r"$\mathbf{mAP_{50-95} (\%)}$", fontname='Arial', fontsize=42, fontweight='bold')
ax2.legend(fontsize=30, loc='lower right', bbox_to_anchor=(0.99, 0.01), frameon=True,
           title_fontsize=30, prop=font_prop, markerscale=1, borderpad=1.05, labelspacing=1.05)
ax2.set_xticks(x)
ax2.set_xticklabels(models2, rotation=0)
ax2.margins(x=0.05)

ax2.grid(False)
ax2.minorticks_off()

ax2.tick_params(axis='x', length=10, width=4, which='major', direction='in')
ax2.tick_params(axis='y', length=10, width=4, which='major', direction='in')

ax2.set_ylim(10, 80)
for spine in ax2.spines.values():
    spine.set_edgecolor('black')
    spine.set_linewidth(4)
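# Panel 3 (below) is a bubble chart of model size vs. inference latency:
# each bubble's area grows with (params * latency)^2 scaled by scale_factor,
# so larger and slower models stand out, and each model keeps a fixed color.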
models3 = ["YOLOv11", "YOLOv10", "YOLOv9", "YOLOv8", "YOLOv7", "YOLOv6", "YOLOv5"]
params = np.array([20.1, 35.1, 20.1, 25.9, 36.9, 28.1, 25.1])
latency = np.array([8.80, 9.97, 10.85, 7.58, 9.94, 8.72, 7.80])
bubble_values = params * latency
scale_factor = 0.2
bubble_sizes = (bubble_values ** 2) * scale_factor

custom_colors = ['#f1c232', '#1f77b4', '#ff7f0e', '#e377c2', '#2ca02c', '#d62728', '#9467bd']

ax3 = axes[2]

for i in range(len(models3)):
    ax3.text(params[i] + 0.5, latency[i], models3[i], fontsize=32, ha='center', va='center', fontweight='bold', color='#333333')
    ax3.scatter(params[i], latency[i], s=bubble_sizes[i], color=custom_colors[i],
                edgecolors="#ffffff", linewidth=3, zorder=5, alpha=0.95)

    ax3.scatter(params[i], latency[i], s=bubble_sizes[i] * 0.65, color="white",
                alpha=0.05, zorder=6)

ax3.set_facecolor('#ffffff')

ax3.set_xlabel(r"$\mathbf{Params (M)}$", fontname='Arial', fontsize=40, fontweight='bold', color='#333333')
ax3.set_ylabel(r"$\mathbf{A100 FP16 (ms/image)}$", fontname='Arial', fontsize=40, fontweight='bold', color='#333333')

ax3.tick_params(axis='x', labelsize=35)
ax3.tick_params(axis='y', labelsize=35)

ax3.grid(False)
ax3.minorticks_off()

ax3.tick_params(axis='x', length=12, width=4, which='major', direction='in')
ax3.tick_params(axis='y', length=12, width=4, which='major', direction='in')

ax3.set_xlim(14, 40)
ax3.set_ylim(6, 12)

for spine in ax3.spines.values():
    spine.set_edgecolor('#333333')
    spine.set_linewidth(4)

plt.tight_layout()
plt.savefig("1.svg", format="svg")
plt.show()
--------------------------------------------------------------------------------