├── .gitignore
├── test
│   └── dataset
│       ├── IMG_0063_empty.JPG
│       └── IMG_0089_peccary.JPG
├── README.md
├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── test.yml
├── Cargo.toml
└── src
    ├── lib.rs
    ├── detections.rs
    ├── main.rs
    ├── helpers.rs
    └── model.rs

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | /Cargo.lock
3 | *.onnx
4 | output.jpg
5 | vcpkg_installed
6 | 
--------------------------------------------------------------------------------
/test/dataset/IMG_0063_empty.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bencevans/rust-opencv-yolov5/HEAD/test/dataset/IMG_0063_empty.JPG
--------------------------------------------------------------------------------
/test/dataset/IMG_0089_peccary.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bencevans/rust-opencv-yolov5/HEAD/test/dataset/IMG_0089_peccary.JPG
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # YOLOv5 Inference in Rust
2 | 
3 | 1. Follow the documentation in the Ultralytics YOLOv5 repository to export your model in ONNX format.
4 | 2. Ensure you have OpenCV installed (`brew install opencv`, `choco install opencv`, etc.).
5 | 
6 | Add the project as a dependency:
7 | 
8 | ```
9 | cargo add --git https://github.com/bencevans/rust-opencv-yolov5.git
10 | ```
11 | 
12 | Check the test in `src/lib.rs` for example inference.
13 | 
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
5 | 
6 | version: 2
7 | updates:
8 |   - package-ecosystem: "cargo" # See documentation for possible values
9 |     directory: "/" # Location of package manifests
10 |     schedule:
11 |       interval: "daily"
12 |   - package-ecosystem: "github-actions"
13 |     directory: "/"
14 |     schedule:
15 |       interval: "daily"
16 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "opencv-yolov5"
3 | version = "0.1.0"
4 | edition = "2021"
5 | 
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 | 
8 | [dependencies]
9 | clap = { version = "4.0.18", features = ["derive"], optional = true }
10 | image = { version = "0.24.3", features = ["rgb"] }
11 | imageproc = "0.23.0"
12 | indicatif = { version = "0.17.0", optional = true }
13 | opencv = { version = "0.74.0", default-features = false, features = ["dnn", "imgcodecs", "imgproc"] }
14 | shellexpand = { version = "3.0.0", optional = true }
15 | serde = { version = "1.0", features = ["derive"] }
16 | serde_json = "1.0"
17 | tracing = "~0.1"
18 | 
19 | [features]
20 | default = ["cli"]
21 | cli = ["dep:shellexpand", "dep:clap", "dep:indicatif"]
22 | cuda = []
23 | 
24 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | mod detections;
2 | pub mod helpers;
3 | mod model;
4 | 
5 | pub use detections::YoloDetection;
6 | pub use detections::YoloImageDetections;
7 | pub use model::YoloModel;
8 | 
9 | #[cfg(test)]
10 | mod tests {
11 |     use crate::helpers::render_detections;
12 | 
13 |     use super::*;
14 | 
15 |     #[test]
16 |     fn create_model() {
17 |         let image_path = "test/dataset/IMG_0089_peccary.JPG";
18 | 
19 |         let model = YoloModel::new_from_file("md_v5a.0.0-640x640.onnx", (640, 640));
20 | 
21 |         let mut model = model.unwrap();
22 | 
23 |         let detections = model.detect(image_path, 0.5, 0.45);
24 | 
25 |         let detections = detections.unwrap();
26 | 
27 |         assert_eq!(detections.image_width, 4608);
28 |         assert_eq!(detections.image_height, 2560);
29 |         assert_eq!(detections.file, image_path.to_string());
30 | 
31 |         assert_eq!(detections.detections.len(), 1);
32 | 
33 |         render_detections(image_path, &detections, "output.jpg").unwrap();
34 |     }
35 | }
36 | 
--------------------------------------------------------------------------------
/src/detections.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | 
3 | #[derive(Debug, Clone, Serialize, Deserialize)]
4 | /// Representation of an image with its detections.
5 | pub struct YoloImageDetections {
6 |     /// File Path.
7 |     pub file: String,
8 | 
9 |     /// Image Width in Pixels.
10 |     pub image_width: u32,
11 | 
12 |     /// Image Height in Pixels.
13 |     pub image_height: u32,
14 | 
15 |     /// Array of [YoloDetection]s.
16 |     pub detections: Vec<YoloDetection>,
17 | }
18 | 
19 | #[derive(Debug, Clone, Serialize, Deserialize)]
20 | /// Representation of an object detection within an image. Coordinates and sizes are normalised to the range `[0, 1]` relative to the source image.
21 | pub struct YoloDetection {
22 |     /// Top-Left Bounds Coordinate in X-Axis
23 |     pub x: f32,
24 | 
25 |     /// Top-Left Bounds Coordinate in Y-Axis
26 |     pub y: f32,
27 | 
28 |     /// Width of Bounding Box
29 |     pub width: f32,
30 | 
31 |     /// Height of Bounding Box
32 |     pub height: f32,
33 | 
34 |     /// Class Index
35 |     pub class_index: u32,
36 | 
37 |     /// Detection Confidence (objectness activation)
38 |     pub confidence: f32,
39 | }
40 | 
41 | impl YoloDetection {
42 |     pub fn area(&self) -> f32 {
43 |         self.width * self.height
44 |     }
45 | }
46 | 
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | #![cfg(feature = "cli")]
2 | use std::path::PathBuf;
3 | 
4 | use opencv_yolov5::{helpers::enumerate_images, YoloImageDetections, YoloModel};
5 | 
6 | #[derive(clap::Parser)]
7 | struct Cli {
8 |     model_path: PathBuf,
9 | 
10 |     /// Root directory (or single image) to run detection over.
11 |     root_path: PathBuf,
12 | 
13 |     #[clap(long)]
14 |     recursive: bool,
15 | 
16 |     #[clap(long, default_value = "640")]
17 |     input_width: i32,
18 | 
19 |     #[clap(long, default_value = "640")]
20 |     input_height: i32,
21 | }
22 | 
23 | fn main() {
24 |     let mut args = <Cli as clap::Parser>::parse();
25 | 
26 |     // Resolve the supplied paths to absolute, canonical paths.
27 |     args.model_path = args.model_path.canonicalize().unwrap();
28 |     args.root_path = args.root_path.canonicalize().unwrap();
29 | 
30 |     let model_progress = indicatif::ProgressBar::new_spinner();
31 |     let mut model = YoloModel::new_from_file(
32 |         args.model_path.to_str().unwrap(),
33 |         (args.input_width, args.input_height),
34 |     )
35 |     .expect("Unable to load model.");
36 |     model_progress.finish_with_message("Model loaded.");
37 | 
38 |     let images = enumerate_images(args.root_path, args.recursive);
39 | 
40 |     let image_progress = indicatif::ProgressBar::new(images.len() as u64);
41 |     image_progress.set_style(
42 |         indicatif::ProgressStyle::default_bar()
43 |             .template(
44 |                 "{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {pos}/{len} {per_sec} ({eta_precise})",
45 |             )
46 |             .unwrap()
47 |             .progress_chars("=> "),
48 |     );
49 | 
50 |     let mut results: Vec<YoloImageDetections> = vec![];
51 | 
52 |     for image_path in images {
53 |         image_progress.inc(1);
54 | 
55 |         let detections = model
56 |             .detect(image_path.to_str().unwrap(), 0.1, 0.45)
57 |             .unwrap();
58 | 
59 |         results.push(detections);
60 |     }
61 | 
62 |     image_progress.finish_with_message("Done.");
63 | 
64 |     std::fs::write(
65 |         "output.json",
66 |         serde_json::to_string_pretty(&results).unwrap(),
67 |     )
68 |     .expect("Failed to write results");
69 | }
70 | 
--------------------------------------------------------------------------------
/src/helpers.rs:
--------------------------------------------------------------------------------
1 | use crate::detections::YoloImageDetections;
2 | use image::Rgb;
3 | use imageproc::drawing::draw_hollow_rect_mut;
4 | use imageproc::rect::Rect;
5 | use std::path::{Path, PathBuf};
6 | 
7 | const IMAGE_EXTENSIONS: [&str; 3] = ["jpg", "jpeg", "png"];
8 | 
9 | /// Check if a path has a known image extension.
10 | pub fn is_image_path(path: &Path) -> bool {
11 |     match path.extension() {
12 |         None => false,
13 |         Some(a) => IMAGE_EXTENSIONS.contains(&a.to_str().unwrap().to_lowercase().as_str()),
14 |     }
15 | }
16 | 
17 | /// Enumerate all image paths under `root_dir`, optionally recursing into subdirectories.
18 | pub fn enumerate_images(root_dir: PathBuf, recursive: bool) -> Vec<PathBuf> {
19 |     if root_dir.is_file() {
20 |         if is_image_path(&root_dir) {
21 |             vec![root_dir]
22 |         } else {
23 |             vec![]
24 |         }
25 |     } else {
26 |         let mut images: Vec<PathBuf> = vec![];
27 | 
28 |         for entry in root_dir.read_dir().unwrap() {
29 |             let entry = entry.unwrap();
30 |             let path = entry.path();
31 | 
32 |             if path.is_dir() && recursive {
33 |                 images.extend(enumerate_images(path, recursive));
34 |             } else if path.is_file() && is_image_path(&path) {
35 |                 images.push(path);
36 |             }
37 |         }
38 | 
39 |         images
40 |     }
41 | }
42 | 
43 | /// Render bounding boxes onto an image and save the result.
44 | pub fn render_detections(
45 |     image_path: &str,
46 |     detections: &YoloImageDetections,
47 |     output_path: &str,
48 | ) -> Result<(), opencv::Error> {
49 |     let image = image::open(image_path).unwrap();
50 |     let mut image = image.to_rgb8();
51 | 
52 |     for detection in &detections.detections {
53 |         // Detection coordinates are normalised, so scale them back to pixels.
54 |         let x = detection.x * image.width() as f32;
55 |         let y = detection.y * image.height() as f32;
56 |         let width = detection.width * image.width() as f32;
57 |         let height = detection.height * image.height() as f32;
58 | 
59 |         draw_hollow_rect_mut(
60 |             &mut image,
61 |             Rect::at(x as i32, y as i32).of_size(width as u32, height as u32),
62 |             Rgb([255u8, 0u8, 0u8]),
63 |         );
64 |     }
65 | 
66 |     image.save(output_path).unwrap();
67 | 
68 |     Ok(())
69 | }
70 | 
71 | /// Checks if CUDA and a supported CUDA device can be found.
72 | pub fn is_cuda_available() -> bool {
73 |     match opencv::core::get_cuda_enabled_device_count() {
74 |         Ok(count) => count > 0,
75 |         Err(_) => false,
76 |     }
77 | }
78 | 
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: "Rust"
2 | on:
3 |   pull_request:
4 |   push:
5 |     branches:
6 |       - main
7 | 
8 | jobs:
9 |   build-app:
10 |     defaults:
11 |       run:
12 |         shell: bash -el {0}
13 | 
14 |     strategy:
15 |       fail-fast: false
16 |       matrix:
17 |         include:
18 |           - platform: macos-latest
19 |             triplet: x64-osx
20 |           - platform: windows-latest
21 |             triplet: x64-windows-static-md
22 |           - platform: ubuntu-latest
23 |             triplet: x64-linux
24 | 
25 |     runs-on: ${{ matrix.platform }}
26 | 
27 |     steps:
28 | 
29 |       - name: Pin vcpkg version
30 |         run: |
31 |           cd $VCPKG_INSTALLATION_ROOT && git checkout . && git checkout 2023.01.09
32 |       - name: Install Linux System Dependencies
33 |         run: |
34 |           sudo apt-get update
35 |           sudo apt-get install -y libdbus-1-3 libdbus-1-dev libxi-dev libxtst-dev libgles2-mesa-dev libgtk-3-dev \
36 |             libwebkit2gtk-4.0-dev librsvg2-dev jq
37 |         if: runner.os == 'Linux'
38 | 
39 |       - name: Install Windows System Dependencies
40 |         run: choco install -y wget jq
41 |         if: runner.os == 'Windows'
42 | 
43 |       - name: Install MacOS System Dependencies
44 |         run: brew install jq pkg-config
45 |         if: runner.os == 'macOS'
46 | 
47 |       - uses: actions/checkout@v3
48 |         with:
49 |           submodules: true
50 | 
51 |       - name: Install Rust stable
52 |         uses: actions-rs/toolchain@v1
53 |         with:
54 |           toolchain: stable
55 | 
56 |       - name: Set VCPKG_ROOT
57 |         run: echo "VCPKG_ROOT=$VCPKG_INSTALLATION_ROOT" >> $GITHUB_ENV
58 | 
59 |       - name: Cache vcpkg
60 |         uses: actions/cache@v3
61 |         with:
62 |           path: |
63 |             ${{ env.VCPKG_ROOT }}/installed
64 |             ${{ env.VCPKG_ROOT }}/packages
65 |           key: ${{ matrix.platform }}-vcpkg-opencv4[dnn]-${{ matrix.triplet }}
66 | 
67 | 
68 |       - name: Download Model
69 |         run: wget -q -O md_v5a.0.0-640x640.onnx https://github.com/bencevans/megadetector-onnx/releases/download/v0.1.0/md_v5a.0.0-640x640.onnx
70 | 
71 |       - name: Build OpenCV 4
72 |         run: "'${{env.VCPKG_ROOT}}/vcpkg' install opencv4[dnn]:${{ matrix.triplet }}"
73 | 
74 |       - name: Cache Rust
75 |         uses: actions/cache@v3
76 |         with:
77 |           path: |
78 |             ~/.cargo/registry
79 |             ~/.cargo/git
80 |             target
81 |           key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.toml') }}
82 | 
83 |       - name: Build
84 |         run: cargo build --verbose
85 | 
86 |       - name: Run tests
87 |         run: cargo test --verbose
88 | 
--------------------------------------------------------------------------------
/src/model.rs:
--------------------------------------------------------------------------------
1 | use crate::{YoloDetection, YoloImageDetections};
2 | use opencv::{
3 |     core::{copy_make_border, Scalar, Vector, BORDER_CONSTANT, CV_32F},
4 |     dnn::{read_net_from_onnx, read_net_from_onnx_buffer},
5 |     prelude::{Mat, MatTraitConst, NetTrait, NetTraitConst},
6 |     Error,
7 | };
8 | use tracing::info;
9 | 
10 | /// Calculate Intersection over Union (IOU) between two bounding boxes: `intersection / (area_a + area_b - intersection)`.
11 | fn iou(a: &YoloDetection, b: &YoloDetection) -> f32 {
12 |     let area_a = a.area();
13 |     let area_b = b.area();
14 | 
15 |     let top_left = (a.x.max(b.x), a.y.max(b.y));
16 |     let bottom_right = ((a.x + a.width).min(b.x + b.width), (a.y + a.height).min(b.y + b.height));
17 | 
18 |     let intersection =
19 |         (bottom_right.0 - top_left.0).max(0.0) * (bottom_right.1 - top_left.1).max(0.0);
20 | 
21 |     intersection / (area_a + area_b - intersection)
22 | }
23 | 
24 | /// Non-Maximum Suppression: drop any detection that overlaps a higher-confidence detection by more than `nms_threshold`.
25 | fn non_max_suppression(detections: Vec<YoloDetection>, nms_threshold: f32) -> Vec<YoloDetection> {
26 |     let mut suppressed_detections: Vec<YoloDetection> = vec![];
27 |     let mut sorted_detections: Vec<YoloDetection> = detections.to_vec();
28 | 
29 |     sorted_detections.sort_by(|a, b| a.confidence.partial_cmp(&b.confidence).unwrap());
30 |     sorted_detections.reverse();
31 | 
32 |     for i in 0..sorted_detections.len() {
33 |         let mut keep = true;
34 |         for j in 0..i {
35 |             let iou = iou(&sorted_detections[i], &sorted_detections[j]);
36 |             if iou > nms_threshold {
37 |                 keep = false;
38 |                 break;
39 |             }
40 |         }
41 |         if keep {
42 |             suppressed_detections.push(sorted_detections[i].clone());
43 |         }
44 |     }
45 |     suppressed_detections
46 | }
47 | 
48 | /// Filter detections by confidence.
49 | fn filter_confidence(detections: Vec<YoloDetection>, min_confidence: f32) -> Vec<YoloDetection> {
50 |     detections
51 |         .into_iter()
52 |         .filter(|detection| detection.confidence >= min_confidence)
53 |         .collect()
54 | }
55 | 
56 | /// Wrapper around OpenCV's DNN module for YOLOv5 inference.
57 | pub struct YoloModel {
58 |     net: opencv::dnn::Net,
59 |     input_size: opencv::core::Size_<i32>,
60 | }
61 | 
62 | impl YoloModel {
63 |     /// Create a new YoloModel from an ONNX file.
64 |     pub fn new_from_file(model_path: &str, input_size: (i32, i32)) -> Result<Self, Error> {
65 |         YoloModel::new_from_network(read_net_from_onnx(model_path)?, input_size)
66 |     }
67 | 
68 |     /// Create a new YoloModel from an ONNX buffer.
69 |     pub fn new_from_buffer(buffer: &Vector<u8>, input_size: (i32, i32)) -> Result<Self, Error> {
70 |         YoloModel::new_from_network(read_net_from_onnx_buffer(buffer)?, input_size)
71 |     }
72 | 
73 |     /// Create a new YoloModel from a pre-loaded OpenCV DNN network.
74 |     pub fn new_from_network(
75 |         mut network: opencv::dnn::Net,
76 |         input_size: (i32, i32),
77 |     ) -> Result<Self, Error> {
78 |         let cuda_count = opencv::core::get_cuda_enabled_device_count()?;
79 |         info!("CUDA enabled device count: {}", cuda_count);
80 | 
81 |         if cuda_count > 0 {
82 |             network.set_preferable_backend(opencv::dnn::DNN_BACKEND_CUDA)?;
83 |             network.set_preferable_target(opencv::dnn::DNN_TARGET_CUDA)?;
84 |         }
85 | 
86 |         Ok(Self {
87 |             net: network,
88 |             input_size: opencv::core::Size_::new(input_size.0, input_size.1),
89 |         })
90 |     }
91 | 
92 |     /// Load an image, resize it to the network input size and swap the colour channels, returning the input blob plus the original dimensions.
93 |     fn load_image(&self, image_path: &str) -> Result<(Mat, u32, u32), Error> {
94 |         let image = opencv::imgcodecs::imread(image_path, opencv::imgcodecs::IMREAD_COLOR)?;
95 | 
96 |         let mut boxed_image = Mat::default();
97 | 
98 |         copy_make_border(
99 |             &image,
100 |             &mut boxed_image,
101 |             0,
102 |             0,
103 |             0,
104 |             0,
105 |             BORDER_CONSTANT,
106 |             Scalar::new(114f64, 114f64, 114f64, 0f64),
107 |         )?;
108 | 
109 |         let width = image.cols() as u32;
110 |         let height = image.rows() as u32;
111 | 
112 |         // Scale pixel values from [0, 255] down to [0, 1] while building the input blob.
113 | 
114 |         let blob = opencv::dnn::blob_from_image(
115 |             &boxed_image,
116 |             1.0 / 255.0,
117 |             opencv::core::Size_ {
118 |                 width: self.input_size.width,
119 |                 height: self.input_size.height,
120 |             },
121 |             Scalar::new(0f64, 0f64, 0f64, 0f64),
122 |             true,
123 |             false,
124 |             CV_32F,
125 |         )?;
126 | 
127 |         Ok((blob, width, height))
128 |     }
129 | 
130 |     /// Run a forward pass of the network on a prepared input blob.
131 |     fn forward(&mut self, blob: &Mat) -> Result<Mat, Error> {
132 |         let mut output_tensor_blobs: opencv::core::Vector<Mat> = opencv::core::Vector::default();
133 | 
134 |         self.net.set_input(&blob, "", 1.0, Scalar::default())?;
135 |         self.net.forward(
136 |             &mut output_tensor_blobs,
137 |             &self.net.get_unconnected_out_layers_names()?,
138 |         )?;
139 | 
140 |         output_tensor_blobs.get(0)
141 |     }
142 | 
143 |     /// Convert the output of the YOLOv5 model to a vector of [YoloDetection].
144 |     fn convert_to_detections(&self, outputs: &Mat) -> Result<Vec<YoloDetection>, Error> {
145 |         let rows = *outputs.mat_size().get(1).unwrap();
146 |         let mut detections = Vec::<YoloDetection>::with_capacity(rows as usize);
147 | 
148 |         for row in 0..rows {
149 |             let cx: &f32 = outputs.at_3d(0, row, 0)?;
150 |             let cy: &f32 = outputs.at_3d(0, row, 1)?;
151 |             let w: &f32 = outputs.at_3d(0, row, 2)?;
152 |             let h: &f32 = outputs.at_3d(0, row, 3)?;
153 |             let sc: &f32 = outputs.at_3d(0, row, 4)?;
154 | 
155 |             let mut x_min = *cx - *w / 2.0;
156 |             let mut y_min = *cy - *h / 2.0;
157 | 
158 |             x_min /= self.input_size.width as f32;
159 |             y_min /= self.input_size.height as f32;
160 |             let mut width = *w / self.input_size.width as f32;
161 |             let mut height = *h / self.input_size.height as f32;
162 | 
163 |             x_min = x_min.max(0.0).min(1_f32);
164 |             y_min = y_min.max(0.0).min(1_f32);
165 |             width = width.max(0.0).min(1_f32);
166 |             height = height.max(0.0).min(1_f32);
167 | 
168 |             let mat_size = outputs.mat_size();
169 |             let classes = *mat_size.get(2).unwrap() - 5;
170 |             let mut classes_confidences: Vec<f32> = vec![];
171 | 
172 |             for j in 5..5 + classes {
173 |                 let confidence: &f32 = outputs.at_3d(0, row, j)?;
174 |                 classes_confidences.push(*confidence);
175 |             }
176 | 
177 |             let mut max_index = 0;
178 |             let mut max_confidence = 0.0;
179 |             for (index, confidence) in classes_confidences.iter().enumerate() {
180 |                 if *confidence > max_confidence {
181 |                     max_index = index;
182 |                     max_confidence = *confidence;
183 |                 }
184 |             }
185 | 
186 |             detections.push(YoloDetection {
187 |                 x: x_min,
188 |                 y: y_min,
189 |                 width,
190 |                 height,
191 |                 class_index: max_index as u32,
192 |                 confidence: *sc,
193 |             })
194 |         }
195 | 
196 |         Ok(detections)
197 |     }
198 | 
199 |     /// Run the model on an image and return the detections.
200 |     pub fn detect(
201 |         &mut self,
202 |         image_path: &str,
203 |         minimum_confidence: f32,
204 |         nms_threshold: f32,
205 |     ) -> Result<YoloImageDetections, Error> {
206 |         // Load the image.
207 |         let (image, image_width, image_height) = self.load_image(image_path)?;
208 | 
209 |         // Run the model on the image.
210 |         let result = self.forward(&image)?;
211 | 
212 |         // Convert the raw output tensor to a Vec of [YoloDetection]s.
213 |         let detections = self.convert_to_detections(&result)?;
214 | 
215 |         // Filter the detections by confidence.
216 |         let detections = filter_confidence(detections, minimum_confidence);
217 | 
218 |         // Non-maximum suppression.
219 |         let detections = non_max_suppression(detections, nms_threshold);
220 | 
221 |         Ok(YoloImageDetections {
222 |             file: image_path.to_string(),
223 |             image_width,
224 |             image_height,
225 |             detections,
226 |         })
227 |     }
228 | }
229 | 
230 | #[cfg(test)]
231 | mod tests {
232 |     use super::*;
233 | 
234 |     #[test]
235 |     fn test_iou() {
236 |         let detection_a = YoloDetection {
237 |             x: 0.0,
238 |             y: 0.0,
239 |             width: 1.0,
240 |             height: 1.0,
241 |             class_index: 0,
242 |             confidence: 0.0,
243 |         };
244 | 
245 |         let detection_b = YoloDetection {
246 |             x: 0.0,
247 |             y: 0.0,
248 |             width: 1.0,
249 |             height: 0.5,
250 |             class_index: 0,
251 |             confidence: 0.0,
252 |         };
253 | 
254 |         assert_eq!(iou(&detection_a, &detection_a), 1.0);
255 |         assert_eq!(iou(&detection_a, &detection_b), 0.5);
256 | 
257 |         let detection_a = YoloDetection {
258 |             x: 10.0,
259 |             y: 10.0,
260 |             width: 1.0,
261 |             height: 1.0,
262 |             class_index: 0,
263 |             confidence: 0.0,
264 |         };
265 | 
266 |         let detection_b = YoloDetection {
267 |             x: 10.0,
268 |             y: 10.0,
269 |             width: 1.0,
270 |             height: 0.5,
271 |             class_index: 0,
272 |             confidence: 0.0,
273 |         };
274 | 
275 |         assert_eq!(iou(&detection_a, &detection_a), 1.0);
276 |         assert_eq!(iou(&detection_a, &detection_b), 0.5);
277 | 
278 |         let detection_a = YoloDetection {
279 |             x: 0.0,
280 |             y: 0.0,
281 |             width: 3.0,
282 |             height: 3.0,
283 |             class_index: 0,
284 |             confidence: 0.0,
285 |         };
286 | 
287 |         let detection_b = YoloDetection {
288 |             x: 2.0,
289 |             y: 0.0,
290 |             width: 3.0,
291 |             height: 3.0,
292 |             class_index: 0,
293 |             confidence: 0.0,
294 |         };
295 | 
296 |         assert_eq!(iou(&detection_a, &detection_a), 1.0);
297 |         assert_eq!(iou(&detection_a, &detection_b), 0.2);
298 |     }
299 | }
300 | 
--------------------------------------------------------------------------------
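For quick reference, here is a minimal usage sketch of the public API above (`YoloModel::new_from_file`, `YoloModel::detect` and `helpers::render_detections`), mirroring the test in `src/lib.rs`. The model filename, image path and thresholds are the ones used in the test and CI; substitute your own YOLOv5 ONNX export and images. It assumes the downstream crate also depends on `opencv` (for the error type) and `serde_json` (for the JSON output), as the CLI in `src/main.rs` does.

```
use opencv_yolov5::{helpers::render_detections, YoloModel};

fn main() -> Result<(), opencv::Error> {
    // Load a 640x640 YOLOv5 ONNX export (here the MegaDetector model used in CI).
    let mut model = YoloModel::new_from_file("md_v5a.0.0-640x640.onnx", (640, 640))?;

    // Confidence threshold 0.5, NMS IOU threshold 0.45 (the values used in src/lib.rs).
    let image_path = "test/dataset/IMG_0089_peccary.JPG";
    let detections = model.detect(image_path, 0.5, 0.45)?;

    // Detections derive Serialize, so they can be written out as JSON...
    println!("{}", serde_json::to_string_pretty(&detections).unwrap());

    // ...and drawn back onto the source image for inspection.
    render_detections(image_path, &detections, "output.jpg")?;

    Ok(())
}
```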