├── README.md ├── config ├── create_custom_model.sh ├── custom.data └── yolov3-custom.cfg ├── data ├── custom │ ├── classes.names │ ├── images │ │ └── 0520 │ │ │ ├── 0.jpg │ │ │ ├── 1.jpg │ │ │ ├── 10.jpg │ │ │ ├── 100.jpg │ │ │ └── 1000.jpg │ ├── labels │ │ └── 0520 │ │ │ ├── 0.txt │ │ │ ├── 1.txt │ │ │ ├── 10.txt │ │ │ ├── 100.txt │ │ │ └── 1000.txt │ ├── train.txt │ └── valid.txt └── owndata │ ├── 1.jpg │ ├── 1010.jpg │ ├── 1027.jpg │ ├── 104.jpg │ ├── 1041.jpg │ ├── 1048.jpg │ ├── 1063.jpg │ ├── 120.jpg │ ├── 1345.jpg │ ├── 200.jpg │ ├── 22.jpg │ └── 29.jpg ├── detect_owndata.py ├── models.py ├── output ├── 1.png ├── 1010.png ├── 1027.png ├── 104.png ├── 1041.png ├── 1048.png ├── 1063.png ├── 120.png ├── 1345.png ├── 200.png ├── 22.png └── 29.png ├── requirements.txt ├── scripts ├── generate_labels.py ├── k_means.py └── xml_to_json.py ├── test.py ├── train.py ├── utils ├── augmentations.py ├── datasets.py ├── parse_config.py └── utils.py └── weights └── download_weights.sh /README.md: -------------------------------------------------------------------------------- 1 | # Pytorch-Yolov3--remote sensing image 2 | a simple practice about using yolov3 to detect remote sensing image 3 | 4 | if you are interested in it, please refer to 5 | https://zhuanlan.zhihu.com/p/142345958 6 | -------------------------------------------------------------------------------- /config/create_custom_model.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | NUM_CLASSES=$1 4 | 5 | echo " 6 | [net] 7 | # Testing 8 | #batch=1 9 | #subdivisions=1 10 | # Training 11 | batch=16 12 | subdivisions=1 13 | width=416 14 | height=416 15 | channels=3 16 | momentum=0.9 17 | decay=0.0005 18 | angle=0 19 | saturation = 1.5 20 | exposure = 1.5 21 | hue=.1 22 | 23 | learning_rate=0.001 24 | burn_in=1000 25 | max_batches = 500200 26 | policy=steps 27 | steps=400000,450000 28 | scales=.1,.1 29 | 30 | [convolutional] 31 | batch_normalize=1 32 | filters=32 33 | size=3 34 | stride=1 35 | pad=1 36 | activation=leaky 37 | 38 | # Downsample 39 | 40 | [convolutional] 41 | batch_normalize=1 42 | filters=64 43 | size=3 44 | stride=2 45 | pad=1 46 | activation=leaky 47 | 48 | [convolutional] 49 | batch_normalize=1 50 | filters=32 51 | size=1 52 | stride=1 53 | pad=1 54 | activation=leaky 55 | 56 | [convolutional] 57 | batch_normalize=1 58 | filters=64 59 | size=3 60 | stride=1 61 | pad=1 62 | activation=leaky 63 | 64 | [shortcut] 65 | from=-3 66 | activation=linear 67 | 68 | # Downsample 69 | 70 | [convolutional] 71 | batch_normalize=1 72 | filters=128 73 | size=3 74 | stride=2 75 | pad=1 76 | activation=leaky 77 | 78 | [convolutional] 79 | batch_normalize=1 80 | filters=64 81 | size=1 82 | stride=1 83 | pad=1 84 | activation=leaky 85 | 86 | [convolutional] 87 | batch_normalize=1 88 | filters=128 89 | size=3 90 | stride=1 91 | pad=1 92 | activation=leaky 93 | 94 | [shortcut] 95 | from=-3 96 | activation=linear 97 | 98 | [convolutional] 99 | batch_normalize=1 100 | filters=64 101 | size=1 102 | stride=1 103 | pad=1 104 | activation=leaky 105 | 106 | [convolutional] 107 | batch_normalize=1 108 | filters=128 109 | size=3 110 | stride=1 111 | pad=1 112 | activation=leaky 113 | 114 | [shortcut] 115 | from=-3 116 | activation=linear 117 | 118 | # Downsample 119 | 120 | [convolutional] 121 | batch_normalize=1 122 | filters=256 123 | size=3 124 | stride=2 125 | pad=1 126 | activation=leaky 127 | 128 | [convolutional] 129 | batch_normalize=1 130 | filters=128 131 | size=1 132 | stride=1 133 | 
pad=1 134 | activation=leaky 135 | 136 | [convolutional] 137 | batch_normalize=1 138 | filters=256 139 | size=3 140 | stride=1 141 | pad=1 142 | activation=leaky 143 | 144 | [shortcut] 145 | from=-3 146 | activation=linear 147 | 148 | [convolutional] 149 | batch_normalize=1 150 | filters=128 151 | size=1 152 | stride=1 153 | pad=1 154 | activation=leaky 155 | 156 | [convolutional] 157 | batch_normalize=1 158 | filters=256 159 | size=3 160 | stride=1 161 | pad=1 162 | activation=leaky 163 | 164 | [shortcut] 165 | from=-3 166 | activation=linear 167 | 168 | [convolutional] 169 | batch_normalize=1 170 | filters=128 171 | size=1 172 | stride=1 173 | pad=1 174 | activation=leaky 175 | 176 | [convolutional] 177 | batch_normalize=1 178 | filters=256 179 | size=3 180 | stride=1 181 | pad=1 182 | activation=leaky 183 | 184 | [shortcut] 185 | from=-3 186 | activation=linear 187 | 188 | [convolutional] 189 | batch_normalize=1 190 | filters=128 191 | size=1 192 | stride=1 193 | pad=1 194 | activation=leaky 195 | 196 | [convolutional] 197 | batch_normalize=1 198 | filters=256 199 | size=3 200 | stride=1 201 | pad=1 202 | activation=leaky 203 | 204 | [shortcut] 205 | from=-3 206 | activation=linear 207 | 208 | 209 | [convolutional] 210 | batch_normalize=1 211 | filters=128 212 | size=1 213 | stride=1 214 | pad=1 215 | activation=leaky 216 | 217 | [convolutional] 218 | batch_normalize=1 219 | filters=256 220 | size=3 221 | stride=1 222 | pad=1 223 | activation=leaky 224 | 225 | [shortcut] 226 | from=-3 227 | activation=linear 228 | 229 | [convolutional] 230 | batch_normalize=1 231 | filters=128 232 | size=1 233 | stride=1 234 | pad=1 235 | activation=leaky 236 | 237 | [convolutional] 238 | batch_normalize=1 239 | filters=256 240 | size=3 241 | stride=1 242 | pad=1 243 | activation=leaky 244 | 245 | [shortcut] 246 | from=-3 247 | activation=linear 248 | 249 | [convolutional] 250 | batch_normalize=1 251 | filters=128 252 | size=1 253 | stride=1 254 | pad=1 255 | activation=leaky 256 | 257 | [convolutional] 258 | batch_normalize=1 259 | filters=256 260 | size=3 261 | stride=1 262 | pad=1 263 | activation=leaky 264 | 265 | [shortcut] 266 | from=-3 267 | activation=linear 268 | 269 | [convolutional] 270 | batch_normalize=1 271 | filters=128 272 | size=1 273 | stride=1 274 | pad=1 275 | activation=leaky 276 | 277 | [convolutional] 278 | batch_normalize=1 279 | filters=256 280 | size=3 281 | stride=1 282 | pad=1 283 | activation=leaky 284 | 285 | [shortcut] 286 | from=-3 287 | activation=linear 288 | 289 | # Downsample 290 | 291 | [convolutional] 292 | batch_normalize=1 293 | filters=512 294 | size=3 295 | stride=2 296 | pad=1 297 | activation=leaky 298 | 299 | [convolutional] 300 | batch_normalize=1 301 | filters=256 302 | size=1 303 | stride=1 304 | pad=1 305 | activation=leaky 306 | 307 | [convolutional] 308 | batch_normalize=1 309 | filters=512 310 | size=3 311 | stride=1 312 | pad=1 313 | activation=leaky 314 | 315 | [shortcut] 316 | from=-3 317 | activation=linear 318 | 319 | 320 | [convolutional] 321 | batch_normalize=1 322 | filters=256 323 | size=1 324 | stride=1 325 | pad=1 326 | activation=leaky 327 | 328 | [convolutional] 329 | batch_normalize=1 330 | filters=512 331 | size=3 332 | stride=1 333 | pad=1 334 | activation=leaky 335 | 336 | [shortcut] 337 | from=-3 338 | activation=linear 339 | 340 | 341 | [convolutional] 342 | batch_normalize=1 343 | filters=256 344 | size=1 345 | stride=1 346 | pad=1 347 | activation=leaky 348 | 349 | [convolutional] 350 | batch_normalize=1 351 | filters=512 352 | 
size=3 353 | stride=1 354 | pad=1 355 | activation=leaky 356 | 357 | [shortcut] 358 | from=-3 359 | activation=linear 360 | 361 | 362 | [convolutional] 363 | batch_normalize=1 364 | filters=256 365 | size=1 366 | stride=1 367 | pad=1 368 | activation=leaky 369 | 370 | [convolutional] 371 | batch_normalize=1 372 | filters=512 373 | size=3 374 | stride=1 375 | pad=1 376 | activation=leaky 377 | 378 | [shortcut] 379 | from=-3 380 | activation=linear 381 | 382 | [convolutional] 383 | batch_normalize=1 384 | filters=256 385 | size=1 386 | stride=1 387 | pad=1 388 | activation=leaky 389 | 390 | [convolutional] 391 | batch_normalize=1 392 | filters=512 393 | size=3 394 | stride=1 395 | pad=1 396 | activation=leaky 397 | 398 | [shortcut] 399 | from=-3 400 | activation=linear 401 | 402 | 403 | [convolutional] 404 | batch_normalize=1 405 | filters=256 406 | size=1 407 | stride=1 408 | pad=1 409 | activation=leaky 410 | 411 | [convolutional] 412 | batch_normalize=1 413 | filters=512 414 | size=3 415 | stride=1 416 | pad=1 417 | activation=leaky 418 | 419 | [shortcut] 420 | from=-3 421 | activation=linear 422 | 423 | 424 | [convolutional] 425 | batch_normalize=1 426 | filters=256 427 | size=1 428 | stride=1 429 | pad=1 430 | activation=leaky 431 | 432 | [convolutional] 433 | batch_normalize=1 434 | filters=512 435 | size=3 436 | stride=1 437 | pad=1 438 | activation=leaky 439 | 440 | [shortcut] 441 | from=-3 442 | activation=linear 443 | 444 | [convolutional] 445 | batch_normalize=1 446 | filters=256 447 | size=1 448 | stride=1 449 | pad=1 450 | activation=leaky 451 | 452 | [convolutional] 453 | batch_normalize=1 454 | filters=512 455 | size=3 456 | stride=1 457 | pad=1 458 | activation=leaky 459 | 460 | [shortcut] 461 | from=-3 462 | activation=linear 463 | 464 | # Downsample 465 | 466 | [convolutional] 467 | batch_normalize=1 468 | filters=1024 469 | size=3 470 | stride=2 471 | pad=1 472 | activation=leaky 473 | 474 | [convolutional] 475 | batch_normalize=1 476 | filters=512 477 | size=1 478 | stride=1 479 | pad=1 480 | activation=leaky 481 | 482 | [convolutional] 483 | batch_normalize=1 484 | filters=1024 485 | size=3 486 | stride=1 487 | pad=1 488 | activation=leaky 489 | 490 | [shortcut] 491 | from=-3 492 | activation=linear 493 | 494 | [convolutional] 495 | batch_normalize=1 496 | filters=512 497 | size=1 498 | stride=1 499 | pad=1 500 | activation=leaky 501 | 502 | [convolutional] 503 | batch_normalize=1 504 | filters=1024 505 | size=3 506 | stride=1 507 | pad=1 508 | activation=leaky 509 | 510 | [shortcut] 511 | from=-3 512 | activation=linear 513 | 514 | [convolutional] 515 | batch_normalize=1 516 | filters=512 517 | size=1 518 | stride=1 519 | pad=1 520 | activation=leaky 521 | 522 | [convolutional] 523 | batch_normalize=1 524 | filters=1024 525 | size=3 526 | stride=1 527 | pad=1 528 | activation=leaky 529 | 530 | [shortcut] 531 | from=-3 532 | activation=linear 533 | 534 | [convolutional] 535 | batch_normalize=1 536 | filters=512 537 | size=1 538 | stride=1 539 | pad=1 540 | activation=leaky 541 | 542 | [convolutional] 543 | batch_normalize=1 544 | filters=1024 545 | size=3 546 | stride=1 547 | pad=1 548 | activation=leaky 549 | 550 | [shortcut] 551 | from=-3 552 | activation=linear 553 | 554 | ###################### 555 | 556 | [convolutional] 557 | batch_normalize=1 558 | filters=512 559 | size=1 560 | stride=1 561 | pad=1 562 | activation=leaky 563 | 564 | [convolutional] 565 | batch_normalize=1 566 | size=3 567 | stride=1 568 | pad=1 569 | filters=1024 570 | activation=leaky 571 | 572 
| [convolutional] 573 | batch_normalize=1 574 | filters=512 575 | size=1 576 | stride=1 577 | pad=1 578 | activation=leaky 579 | 580 | [convolutional] 581 | batch_normalize=1 582 | size=3 583 | stride=1 584 | pad=1 585 | filters=1024 586 | activation=leaky 587 | 588 | [convolutional] 589 | batch_normalize=1 590 | filters=512 591 | size=1 592 | stride=1 593 | pad=1 594 | activation=leaky 595 | 596 | [convolutional] 597 | batch_normalize=1 598 | size=3 599 | stride=1 600 | pad=1 601 | filters=1024 602 | activation=leaky 603 | 604 | [convolutional] 605 | size=1 606 | stride=1 607 | pad=1 608 | filters=$(expr 3 \* $(expr $NUM_CLASSES \+ 5)) 609 | activation=linear 610 | 611 | 612 | [yolo] 613 | mask = 6,7,8 614 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 615 | classes=$NUM_CLASSES 616 | num=9 617 | jitter=.3 618 | ignore_thresh = .7 619 | truth_thresh = 1 620 | random=1 621 | 622 | 623 | [route] 624 | layers = -4 625 | 626 | [convolutional] 627 | batch_normalize=1 628 | filters=256 629 | size=1 630 | stride=1 631 | pad=1 632 | activation=leaky 633 | 634 | [upsample] 635 | stride=2 636 | 637 | [route] 638 | layers = -1, 61 639 | 640 | 641 | 642 | [convolutional] 643 | batch_normalize=1 644 | filters=256 645 | size=1 646 | stride=1 647 | pad=1 648 | activation=leaky 649 | 650 | [convolutional] 651 | batch_normalize=1 652 | size=3 653 | stride=1 654 | pad=1 655 | filters=512 656 | activation=leaky 657 | 658 | [convolutional] 659 | batch_normalize=1 660 | filters=256 661 | size=1 662 | stride=1 663 | pad=1 664 | activation=leaky 665 | 666 | [convolutional] 667 | batch_normalize=1 668 | size=3 669 | stride=1 670 | pad=1 671 | filters=512 672 | activation=leaky 673 | 674 | [convolutional] 675 | batch_normalize=1 676 | filters=256 677 | size=1 678 | stride=1 679 | pad=1 680 | activation=leaky 681 | 682 | [convolutional] 683 | batch_normalize=1 684 | size=3 685 | stride=1 686 | pad=1 687 | filters=512 688 | activation=leaky 689 | 690 | [convolutional] 691 | size=1 692 | stride=1 693 | pad=1 694 | filters=$(expr 3 \* $(expr $NUM_CLASSES \+ 5)) 695 | activation=linear 696 | 697 | 698 | [yolo] 699 | mask = 3,4,5 700 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 701 | classes=$NUM_CLASSES 702 | num=9 703 | jitter=.3 704 | ignore_thresh = .7 705 | truth_thresh = 1 706 | random=1 707 | 708 | 709 | 710 | [route] 711 | layers = -4 712 | 713 | [convolutional] 714 | batch_normalize=1 715 | filters=128 716 | size=1 717 | stride=1 718 | pad=1 719 | activation=leaky 720 | 721 | [upsample] 722 | stride=2 723 | 724 | [route] 725 | layers = -1, 36 726 | 727 | 728 | 729 | [convolutional] 730 | batch_normalize=1 731 | filters=128 732 | size=1 733 | stride=1 734 | pad=1 735 | activation=leaky 736 | 737 | [convolutional] 738 | batch_normalize=1 739 | size=3 740 | stride=1 741 | pad=1 742 | filters=256 743 | activation=leaky 744 | 745 | [convolutional] 746 | batch_normalize=1 747 | filters=128 748 | size=1 749 | stride=1 750 | pad=1 751 | activation=leaky 752 | 753 | [convolutional] 754 | batch_normalize=1 755 | size=3 756 | stride=1 757 | pad=1 758 | filters=256 759 | activation=leaky 760 | 761 | [convolutional] 762 | batch_normalize=1 763 | filters=128 764 | size=1 765 | stride=1 766 | pad=1 767 | activation=leaky 768 | 769 | [convolutional] 770 | batch_normalize=1 771 | size=3 772 | stride=1 773 | pad=1 774 | filters=256 775 | activation=leaky 776 | 777 | [convolutional] 778 | size=1 779 | stride=1 780 | pad=1 781 | filters=$(expr 3 \* $(expr 
$NUM_CLASSES \+ 5)) 782 | activation=linear 783 | 784 | 785 | [yolo] 786 | mask = 0,1,2 787 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 788 | classes=$NUM_CLASSES 789 | num=9 790 | jitter=.3 791 | ignore_thresh = .7 792 | truth_thresh = 1 793 | random=1 794 | " >> yolov3-custom.cfg 795 | -------------------------------------------------------------------------------- /config/custom.data: -------------------------------------------------------------------------------- 1 | classes= 2 2 | train=data/custom/train.txt 3 | valid=data/custom/valid.txt 4 | names=data/custom/classes.names 5 | -------------------------------------------------------------------------------- /config/yolov3-custom.cfg: -------------------------------------------------------------------------------- 1 | 2 | [net] 3 | # Testing 4 | # batch=1 5 | # subdivisions=1 6 | # Training 7 | batch=16 8 | subdivisions=1 9 | width=416 10 | height=416 11 | channels=3 12 | momentum=0.9 13 | decay=0.0005 14 | angle=0 15 | saturation = 1.5 16 | exposure = 1.5 17 | hue=.1 18 | learning_rate=0.001 19 | burn_in=1000 20 | max_batches = 500200 21 | policy=steps 22 | steps=400000,450000 23 | scales=.1,.1 24 | [convolutional] 25 | batch_normalize=1 26 | filters=32 27 | size=3 28 | stride=1 29 | pad=1 30 | activation=leaky 31 | # Downsample 32 | [convolutional] 33 | batch_normalize=1 34 | filters=64 35 | size=3 36 | stride=2 37 | pad=1 38 | activation=leaky 39 | [convolutional] 40 | batch_normalize=1 41 | filters=32 42 | size=1 43 | stride=1 44 | pad=1 45 | activation=leaky 46 | [convolutional] 47 | batch_normalize=1 48 | filters=64 49 | size=3 50 | stride=1 51 | pad=1 52 | activation=leaky 53 | [shortcut] 54 | from=-3 55 | activation=linear 56 | # Downsample 57 | [convolutional] 58 | batch_normalize=1 59 | filters=128 60 | size=3 61 | stride=2 62 | pad=1 63 | activation=leaky 64 | [convolutional] 65 | batch_normalize=1 66 | filters=64 67 | size=1 68 | stride=1 69 | pad=1 70 | activation=leaky 71 | [convolutional] 72 | batch_normalize=1 73 | filters=128 74 | size=3 75 | stride=1 76 | pad=1 77 | activation=leaky 78 | [shortcut] 79 | from=-3 80 | activation=linear 81 | [convolutional] 82 | batch_normalize=1 83 | filters=64 84 | size=1 85 | stride=1 86 | pad=1 87 | activation=leaky 88 | [convolutional] 89 | batch_normalize=1 90 | filters=128 91 | size=3 92 | stride=1 93 | pad=1 94 | activation=leaky 95 | [shortcut] 96 | from=-3 97 | activation=linear 98 | # Downsample 99 | [convolutional] 100 | batch_normalize=1 101 | filters=256 102 | size=3 103 | stride=2 104 | pad=1 105 | activation=leaky 106 | [convolutional] 107 | batch_normalize=1 108 | filters=128 109 | size=1 110 | stride=1 111 | pad=1 112 | activation=leaky 113 | [convolutional] 114 | batch_normalize=1 115 | filters=256 116 | size=3 117 | stride=1 118 | pad=1 119 | activation=leaky 120 | [shortcut] 121 | from=-3 122 | activation=linear 123 | [convolutional] 124 | batch_normalize=1 125 | filters=128 126 | size=1 127 | stride=1 128 | pad=1 129 | activation=leaky 130 | [convolutional] 131 | batch_normalize=1 132 | filters=256 133 | size=3 134 | stride=1 135 | pad=1 136 | activation=leaky 137 | [shortcut] 138 | from=-3 139 | activation=linear 140 | [convolutional] 141 | batch_normalize=1 142 | filters=128 143 | size=1 144 | stride=1 145 | pad=1 146 | activation=leaky 147 | [convolutional] 148 | batch_normalize=1 149 | filters=256 150 | size=3 151 | stride=1 152 | pad=1 153 | activation=leaky 154 | [shortcut] 155 | from=-3 156 | activation=linear 157 | 
[convolutional] 158 | batch_normalize=1 159 | filters=128 160 | size=1 161 | stride=1 162 | pad=1 163 | activation=leaky 164 | [convolutional] 165 | batch_normalize=1 166 | filters=256 167 | size=3 168 | stride=1 169 | pad=1 170 | activation=leaky 171 | [shortcut] 172 | from=-3 173 | activation=linear 174 | [convolutional] 175 | batch_normalize=1 176 | filters=128 177 | size=1 178 | stride=1 179 | pad=1 180 | activation=leaky 181 | [convolutional] 182 | batch_normalize=1 183 | filters=256 184 | size=3 185 | stride=1 186 | pad=1 187 | activation=leaky 188 | [shortcut] 189 | from=-3 190 | activation=linear 191 | [convolutional] 192 | batch_normalize=1 193 | filters=128 194 | size=1 195 | stride=1 196 | pad=1 197 | activation=leaky 198 | [convolutional] 199 | batch_normalize=1 200 | filters=256 201 | size=3 202 | stride=1 203 | pad=1 204 | activation=leaky 205 | [shortcut] 206 | from=-3 207 | activation=linear 208 | [convolutional] 209 | batch_normalize=1 210 | filters=128 211 | size=1 212 | stride=1 213 | pad=1 214 | activation=leaky 215 | [convolutional] 216 | batch_normalize=1 217 | filters=256 218 | size=3 219 | stride=1 220 | pad=1 221 | activation=leaky 222 | [shortcut] 223 | from=-3 224 | activation=linear 225 | [convolutional] 226 | batch_normalize=1 227 | filters=128 228 | size=1 229 | stride=1 230 | pad=1 231 | activation=leaky 232 | [convolutional] 233 | batch_normalize=1 234 | filters=256 235 | size=3 236 | stride=1 237 | pad=1 238 | activation=leaky 239 | [shortcut] 240 | from=-3 241 | activation=linear 242 | # Downsample 243 | [convolutional] 244 | batch_normalize=1 245 | filters=512 246 | size=3 247 | stride=2 248 | pad=1 249 | activation=leaky 250 | [convolutional] 251 | batch_normalize=1 252 | filters=256 253 | size=1 254 | stride=1 255 | pad=1 256 | activation=leaky 257 | [convolutional] 258 | batch_normalize=1 259 | filters=512 260 | size=3 261 | stride=1 262 | pad=1 263 | activation=leaky 264 | [shortcut] 265 | from=-3 266 | activation=linear 267 | [convolutional] 268 | batch_normalize=1 269 | filters=256 270 | size=1 271 | stride=1 272 | pad=1 273 | activation=leaky 274 | [convolutional] 275 | batch_normalize=1 276 | filters=512 277 | size=3 278 | stride=1 279 | pad=1 280 | activation=leaky 281 | [shortcut] 282 | from=-3 283 | activation=linear 284 | [convolutional] 285 | batch_normalize=1 286 | filters=256 287 | size=1 288 | stride=1 289 | pad=1 290 | activation=leaky 291 | [convolutional] 292 | batch_normalize=1 293 | filters=512 294 | size=3 295 | stride=1 296 | pad=1 297 | activation=leaky 298 | [shortcut] 299 | from=-3 300 | activation=linear 301 | [convolutional] 302 | batch_normalize=1 303 | filters=256 304 | size=1 305 | stride=1 306 | pad=1 307 | activation=leaky 308 | [convolutional] 309 | batch_normalize=1 310 | filters=512 311 | size=3 312 | stride=1 313 | pad=1 314 | activation=leaky 315 | [shortcut] 316 | from=-3 317 | activation=linear 318 | [convolutional] 319 | batch_normalize=1 320 | filters=256 321 | size=1 322 | stride=1 323 | pad=1 324 | activation=leaky 325 | [convolutional] 326 | batch_normalize=1 327 | filters=512 328 | size=3 329 | stride=1 330 | pad=1 331 | activation=leaky 332 | [shortcut] 333 | from=-3 334 | activation=linear 335 | [convolutional] 336 | batch_normalize=1 337 | filters=256 338 | size=1 339 | stride=1 340 | pad=1 341 | activation=leaky 342 | [convolutional] 343 | batch_normalize=1 344 | filters=512 345 | size=3 346 | stride=1 347 | pad=1 348 | activation=leaky 349 | [shortcut] 350 | from=-3 351 | activation=linear 352 | 
[convolutional] 353 | batch_normalize=1 354 | filters=256 355 | size=1 356 | stride=1 357 | pad=1 358 | activation=leaky 359 | [convolutional] 360 | batch_normalize=1 361 | filters=512 362 | size=3 363 | stride=1 364 | pad=1 365 | activation=leaky 366 | [shortcut] 367 | from=-3 368 | activation=linear 369 | [convolutional] 370 | batch_normalize=1 371 | filters=256 372 | size=1 373 | stride=1 374 | pad=1 375 | activation=leaky 376 | [convolutional] 377 | batch_normalize=1 378 | filters=512 379 | size=3 380 | stride=1 381 | pad=1 382 | activation=leaky 383 | [shortcut] 384 | from=-3 385 | activation=linear 386 | # Downsample 387 | [convolutional] 388 | batch_normalize=1 389 | filters=1024 390 | size=3 391 | stride=2 392 | pad=1 393 | activation=leaky 394 | [convolutional] 395 | batch_normalize=1 396 | filters=512 397 | size=1 398 | stride=1 399 | pad=1 400 | activation=leaky 401 | [convolutional] 402 | batch_normalize=1 403 | filters=1024 404 | size=3 405 | stride=1 406 | pad=1 407 | activation=leaky 408 | [shortcut] 409 | from=-3 410 | activation=linear 411 | [convolutional] 412 | batch_normalize=1 413 | filters=512 414 | size=1 415 | stride=1 416 | pad=1 417 | activation=leaky 418 | [convolutional] 419 | batch_normalize=1 420 | filters=1024 421 | size=3 422 | stride=1 423 | pad=1 424 | activation=leaky 425 | [shortcut] 426 | from=-3 427 | activation=linear 428 | [convolutional] 429 | batch_normalize=1 430 | filters=512 431 | size=1 432 | stride=1 433 | pad=1 434 | activation=leaky 435 | [convolutional] 436 | batch_normalize=1 437 | filters=1024 438 | size=3 439 | stride=1 440 | pad=1 441 | activation=leaky 442 | [shortcut] 443 | from=-3 444 | activation=linear 445 | [convolutional] 446 | batch_normalize=1 447 | filters=512 448 | size=1 449 | stride=1 450 | pad=1 451 | activation=leaky 452 | [convolutional] 453 | batch_normalize=1 454 | filters=1024 455 | size=3 456 | stride=1 457 | pad=1 458 | activation=leaky 459 | [shortcut] 460 | from=-3 461 | activation=linear 462 | ###################### 463 | [convolutional] 464 | batch_normalize=1 465 | filters=512 466 | size=1 467 | stride=1 468 | pad=1 469 | activation=leaky 470 | [convolutional] 471 | batch_normalize=1 472 | size=3 473 | stride=1 474 | pad=1 475 | filters=1024 476 | activation=leaky 477 | [convolutional] 478 | batch_normalize=1 479 | filters=512 480 | size=1 481 | stride=1 482 | pad=1 483 | activation=leaky 484 | [convolutional] 485 | batch_normalize=1 486 | size=3 487 | stride=1 488 | pad=1 489 | filters=1024 490 | activation=leaky 491 | [convolutional] 492 | batch_normalize=1 493 | filters=512 494 | size=1 495 | stride=1 496 | pad=1 497 | activation=leaky 498 | [convolutional] 499 | batch_normalize=1 500 | size=3 501 | stride=1 502 | pad=1 503 | filters=1024 504 | activation=leaky 505 | [convolutional] 506 | size=1 507 | stride=1 508 | pad=1 509 | filters=21 510 | activation=linear 511 | [yolo] 512 | mask = 6,7,8 513 | anchors = 14,10, 10,17, 21,12, 16,16, 13,23, 24,16, 21,22, 28,27, 38,37 514 | classes=2 515 | num=9 516 | jitter=.3 517 | ignore_thresh = .7 518 | truth_thresh = 1 519 | random=1 520 | [route] 521 | layers = -4 522 | [convolutional] 523 | batch_normalize=1 524 | filters=256 525 | size=1 526 | stride=1 527 | pad=1 528 | activation=leaky 529 | [upsample] 530 | stride=2 531 | [route] 532 | layers = -1, 61 533 | [convolutional] 534 | batch_normalize=1 535 | filters=256 536 | size=1 537 | stride=1 538 | pad=1 539 | activation=leaky 540 | [convolutional] 541 | batch_normalize=1 542 | size=3 543 | stride=1 544 | pad=1 
545 | filters=512 546 | activation=leaky 547 | [convolutional] 548 | batch_normalize=1 549 | filters=256 550 | size=1 551 | stride=1 552 | pad=1 553 | activation=leaky 554 | [convolutional] 555 | batch_normalize=1 556 | size=3 557 | stride=1 558 | pad=1 559 | filters=512 560 | activation=leaky 561 | [convolutional] 562 | batch_normalize=1 563 | filters=256 564 | size=1 565 | stride=1 566 | pad=1 567 | activation=leaky 568 | [convolutional] 569 | batch_normalize=1 570 | size=3 571 | stride=1 572 | pad=1 573 | filters=512 574 | activation=leaky 575 | [convolutional] 576 | size=1 577 | stride=1 578 | pad=1 579 | filters=21 580 | activation=linear 581 | [yolo] 582 | mask = 3,4,5 583 | anchors = 14,10, 10,17, 21,12, 16,16, 13,23, 24,16, 21,22, 28,27, 38,37 584 | classes=2 585 | num=9 586 | jitter=.3 587 | ignore_thresh = .7 588 | truth_thresh = 1 589 | random=1 590 | [route] 591 | layers = -4 592 | [convolutional] 593 | batch_normalize=1 594 | filters=128 595 | size=1 596 | stride=1 597 | pad=1 598 | activation=leaky 599 | [upsample] 600 | stride=2 601 | [route] 602 | layers = -1, 36 603 | [convolutional] 604 | batch_normalize=1 605 | filters=128 606 | size=1 607 | stride=1 608 | pad=1 609 | activation=leaky 610 | [convolutional] 611 | batch_normalize=1 612 | size=3 613 | stride=1 614 | pad=1 615 | filters=256 616 | activation=leaky 617 | [convolutional] 618 | batch_normalize=1 619 | filters=128 620 | size=1 621 | stride=1 622 | pad=1 623 | activation=leaky 624 | [convolutional] 625 | batch_normalize=1 626 | size=3 627 | stride=1 628 | pad=1 629 | filters=256 630 | activation=leaky 631 | [convolutional] 632 | batch_normalize=1 633 | filters=128 634 | size=1 635 | stride=1 636 | pad=1 637 | activation=leaky 638 | [convolutional] 639 | batch_normalize=1 640 | size=3 641 | stride=1 642 | pad=1 643 | filters=256 644 | activation=leaky 645 | [convolutional] 646 | size=1 647 | stride=1 648 | pad=1 649 | filters=21 650 | activation=linear 651 | [yolo] 652 | mask = 0,1,2 653 | anchors = 14,10, 10,17, 21,12, 16,16, 13,23, 24,16, 21,22, 28,27, 38,37 654 | classes=2 655 | num=9 656 | jitter=.3 657 | ignore_thresh = .7 658 | truth_thresh = 1 659 | random=1 660 | 661 | -------------------------------------------------------------------------------- /data/custom/classes.names: -------------------------------------------------------------------------------- 1 | car 2 | plane 3 | -------------------------------------------------------------------------------- /data/custom/images/0520/0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/custom/images/0520/0.jpg -------------------------------------------------------------------------------- /data/custom/images/0520/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/custom/images/0520/1.jpg -------------------------------------------------------------------------------- /data/custom/images/0520/10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/custom/images/0520/10.jpg -------------------------------------------------------------------------------- 
/data/custom/images/0520/100.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/custom/images/0520/100.jpg -------------------------------------------------------------------------------- /data/custom/images/0520/1000.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/custom/images/0520/1000.jpg -------------------------------------------------------------------------------- /data/custom/labels/0520/0.txt: -------------------------------------------------------------------------------- 1 | 0 0.194140625 0.051593323216995446 0.02734375 0.07283763277693475 2 | 0 0.23828125 0.10773899848254932 0.021875000000000002 0.07587253414264036 3 | 0 0.25703125 0.12291350531107739 0.018750000000000003 0.07890743550834597 4 | 0 0.28046875 0.13201820940819423 0.0203125 0.07890743550834597 5 | 0 0.30312500000000003 0.14036418816388468 0.0171875 0.07738998482549317 6 | 0 0.32421875 0.15326251896813353 0.0171875 0.09711684370257967 7 | 0 0.34765625 0.16843702579666162 0.018750000000000003 0.07890743550834597 8 | 0 0.219140625 0.3490136570561457 0.02734375 0.07587253414264036 9 | 0 0.24570312500000002 0.3550834597875569 0.01953125 0.07587253414264036 10 | 0 0.2859375 0.37632776934749623 0.018750000000000003 0.08801213960546282 11 | 0 0.309375 0.38467374810318666 0.0203125 0.07132018209408195 12 | 0 0.3328125 0.3732928679817906 0.0203125 0.08801213960546282 13 | 0 0.509375 0.6843702579666161 0.045312500000000006 0.036418816388467376 14 | 0 0.515625 0.7314112291350532 0.0390625 0.033383915022761765 15 | 0 0.533203125 0.8042488619119879 0.03828125 0.048558421851289835 16 | 0 0.6566406250000001 0.779969650986343 0.041406250000000006 0.048558421851289835 17 | 0 0.7156250000000001 0.952959028831563 0.035937500000000004 0.051593323216995446 18 | 0 0.5535156250000001 0.8869499241274659 0.039843750000000004 0.04400606980273141 19 | 0 0.566015625 0.9264036418816389 0.03515625 0.04704097116843703 20 | -------------------------------------------------------------------------------- /data/custom/labels/0520/1.txt: -------------------------------------------------------------------------------- 1 | 0 0.0625 0.19575113808801214 0.021875000000000002 0.08497723823975721 2 | 0 0.095703125 0.22003034901365706 0.017968750000000002 0.06676783004552353 3 | 0 0.115625 0.23520485584218515 0.0171875 0.06069802731411229 4 | 0 0.1390625 0.24203338391502277 0.0125 0.0804248861911988 5 | 0 0.15781250000000002 0.2465857359635812 0.0203125 0.07435508345978756 6 | 0 0.183984375 0.24430955993930198 0.014843750000000001 0.08497723823975721 7 | 0 0.205078125 0.24430955993930198 0.017968750000000002 0.07283763277693475 8 | 0 0.226953125 0.26100151745068284 0.017968750000000002 0.07587253414264036 9 | 0 0.24609375 0.27314112291350534 0.015625 0.07890743550834597 10 | 0 0.26406250000000003 0.27541729893778455 0.018750000000000003 0.07435508345978756 11 | 0 0.286328125 0.27996965098634297 0.01640625 0.07132018209408195 12 | 0 0.30976562500000004 0.291350531107739 0.02109375 0.07890743550834597 13 | 0 0.33125000000000004 0.2898330804248862 0.0203125 0.08497723823975721 14 | 0 0.353125 0.2905918057663126 0.015625 0.06828528072837634 15 | 0 0.37578125 0.30121396054628224 0.015625 0.06525037936267071 16 | 0 
0.39570312500000004 0.30349013657056145 0.017968750000000002 0.06980273141122914 17 | 0 0.416796875 0.31638846737481036 0.017968750000000002 0.07738998482549317 18 | 0 0.455078125 0.31942336874051597 0.01953125 0.07132018209408195 19 | 0 0.48125 0.3141122913505311 0.0203125 0.07890743550834597 20 | 0 0.5027343750000001 0.3285280728376328 0.01640625 0.0804248861911988 21 | 0 0.526953125 0.34066767830045525 0.017968750000000002 0.07738998482549317 22 | 0 0.6628906250000001 0.503793626707132 0.04609375 0.03945371775417299 23 | 0 0.653515625 0.5462822458270107 0.047656250000000004 0.054628224582701064 24 | 0 0.644140625 0.5880121396054628 0.05234375 0.04400606980273141 25 | 0 0.63828125 0.6365705614567527 0.043750000000000004 0.0409711684370258 26 | 0 0.6292968750000001 0.6783004552352049 0.03828125 0.036418816388467376 27 | 0 0.6195312500000001 0.7169954476479514 0.040625 0.0409711684370258 28 | 0 0.590234375 0.7814871016691958 0.04609375 0.06676783004552353 29 | 0 0.7625000000000001 0.5462822458270107 0.053125000000000006 0.042488619119878605 30 | 0 0.766796875 0.5963581183611533 0.04453125 0.036418816388467376 31 | 0 0.762890625 0.6380880121396055 0.04609375 0.05311077389984826 32 | 0 0.74765625 0.7207890743550834 0.05 0.042488619119878605 33 | 0 0.739453125 0.7716236722306525 0.04296875 0.05311077389984826 34 | 0 0.94140625 0.24279210925644917 0.025 0.07890743550834597 35 | 0 0.9671875000000001 0.23520485584218515 0.025 0.07283763277693475 36 | -------------------------------------------------------------------------------- /data/custom/labels/0520/10.txt: -------------------------------------------------------------------------------- 1 | 0 0.041406250000000006 0.7822458270106222 0.028125 0.0804248861911988 2 | 0 0.07265625 0.7496206373292869 0.026562500000000003 0.06980273141122914 3 | 0 0.08984375 0.7382397572078908 0.025 0.07435508345978756 4 | 0 0.11093750000000001 0.720030349013657 0.025 0.07132018209408195 5 | 0 0.1265625 0.701062215477997 0.0203125 0.07283763277693475 6 | 0 0.143359375 0.6623672230652504 0.02890625 0.08952959028831563 7 | 0 0.16523437500000002 0.6471927162367224 0.02890625 0.08649468892261002 8 | 0 0.178125 0.6282245827010622 0.026562500000000003 0.07587253414264036 9 | 0 0.619140625 0.5773899848254932 0.03671875 0.05614567526555387 10 | 0 0.61875 0.49013657056145676 0.0390625 0.036418816388467376 11 | 0 0.7269531250000001 0.3907435508345979 0.03828125 0.04704097116843703 12 | 0 0.71484375 0.34294385432473445 0.0390625 0.048558421851289835 13 | 0 0.70078125 0.2670713201820941 0.0390625 0.042488619119878605 14 | 0 0.6890625 0.2268588770864947 0.035937500000000004 0.03490136570561457 15 | 0 0.669921875 0.19802731411229135 0.04609375 0.05918057663125949 16 | 0 0.581640625 0.27996965098634297 0.041406250000000006 0.04400606980273141 17 | 0 0.585546875 0.3330804248861912 0.04453125 0.0409711684370258 18 | 0 0.59765625 0.37632776934749623 0.045312500000000006 0.033383915022761765 19 | 0 0.606640625 0.40364188163884673 0.041406250000000006 0.033383915022761765 20 | 0 0.61328125 0.456752655538695 0.037500000000000006 0.042488619119878605 21 | 0 0.42812500000000003 0.3254931714719272 0.028125 0.07132018209408195 22 | 0 0.408984375 0.3490136570561457 0.02734375 0.06980273141122914 23 | 0 0.388671875 0.3672230652503794 0.025781250000000002 0.07283763277693475 24 | 0 0.37265625 0.3952959028831563 0.03125 0.07738998482549317 25 | 0 0.34140625 0.4355083459787557 0.028125 0.06676783004552353 26 | 0 0.32148437500000004 0.45220030349013657 0.02109375 0.06069802731411229 27 | 0 
0.30585937500000004 0.48254931714719274 0.02890625 0.07283763277693475 28 | 0 0.283984375 0.503793626707132 0.02109375 0.0637329286798179 29 | 0 0.26875 0.5265553869499241 0.025 0.07890743550834597 30 | 0 0.253125 0.5462822458270107 0.029687500000000002 0.0819423368740516 31 | 0 0.23203125000000002 0.5728376327769348 0.028125 0.07435508345978756 32 | 0 0.2171875 0.5925644916540213 0.025 0.07435508345978756 33 | 0 0.19687500000000002 0.6092564491654021 0.028125 0.07435508345978756 34 | -------------------------------------------------------------------------------- /data/custom/labels/0520/100.txt: -------------------------------------------------------------------------------- 1 | 0 0.07890625000000001 0.27845220030349016 0.043750000000000004 0.09256449165402125 2 | 0 0.09062500000000001 0.8118361153262519 0.0390625 0.06676783004552353 3 | 0 0.521875 0.8125948406676783 0.0859375 0.11987860394537178 4 | 0 0.519140625 0.7109256449165402 0.039843750000000004 0.07738998482549317 5 | 0 0.60703125 0.33915022761760244 0.040625 0.0622154779969651 6 | 0 0.63984375 0.19726858877086495 0.05 0.09408194233687406 7 | 0 0.54453125 0.13732928679817907 0.05 0.08649468892261002 8 | 0 0.3921875 0.42412746585735966 0.0390625 0.07132018209408195 9 | 0 0.284375 0.6213960546282246 0.035937500000000004 0.0622154779969651 10 | 0 0.416015625 0.2867981790591806 0.04609375 0.07283763277693475 11 | 0 0.44609375 0.15933232169954478 0.037500000000000006 0.07890743550834597 12 | -------------------------------------------------------------------------------- /data/custom/labels/0520/1000.txt: -------------------------------------------------------------------------------- 1 | 1 0.24726562500000002 0.17905918057663125 0.09765625 0.1456752655538695 2 | 1 0.25390625 0.370257966616085 0.08125 0.13353566009104706 3 | 1 0.283203125 0.7511380880121397 0.09765625 0.15477996965098634 4 | 1 0.628515625 0.8543247344461306 0.11171875 0.15477996965098634 5 | 1 0.6062500000000001 0.3103186646433991 0.09062500000000001 0.19575113808801214 6 | 1 0.714453125 0.27541729893778455 0.08359375000000001 0.20182094081942337 7 | 1 0.804296875 0.24279210925644917 0.08203125 0.18816388467374812 8 | -------------------------------------------------------------------------------- /data/custom/train.txt: -------------------------------------------------------------------------------- 1 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1400.jpg 2 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/167.jpg 3 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1388.jpg 4 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/213.jpg 5 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/114.jpg 6 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/15.jpg 7 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1265.jpg 8 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1132.jpg 9 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/122.jpg 10 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/108.jpg 11 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1218.jpg 12 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/131.jpg 13 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1302.jpg 14 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/170.jpg 15 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1012.jpg 16 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1089.jpg 17 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1461.jpg 18 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/19.jpg 19 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/362.jpg 20 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1061.jpg 21 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1434.jpg 22 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1281.jpg 23 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1256.jpg 24 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1498.jpg 25 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1026.jpg 26 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1368.jpg 27 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1116.jpg 28 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1307.jpg 29 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/109.jpg 30 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/264.jpg 31 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1142.jpg 32 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/16.jpg 33 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1292.jpg 34 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1157.jpg 35 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/267.jpg 36 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/118.jpg 37 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1174.jpg 38 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1238.jpg 39 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1207.jpg 40 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/208.jpg 41 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/253.jpg 42 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1105.jpg 43 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1139.jpg 44 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1131.jpg 45 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1096.jpg 46 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/306.jpg 47 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1107.jpg 48 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/369.jpg 49 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1052.jpg 50 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/380.jpg 51 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1474.jpg 52 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/390.jpg 53 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/345.jpg 54 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1289.jpg 55 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/142.jpg 56 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/248.jpg 57 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/371.jpg 58 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1253.jpg 59 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/318.jpg 60 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1439.jpg 61 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1338.jpg 62 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1370.jpg 63 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1140.jpg 64 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1070.jpg 65 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1312.jpg 66 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/285.jpg 67 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1330.jpg 68 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1071.jpg 69 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1059.jpg 70 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1275.jpg 71 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/103.jpg 72 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1086.jpg 73 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1004.jpg 74 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/136.jpg 75 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/137.jpg 76 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/357.jpg 77 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1347.jpg 78 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1421.jpg 79 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/266.jpg 80 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/373.jpg 81 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1100.jpg 82 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1394.jpg 83 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1489.jpg 84 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1175.jpg 85 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1337.jpg 86 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1202.jpg 87 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1251.jpg 88 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1483.jpg 89 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1128.jpg 90 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1325.jpg 91 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1203.jpg 92 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/335.jpg 93 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/14.jpg 94 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1298.jpg 95 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/230.jpg 96 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1311.jpg 97 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/368.jpg 98 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1469.jpg 99 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1115.jpg 100 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1154.jpg 101 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/332.jpg 102 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/283.jpg 103 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1019.jpg 104 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1117.jpg 105 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1456.jpg 106 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/214.jpg 107 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/317.jpg 108 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1015.jpg 109 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/290.jpg 110 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1162.jpg 111 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/112.jpg 112 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/190.jpg 113 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/196.jpg 114 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/152.jpg 115 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/319.jpg 116 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1189.jpg 117 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/219.jpg 118 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/104.jpg 119 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/26.jpg 120 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1076.jpg 121 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1279.jpg 122 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/229.jpg 123 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1206.jpg 124 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/255.jpg 125 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1344.jpg 126 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1020.jpg 127 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1177.jpg 128 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1309.jpg 129 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1428.jpg 130 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/191.jpg 131 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/117.jpg 132 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/102.jpg 133 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1382.jpg 134 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/361.jpg 135 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1126.jpg 136 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1326.jpg 137 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/151.jpg 138 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/17.jpg 139 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1226.jpg 140 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1294.jpg 141 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1379.jpg 142 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/302.jpg 143 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/313.jpg 144 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1462.jpg 145 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/183.jpg 146 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/257.jpg 147 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1221.jpg 148 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1411.jpg 149 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1304.jpg 150 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/398.jpg 151 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1263.jpg 152 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1451.jpg 153 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1446.jpg 154 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/325.jpg 155 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1499.jpg 156 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1127.jpg 157 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1227.jpg 158 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/206.jpg 159 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/263.jpg 160 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/28.jpg 161 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1284.jpg 162 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1129.jpg 163 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/242.jpg 164 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1152.jpg 165 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/281.jpg 166 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1445.jpg 167 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1463.jpg 168 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1267.jpg 169 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/134.jpg 170 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/338.jpg 171 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1405.jpg 172 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1479.jpg 173 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1297.jpg 174 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/128.jpg 175 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1195.jpg 176 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1079.jpg 177 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/260.jpg 178 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1001.jpg 179 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/272.jpg 180 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/175.jpg 181 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1181.jpg 182 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1300.jpg 183 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1159.jpg 184 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1176.jpg 185 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/222.jpg 186 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/119.jpg 187 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/181.jpg 188 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1390.jpg 189 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/327.jpg 190 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1453.jpg 191 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1062.jpg 192 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1250.jpg 193 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/312.jpg 194 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/252.jpg 195 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/374.jpg 196 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/310.jpg 197 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1258.jpg 198 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1391.jpg 199 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1242.jpg 200 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1125.jpg 201 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/350.jpg 202 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1028.jpg 203 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/176.jpg 204 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/155.jpg 205 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/223.jpg 206 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1305.jpg 207 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1003.jpg 208 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1331.jpg 209 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1437.jpg 210 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1342.jpg 211 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/305.jpg 212 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1009.jpg 213 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1121.jpg 214 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/221.jpg 215 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/354.jpg 216 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/364.jpg 217 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/217.jpg 218 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/295.jpg 219 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1047.jpg 220 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/383.jpg 221 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/297.jpg 222 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/100.jpg 223 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/164.jpg 224 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1119.jpg 225 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/115.jpg 226 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1199.jpg 227 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1011.jpg 228 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/392.jpg 229 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1441.jpg 230 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/210.jpg 231 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/378.jpg 232 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1488.jpg 233 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1470.jpg 234 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/356.jpg 235 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1190.jpg 236 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/379.jpg 237 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/250.jpg 238 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1373.jpg 239 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1017.jpg 240 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/199.jpg 241 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1286.jpg 242 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1324.jpg 243 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1144.jpg 244 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1422.jpg 245 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1069.jpg 246 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/228.jpg 247 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/233.jpg 248 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/304.jpg 249 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/341.jpg 250 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1172.jpg 251 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1401.jpg 252 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1363.jpg 253 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1333.jpg 254 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/165.jpg 255 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1180.jpg 256 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/172.jpg 257 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/375.jpg 258 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/323.jpg 259 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/337.jpg 260 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1137.jpg 261 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1234.jpg 262 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1108.jpg 263 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1387.jpg 264 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1087.jpg 265 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/224.jpg 266 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/287.jpg 267 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1321.jpg 268 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/111.jpg 269 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1266.jpg 270 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1442.jpg 271 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1239.jpg 272 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1435.jpg 273 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1228.jpg 274 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1416.jpg 275 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/286.jpg 276 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1148.jpg 277 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/4.jpg 278 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1385.jpg 279 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1383.jpg 280 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1485.jpg 281 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1122.jpg 282 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1073.jpg 283 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1492.jpg 284 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/179.jpg 285 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1301.jpg 286 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1197.jpg 287 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/186.jpg 288 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1039.jpg 289 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1476.jpg 290 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/33.jpg 291 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1340.jpg 292 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1094.jpg 293 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1486.jpg 294 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/140.jpg 295 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1104.jpg 296 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1494.jpg 297 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/132.jpg 298 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1271.jpg 299 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/32.jpg 300 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1029.jpg 301 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/127.jpg 302 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1098.jpg 303 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/365.jpg 304 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1223.jpg 305 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1212.jpg 306 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/147.jpg 307 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1259.jpg 308 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1088.jpg 309 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1358.jpg 310 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1274.jpg 311 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1468.jpg 312 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1187.jpg 313 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1319.jpg 314 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/353.jpg 315 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1293.jpg 316 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/0.jpg 317 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1257.jpg 318 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/349.jpg 319 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1285.jpg 320 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1134.jpg 321 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/298.jpg 322 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1156.jpg 323 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1092.jpg 324 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1455.jpg 325 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1389.jpg 326 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/37.jpg 327 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/25.jpg 328 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/343.jpg 329 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1237.jpg 330 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/352.jpg 331 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1247.jpg 332 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1406.jpg 333 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/113.jpg 334 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1327.jpg 335 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1007.jpg 336 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1214.jpg 337 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1185.jpg 338 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1335.jpg 339 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/166.jpg 340 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1232.jpg 341 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1472.jpg 342 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1487.jpg 343 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1045.jpg 344 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1248.jpg 345 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/135.jpg 346 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1436.jpg 347 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1481.jpg 348 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/107.jpg 349 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1303.jpg 350 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/377.jpg 351 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1158.jpg 352 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/159.jpg 353 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1136.jpg 354 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1217.jpg 355 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/311.jpg 356 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1188.jpg 357 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1041.jpg 358 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1353.jpg 359 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/13.jpg 360 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/271.jpg 361 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1264.jpg 362 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1110.jpg 363 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1067.jpg 364 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1219.jpg 365 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/275.jpg 366 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/278.jpg 367 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/31.jpg 368 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1191.jpg 369 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/2.jpg 370 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1186.jpg 371 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1008.jpg 372 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1194.jpg 373 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1245.jpg 374 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1310.jpg 375 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1380.jpg 376 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/243.jpg 377 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/241.jpg 378 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1291.jpg 379 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/101.jpg 380 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1399.jpg 381 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1183.jpg 382 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/366.jpg 383 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/150.jpg 384 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1036.jpg 385 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1166.jpg 386 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1396.jpg 387 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1204.jpg 388 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1350.jpg 389 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1113.jpg 390 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1083.jpg 391 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/321.jpg 392 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1457.jpg 393 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1458.jpg 394 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/258.jpg 395 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/105.jpg 396 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/212.jpg 397 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/240.jpg 398 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/203.jpg 399 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/293.jpg 400 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1215.jpg 401 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1005.jpg 402 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/124.jpg 403 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1046.jpg 404 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1146.jpg 405 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/3.jpg 406 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/187.jpg 407 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/320.jpg 408 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1160.jpg 409 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1101.jpg 410 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/163.jpg 411 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1229.jpg 412 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/178.jpg 413 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/133.jpg 414 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1415.jpg 415 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1252.jpg 416 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1106.jpg 417 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1051.jpg 418 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/23.jpg 419 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1262.jpg 420 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1255.jpg 421 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1064.jpg 422 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1179.jpg 423 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1459.jpg 424 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/145.jpg 425 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1287.jpg 426 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/294.jpg 427 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/185.jpg 428 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1.jpg 429 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/386.jpg 430 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1147.jpg 431 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1014.jpg 432 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/232.jpg 433 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/209.jpg 434 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1165.jpg 435 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1053.jpg 436 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1407.jpg 437 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1231.jpg 438 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1426.jpg 439 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1392.jpg 440 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/259.jpg 441 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1272.jpg 442 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/202.jpg 443 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1241.jpg 444 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/351.jpg 445 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1348.jpg 446 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1277.jpg 447 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1124.jpg 448 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/198.jpg 449 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/227.jpg 450 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/35.jpg 451 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1240.jpg 452 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1376.jpg 453 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1349.jpg 454 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1167.jpg 455 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1184.jpg 456 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/347.jpg 457 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1006.jpg 458 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1022.jpg 459 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1063.jpg 460 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1072.jpg 461 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1268.jpg 462 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/125.jpg 463 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/359.jpg 464 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1408.jpg 465 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1260.jpg 466 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1443.jpg 467 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/139.jpg 468 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/161.jpg 469 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/330.jpg 470 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/171.jpg 471 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/106.jpg 472 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/395.jpg 473 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/376.jpg 474 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/12.jpg 475 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1044.jpg 476 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1473.jpg 477 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1109.jpg 478 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1080.jpg 479 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1065.jpg 480 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/334.jpg 481 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/231.jpg 482 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/358.jpg 483 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1357.jpg 484 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1276.jpg 485 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1085.jpg 486 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1066.jpg 487 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/316.jpg 488 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1118.jpg 489 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1138.jpg 490 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/324.jpg 491 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1050.jpg 492 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1372.jpg 493 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/211.jpg 494 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/174.jpg 495 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1329.jpg 496 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1432.jpg 497 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/291.jpg 498 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/169.jpg 499 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1283.jpg 500 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1339.jpg 501 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1341.jpg 502 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/315.jpg 503 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1418.jpg 504 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/194.jpg 505 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1130.jpg 506 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1438.jpg 507 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/182.jpg 508 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1243.jpg 509 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/331.jpg 510 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1038.jpg 511 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/11.jpg 512 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1465.jpg 513 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1040.jpg 514 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1102.jpg 515 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/342.jpg 516 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1484.jpg 517 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/244.jpg 518 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/282.jpg 519 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1057.jpg 520 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/296.jpg 521 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1308.jpg 522 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/399.jpg 523 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1135.jpg 524 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/123.jpg 525 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1123.jpg 526 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1149.jpg 527 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1043.jpg 528 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/396.jpg 529 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1025.jpg 530 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/238.jpg 531 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1496.jpg 532 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1450.jpg 533 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1314.jpg 534 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1423.jpg 535 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/237.jpg 536 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1440.jpg 537 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1235.jpg 538 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1153.jpg 539 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1318.jpg 540 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1495.jpg 541 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/204.jpg 542 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1150.jpg 543 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/138.jpg 544 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1328.jpg 545 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/249.jpg 546 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/216.jpg 547 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1269.jpg 548 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/246.jpg 549 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/226.jpg 550 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1317.jpg 551 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/197.jpg 552 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1424.jpg 553 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1084.jpg 554 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1367.jpg 555 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1182.jpg 556 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/110.jpg 557 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1120.jpg 558 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/273.jpg 559 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/339.jpg 560 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1093.jpg 561 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1225.jpg 562 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/192.jpg 563 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1477.jpg 564 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/153.jpg 565 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/340.jpg 566 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1444.jpg 567 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1346.jpg 568 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1048.jpg 569 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/30.jpg 570 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/126.jpg 571 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/355.jpg 572 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/189.jpg 573 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1449.jpg 574 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/274.jpg 575 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1475.jpg 576 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1403.jpg 577 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1161.jpg 578 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/388.jpg 579 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1081.jpg 580 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1460.jpg 581 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1493.jpg 582 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1366.jpg 583 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/39.jpg 584 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1090.jpg 585 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/143.jpg 586 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/234.jpg 587 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1369.jpg 588 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/158.jpg 589 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1306.jpg 590 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/205.jpg 591 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1409.jpg 592 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1077.jpg 593 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1295.jpg 594 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/382.jpg 595 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1198.jpg 596 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/391.jpg 597 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1178.jpg 598 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/309.jpg 599 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1141.jpg 600 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/188.jpg 601 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1419.jpg 602 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1384.jpg 603 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/314.jpg 604 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/288.jpg 605 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/141.jpg 606 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1433.jpg 607 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/261.jpg 608 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1343.jpg 609 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1244.jpg 610 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1377.jpg 611 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1035.jpg 612 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1099.jpg 613 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/299.jpg 614 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1171.jpg 615 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1480.jpg 616 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/346.jpg 617 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/284.jpg 618 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1193.jpg 619 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/225.jpg 620 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1114.jpg 621 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1296.jpg 622 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1155.jpg 623 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1429.jpg 624 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1208.jpg 625 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1371.jpg 626 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1230.jpg 627 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1200.jpg 628 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1398.jpg 629 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/247.jpg 630 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1273.jpg 631 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/18.jpg 632 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1220.jpg 633 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1490.jpg 634 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/154.jpg 635 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1320.jpg 636 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/239.jpg 637 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1055.jpg 638 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1097.jpg 639 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/160.jpg 640 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1111.jpg 641 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1010.jpg 642 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1254.jpg 643 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1478.jpg 644 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/393.jpg 645 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/36.jpg 646 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1049.jpg 647 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/254.jpg 648 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/322.jpg 649 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/180.jpg 650 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1355.jpg 651 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1163.jpg 652 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/279.jpg 653 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1023.jpg 654 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/40.jpg 655 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/262.jpg 656 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1143.jpg 657 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1270.jpg 658 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1397.jpg 659 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1133.jpg 660 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/269.jpg 661 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1482.jpg 662 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/21.jpg 663 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/280.jpg 664 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1249.jpg 665 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/276.jpg 666 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1091.jpg 667 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/149.jpg 668 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/397.jpg 669 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1278.jpg 670 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1412.jpg 671 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1074.jpg 672 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1024.jpg 673 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1356.jpg 674 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/389.jpg 675 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1095.jpg 676 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1332.jpg 677 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1467.jpg 678 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1417.jpg 679 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1261.jpg 680 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/308.jpg 681 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/333.jpg 682 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/381.jpg 683 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/360.jpg 684 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1471.jpg 685 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1288.jpg 686 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1414.jpg 687 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/184.jpg 688 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1075.jpg 689 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/201.jpg 690 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/146.jpg 691 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/348.jpg 692 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1491.jpg 693 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/270.jpg 694 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1497.jpg 695 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/251.jpg 696 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1169.jpg 697 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/235.jpg 698 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/207.jpg 699 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1374.jpg 700 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/307.jpg 701 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1196.jpg 702 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1031.jpg 703 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1378.jpg 704 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/144.jpg 705 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1351.jpg 706 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1361.jpg 707 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1410.jpg 708 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/24.jpg 709 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/300.jpg 710 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1078.jpg 711 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/220.jpg 712 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1042.jpg 713 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1060.jpg 714 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1056.jpg 715 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/177.jpg 716 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/27.jpg 717 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1448.jpg 718 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/10.jpg 719 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1164.jpg 720 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1336.jpg 721 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1224.jpg 722 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1209.jpg 723 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1334.jpg 724 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/156.jpg 725 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1030.jpg 726 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/173.jpg 727 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1013.jpg 728 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/29.jpg 729 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/20.jpg 730 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1452.jpg 731 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1427.jpg 732 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1170.jpg 733 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1282.jpg 734 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1082.jpg 735 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1027.jpg 736 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1233.jpg 737 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/387.jpg 738 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/256.jpg 739 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1068.jpg 740 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1112.jpg 741 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1151.jpg 742 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1431.jpg 743 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1375.jpg 744 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1447.jpg 745 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1002.jpg 746 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1381.jpg 747 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1316.jpg 748 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/129.jpg 749 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1395.jpg 750 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1213.jpg 751 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1420.jpg 752 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/121.jpg 753 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1362.jpg 754 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1145.jpg 755 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1280.jpg 756 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1222.jpg 757 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1034.jpg 758 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/303.jpg 759 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/116.jpg 760 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1246.jpg 761 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/301.jpg 762 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1404.jpg 763 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/344.jpg 764 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/394.jpg 765 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/367.jpg 766 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/326.jpg 767 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1430.jpg 768 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1033.jpg 769 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/268.jpg 770 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/236.jpg 771 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1037.jpg 772 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1393.jpg 773 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/157.jpg 774 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1364.jpg 775 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1466.jpg 776 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1000.jpg 777 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1313.jpg 778 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1032.jpg 779 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/328.jpg 780 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1386.jpg -------------------------------------------------------------------------------- /data/custom/valid.txt: -------------------------------------------------------------------------------- 1 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1299.jpg 2 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1315.jpg 3 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/289.jpg 4 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1464.jpg 5 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/329.jpg 6 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1402.jpg 7 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/193.jpg 8 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1018.jpg 9 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/277.jpg 10 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1201.jpg 11 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/22.jpg 12 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1103.jpg 13 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1168.jpg 14 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1058.jpg 15 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/168.jpg 16 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1016.jpg 17 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1192.jpg 18 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/120.jpg 19 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1359.jpg 20 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/218.jpg 21 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1322.jpg 22 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1500.jpg 23 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/265.jpg 24 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1236.jpg 25 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/384.jpg 26 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1205.jpg 27 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1365.jpg 28 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1360.jpg 29 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/195.jpg 30 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/215.jpg 31 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1290.jpg 32 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/148.jpg 33 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/245.jpg 34 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/200.jpg 35 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/162.jpg 36 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1323.jpg 37 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1345.jpg 38 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/370.jpg 39 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/336.jpg 40 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1211.jpg 41 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1354.jpg 42 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1054.jpg 43 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/130.jpg 44 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1173.jpg 45 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/372.jpg 46 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1454.jpg 47 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1425.jpg 48 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1210.jpg 49 | 
/home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/363.jpg 50 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1021.jpg 51 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1216.jpg 52 | /home/zigangzhao/zzg_project/PyTorch-YOLOv3/data/custom/images/0520/1413.jpg -------------------------------------------------------------------------------- /data/owndata/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/1.jpg -------------------------------------------------------------------------------- /data/owndata/1010.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/1010.jpg -------------------------------------------------------------------------------- /data/owndata/1027.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/1027.jpg -------------------------------------------------------------------------------- /data/owndata/104.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/104.jpg -------------------------------------------------------------------------------- /data/owndata/1041.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/1041.jpg -------------------------------------------------------------------------------- /data/owndata/1048.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/1048.jpg -------------------------------------------------------------------------------- /data/owndata/1063.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/1063.jpg -------------------------------------------------------------------------------- /data/owndata/120.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/120.jpg -------------------------------------------------------------------------------- /data/owndata/1345.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/1345.jpg -------------------------------------------------------------------------------- /data/owndata/200.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/200.jpg -------------------------------------------------------------------------------- /data/owndata/22.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/22.jpg -------------------------------------------------------------------------------- /data/owndata/29.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/data/owndata/29.jpg -------------------------------------------------------------------------------- /detect_owndata.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | from models import * 4 | from utils.utils import * 5 | from utils.datasets import * 6 | 7 | import os 8 | import cv2 9 | import sys 10 | import time 11 | import datetime 12 | import argparse 13 | 14 | from PIL import Image 15 | 16 | import torch 17 | from torch.utils.data import DataLoader 18 | from torchvision import datasets 19 | from torch.autograd import Variable 20 | from utils.datasets import * 21 | 22 | import matplotlib.pyplot as plt 23 | import matplotlib.patches as patches 24 | from matplotlib.ticker import NullLocator 25 | 26 | if __name__ == "__main__": 27 | parser = argparse.ArgumentParser() 28 | parser.add_argument("--image_folder", type=str, default="data/owndata", help="path to dataset") 29 | parser.add_argument("--model_def", type=str, default="config/yolov3-custom.cfg", help="path to model definition file") 30 | parser.add_argument("--weights_path", type=str, default="checkpoints/yolov3_ckpt_195.pth", help="path to weights file") 31 | parser.add_argument("--class_path", type=str, default="data/custom/classes.names", help="path to class label file") 32 | parser.add_argument("--data_config", type=str, default="config/custom.data", help="path to data config file") 33 | parser.add_argument("--conf_thres", type=float, default=0.8, help="object confidence threshold") 34 | parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression") 35 | parser.add_argument("--batch_size", type=int, default=1, help="size of the batches") 36 | parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation") 37 | parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension") 38 | parser.add_argument("--checkpoint_model", type=str, help="path to checkpoint model") 39 | opt = parser.parse_args() 40 | print(opt) 41 | 42 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 43 | 44 | os.makedirs("output", exist_ok=True) 45 | 46 | # Set up model 47 | model = Darknet(opt.model_def, img_size=opt.img_size).to(device) 48 | 49 | if opt.weights_path.endswith(".weights"): 50 | # Load darknet weights 51 | model.load_darknet_weights(opt.weights_path) 52 | else: 53 | # Load checkpoint weights 54 | model.load_state_dict(torch.load(opt.weights_path)) 55 | 56 | model.eval() # Set in evaluation mode 57 | 58 | dataloader = DataLoader( 59 | ImageFolder(opt.image_folder, img_size=opt.img_size), 60 | batch_size=opt.batch_size, 61 | shuffle=False, 62 | 
num_workers=opt.n_cpu, 63 | ) 64 | # data_config = parse_data_config(opt.data_config) 65 | # test_path = data_config["valid"] 66 | # dataset = ListDataset(test_path, augment=False) 67 | # dataloader = torch.utils.data.DataLoader( 68 | # dataset, 69 | # batch_size=opt.batch_size, 70 | # shuffle=False, 71 | # num_workers=opt.n_cpu, 72 | # ) 73 | 74 | 75 | 76 | 77 | classes = load_classes(opt.class_path) # Extracts class labels from file 78 | 79 | Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor 80 | 81 | imgs = [] # Stores image paths 82 | img_detections = [] # Stores detections for each image index 83 | 84 | print("\nPerforming object detection:") 85 | prev_time = time.time() 86 | for batch_i, (img_paths, input_imgs) in enumerate(dataloader): 87 | # Configure input 88 | 89 | input_imgs = Variable(input_imgs.type(Tensor)) 90 | 91 | # Get detections 92 | with torch.no_grad(): 93 | # input_imgs = input_imgs[:3] 94 | a = input_imgs.cpu().numpy() 95 | print(a.shape) 96 | detections = model(input_imgs) 97 | detections = non_max_suppression(detections, opt.conf_thres, opt.nms_thres) 98 | 99 | # Log progress 100 | current_time = time.time() 101 | inference_time = datetime.timedelta(seconds=current_time - prev_time) 102 | prev_time = current_time 103 | print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time)) 104 | 105 | # Save image and detections 106 | imgs.extend(img_paths) 107 | img_detections.extend(detections) 108 | 109 | # Bounding-box colors 110 | cmap = plt.get_cmap("tab20b") 111 | colors = [cmap(i) for i in np.linspace(0, 1, 20)] 112 | 113 | print("\nSaving images:") 114 | # Iterate through images and save plot of detections 115 | for img_i, (path, detections) in enumerate(zip(imgs, img_detections)): 116 | 117 | print("(%d) Image: '%s'" % (img_i, path)) 118 | 119 | # Create plot 120 | img = np.array(Image.open(path)) 121 | # plt.figure() 122 | # plt.imshow(img) 123 | # plt.show() 124 | fig, ax = plt.subplots(1) 125 | ax.imshow(img) 126 | 127 | # Draw bounding boxes and labels of detections 128 | if detections is not None: 129 | # Rescale boxes to original image 130 | detections = rescale_boxes(detections, opt.img_size, img.shape[:2]) 131 | unique_labels = detections[:, -1].cpu().unique() 132 | n_cls_preds = len(unique_labels) 133 | bbox_colors = random.sample(colors, n_cls_preds) 134 | for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections: 135 | 136 | print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item())) 137 | 138 | box_w = x2 - x1 139 | box_h = y2 - y1 140 | 141 | color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])] 142 | # Create a Rectangle patch 143 | bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none") 144 | # Add the bbox to the plot 145 | ax.add_patch(bbox) 146 | # Add label 147 | plt.text( 148 | x1, 149 | y1, 150 | s=classes[int(cls_pred)], 151 | color="white", 152 | verticalalignment="top", 153 | bbox={"color": color, "pad": 0}, 154 | ) 155 | # plt.figure() 156 | # plt.imshow(img) 157 | # plt.show() 158 | 159 | # Save generated image with detections 160 | plt.axis("off") 161 | plt.gca().xaxis.set_major_locator(NullLocator()) 162 | plt.gca().yaxis.set_major_locator(NullLocator()) 163 | filename = path.split("/")[-1].split(".")[0] 164 | # cv2.imwrite("output/{}.jpg".format(filename), img) 165 | plt.savefig(f"output/{filename}.png", bbox_inches="tight", pad_inches=0.0) 166 | plt.close() 167 |
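A minimal usage sketch for detect_owndata.py (not part of the repository files): it assumes a trained checkpoint such as the script's default checkpoints/yolov3_ckpt_195.pth has already been produced, and it only passes flags defined in the argparse section above.

python detect_owndata.py --image_folder data/owndata --model_def config/yolov3-custom.cfg --weights_path checkpoints/yolov3_ckpt_195.pth --class_path data/custom/classes.names --conf_thres 0.8 --nms_thres 0.4 --img_size 416

Each input image from data/owndata is then written as an annotated PNG under the output/ directory, matching the sample results listed in the tree above.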
-------------------------------------------------------------------------------- /models.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | from torch.autograd import Variable 7 | import numpy as np 8 | 9 | from utils.parse_config import * 10 | from utils.utils import build_targets, to_cpu, non_max_suppression 11 | 12 | import matplotlib.pyplot as plt 13 | import matplotlib.patches as patches 14 | 15 | 16 | def create_modules(module_defs): 17 | """ 18 | Constructs module list of layer blocks from module configuration in module_defs 19 | """ 20 | hyperparams = module_defs.pop(0) 21 | output_filters = [int(hyperparams["channels"])] 22 | module_list = nn.ModuleList() 23 | for module_i, module_def in enumerate(module_defs): 24 | modules = nn.Sequential() 25 | 26 | if module_def["type"] == "convolutional": 27 | bn = int(module_def["batch_normalize"]) 28 | filters = int(module_def["filters"]) 29 | kernel_size = int(module_def["size"]) 30 | pad = (kernel_size - 1) // 2 31 | modules.add_module( 32 | f"conv_{module_i}", 33 | nn.Conv2d( 34 | in_channels=output_filters[-1], 35 | out_channels=filters, 36 | kernel_size=kernel_size, 37 | stride=int(module_def["stride"]), 38 | padding=pad, 39 | bias=not bn, 40 | ), 41 | ) 42 | if bn: 43 | modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5)) 44 | if module_def["activation"] == "leaky": 45 | modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1)) 46 | 47 | elif module_def["type"] == "maxpool": 48 | kernel_size = int(module_def["size"]) 49 | stride = int(module_def["stride"]) 50 | if kernel_size == 2 and stride == 1: 51 | modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1))) 52 | maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2)) 53 | modules.add_module(f"maxpool_{module_i}", maxpool) 54 | 55 | elif module_def["type"] == "upsample": 56 | upsample = Upsample(scale_factor=int(module_def["stride"]), mode="nearest") 57 | modules.add_module(f"upsample_{module_i}", upsample) 58 | 59 | elif module_def["type"] == "route": 60 | layers = [int(x) for x in module_def["layers"].split(",")] 61 | filters = sum([output_filters[1:][i] for i in layers]) 62 | modules.add_module(f"route_{module_i}", EmptyLayer()) 63 | 64 | elif module_def["type"] == "shortcut": 65 | filters = output_filters[1:][int(module_def["from"])] 66 | modules.add_module(f"shortcut_{module_i}", EmptyLayer()) 67 | 68 | elif module_def["type"] == "yolo": 69 | anchor_idxs = [int(x) for x in module_def["mask"].split(",")] 70 | # Extract anchors 71 | anchors = [int(x) for x in module_def["anchors"].split(",")] 72 | anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)] 73 | anchors = [anchors[i] for i in anchor_idxs] 74 | num_classes = int(module_def["classes"]) 75 | img_size = int(hyperparams["height"]) 76 | # Define detection layer 77 | yolo_layer = YOLOLayer(anchors, num_classes, img_size) 78 | modules.add_module(f"yolo_{module_i}", yolo_layer) 79 | # Register module list and number of output filters 80 | module_list.append(modules) 81 | output_filters.append(filters) 82 | 83 | return hyperparams, module_list 84 | 85 | 86 | class Upsample(nn.Module): 87 | """ nn.Upsample is deprecated """ 88 | 89 | def __init__(self, scale_factor, mode="nearest"): 90 | super(Upsample, self).__init__() 91 | self.scale_factor = 
scale_factor 92 | self.mode = mode 93 | 94 | def forward(self, x): 95 | x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode) 96 | return x 97 | 98 | 99 | class EmptyLayer(nn.Module): 100 | """Placeholder for 'route' and 'shortcut' layers""" 101 | 102 | def __init__(self): 103 | super(EmptyLayer, self).__init__() 104 | 105 | 106 | class YOLOLayer(nn.Module): 107 | """Detection layer""" 108 | 109 | def __init__(self, anchors, num_classes, img_dim=416): 110 | super(YOLOLayer, self).__init__() 111 | self.anchors = anchors 112 | self.num_anchors = len(anchors) 113 | self.num_classes = num_classes 114 | self.ignore_thres = 0.5 115 | self.mse_loss = nn.MSELoss() 116 | self.bce_loss = nn.BCELoss() 117 | self.obj_scale = 1 118 | self.noobj_scale = 100 119 | self.metrics = {} 120 | self.img_dim = img_dim 121 | self.grid_size = 0 # grid size 122 | 123 | def compute_grid_offsets(self, grid_size, cuda=True): 124 | self.grid_size = grid_size 125 | g = self.grid_size 126 | FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor 127 | self.stride = self.img_dim / self.grid_size 128 | # Calculate offsets for each grid 129 | self.grid_x = torch.arange(g).repeat(g, 1).view([1, 1, g, g]).type(FloatTensor) 130 | self.grid_y = torch.arange(g).repeat(g, 1).t().view([1, 1, g, g]).type(FloatTensor) 131 | self.scaled_anchors = FloatTensor([(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors]) 132 | self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1)) 133 | self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1)) 134 | 135 | def forward(self, x, targets=None, img_dim=None): 136 | 137 | # Tensors for cuda support 138 | FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor 139 | LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor 140 | ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor 141 | 142 | self.img_dim = img_dim 143 | num_samples = x.size(0) 144 | grid_size = x.size(2) 145 | 146 | prediction = ( 147 | x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size, grid_size) 148 | .permute(0, 1, 3, 4, 2) 149 | .contiguous() 150 | ) 151 | 152 | # Get outputs 153 | x = torch.sigmoid(prediction[..., 0]) # Center x 154 | y = torch.sigmoid(prediction[..., 1]) # Center y 155 | w = prediction[..., 2] # Width 156 | h = prediction[..., 3] # Height 157 | pred_conf = torch.sigmoid(prediction[..., 4]) # Conf 158 | pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred. 
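        # The raw predictions extracted above are decoded on the following lines,
        # per the YOLOv3 formulation:
        #   b_x = sigmoid(t_x) + c_x,  b_y = sigmoid(t_y) + c_y   (c_x, c_y from compute_grid_offsets)
        #   b_w = p_w * exp(t_w),      b_h = p_h * exp(t_h)       (anchors pre-divided by the stride)
        # All of this happens in grid units; the final output is multiplied by
        # self.stride to return to input-image pixels.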
159 | 160 | # If grid size does not match current we compute new offsets 161 | if grid_size != self.grid_size: 162 | self.compute_grid_offsets(grid_size, cuda=x.is_cuda) 163 | 164 | # Add offset and scale with anchors 165 | pred_boxes = FloatTensor(prediction[..., :4].shape) 166 | pred_boxes[..., 0] = x.data + self.grid_x 167 | pred_boxes[..., 1] = y.data + self.grid_y 168 | pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w 169 | pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h 170 | 171 | output = torch.cat( 172 | ( 173 | pred_boxes.view(num_samples, -1, 4) * self.stride, 174 | pred_conf.view(num_samples, -1, 1), 175 | pred_cls.view(num_samples, -1, self.num_classes), 176 | ), 177 | -1, 178 | ) 179 | 180 | if targets is None: 181 | return output, 0 182 | else: 183 | iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets( 184 | pred_boxes=pred_boxes, 185 | pred_cls=pred_cls, 186 | target=targets, 187 | anchors=self.scaled_anchors, 188 | ignore_thres=self.ignore_thres, 189 | ) 190 | 191 | # Loss : Mask outputs to ignore non-existing objects (except with conf. loss) 192 | loss_x = self.mse_loss(x[obj_mask], tx[obj_mask]) 193 | loss_y = self.mse_loss(y[obj_mask], ty[obj_mask]) 194 | loss_w = self.mse_loss(w[obj_mask], tw[obj_mask]) 195 | loss_h = self.mse_loss(h[obj_mask], th[obj_mask]) 196 | loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask]) 197 | loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask]) 198 | loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj 199 | loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask]) 200 | total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls 201 | 202 | # Metrics 203 | cls_acc = 100 * class_mask[obj_mask].mean() 204 | conf_obj = pred_conf[obj_mask].mean() 205 | conf_noobj = pred_conf[noobj_mask].mean() 206 | conf50 = (pred_conf > 0.5).float() 207 | iou50 = (iou_scores > 0.5).float() 208 | iou75 = (iou_scores > 0.75).float() 209 | detected_mask = conf50 * class_mask * tconf 210 | precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16) 211 | recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16) 212 | recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16) 213 | 214 | self.metrics = { 215 | "loss": to_cpu(total_loss).item(), 216 | "x": to_cpu(loss_x).item(), 217 | "y": to_cpu(loss_y).item(), 218 | "w": to_cpu(loss_w).item(), 219 | "h": to_cpu(loss_h).item(), 220 | "conf": to_cpu(loss_conf).item(), 221 | "cls": to_cpu(loss_cls).item(), 222 | "cls_acc": to_cpu(cls_acc).item(), 223 | "recall50": to_cpu(recall50).item(), 224 | "recall75": to_cpu(recall75).item(), 225 | "precision": to_cpu(precision).item(), 226 | "conf_obj": to_cpu(conf_obj).item(), 227 | "conf_noobj": to_cpu(conf_noobj).item(), 228 | "grid_size": grid_size, 229 | } 230 | 231 | return output, total_loss 232 | 233 | 234 | class Darknet(nn.Module): 235 | """YOLOv3 object detection model""" 236 | 237 | def __init__(self, config_path, img_size=416): 238 | super(Darknet, self).__init__() 239 | self.module_defs = parse_model_config(config_path) 240 | self.hyperparams, self.module_list = create_modules(self.module_defs) 241 | self.yolo_layers = [layer[0] for layer in self.module_list if hasattr(layer[0], "metrics")] 242 | self.img_size = img_size 243 | self.seen = 0 244 | self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32) 245 | 246 | def forward(self, x, targets=None): 247 | img_dim = x.shape[2] 248 | loss = 0 
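        # layer_outputs caches every module's output so that 'route' layers can
        # concatenate earlier feature maps along the channel dimension and
        # 'shortcut' layers can add a residual connection via a negative index.
        # Each YOLO head appends its decoded detections to yolo_outputs, which are
        # concatenated across the three scales at the end of the forward pass.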
249 | layer_outputs, yolo_outputs = [], [] 250 | for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)): 251 | if module_def["type"] in ["convolutional", "upsample", "maxpool"]: 252 | x = module(x) 253 | elif module_def["type"] == "route": 254 | x = torch.cat([layer_outputs[int(layer_i)] for layer_i in module_def["layers"].split(",")], 1) 255 | elif module_def["type"] == "shortcut": 256 | layer_i = int(module_def["from"]) 257 | x = layer_outputs[-1] + layer_outputs[layer_i] 258 | elif module_def["type"] == "yolo": 259 | x, layer_loss = module[0](x, targets, img_dim) 260 | loss += layer_loss 261 | yolo_outputs.append(x) 262 | layer_outputs.append(x) 263 | yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1)) 264 | return yolo_outputs if targets is None else (loss, yolo_outputs) 265 | 266 | def load_darknet_weights(self, weights_path): 267 | """Parses and loads the weights stored in 'weights_path'""" 268 | 269 | # Open the weights file 270 | with open(weights_path, "rb") as f: 271 | header = np.fromfile(f, dtype=np.int32, count=5) # First five are header values 272 | self.header_info = header # Needed to write header when saving weights 273 | self.seen = header[3] # number of images seen during training 274 | weights = np.fromfile(f, dtype=np.float32) # The rest are weights 275 | 276 | # Establish cutoff for loading backbone weights 277 | cutoff = None 278 | if "darknet53.conv.74" in weights_path: 279 | cutoff = 75 280 | 281 | ptr = 0 282 | for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)): 283 | if i == cutoff: 284 | break 285 | if module_def["type"] == "convolutional": 286 | conv_layer = module[0] 287 | if module_def["batch_normalize"]: 288 | # Load BN bias, weights, running mean and running variance 289 | bn_layer = module[1] 290 | num_b = bn_layer.bias.numel() # Number of biases 291 | # Bias 292 | bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias) 293 | bn_layer.bias.data.copy_(bn_b) 294 | ptr += num_b 295 | # Weight 296 | bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight) 297 | bn_layer.weight.data.copy_(bn_w) 298 | ptr += num_b 299 | # Running Mean 300 | bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean) 301 | bn_layer.running_mean.data.copy_(bn_rm) 302 | ptr += num_b 303 | # Running Var 304 | bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var) 305 | bn_layer.running_var.data.copy_(bn_rv) 306 | ptr += num_b 307 | else: 308 | # Load conv. bias 309 | num_b = conv_layer.bias.numel() 310 | conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias) 311 | conv_layer.bias.data.copy_(conv_b) 312 | ptr += num_b 313 | # Load conv. 
weights 314 | num_w = conv_layer.weight.numel() 315 | conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight) 316 | conv_layer.weight.data.copy_(conv_w) 317 | ptr += num_w 318 | 319 | def save_darknet_weights(self, path, cutoff=-1): 320 | """ 321 | @:param path - path of the new weights file 322 | @:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved) 323 | """ 324 | fp = open(path, "wb") 325 | self.header_info[3] = self.seen 326 | self.header_info.tofile(fp) 327 | 328 | # Iterate through layers 329 | for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])): 330 | if module_def["type"] == "convolutional": 331 | conv_layer = module[0] 332 | # If batch norm, load bn first 333 | if module_def["batch_normalize"]: 334 | bn_layer = module[1] 335 | bn_layer.bias.data.cpu().numpy().tofile(fp) 336 | bn_layer.weight.data.cpu().numpy().tofile(fp) 337 | bn_layer.running_mean.data.cpu().numpy().tofile(fp) 338 | bn_layer.running_var.data.cpu().numpy().tofile(fp) 339 | # Load conv bias 340 | else: 341 | conv_layer.bias.data.cpu().numpy().tofile(fp) 342 | # Load conv weights 343 | conv_layer.weight.data.cpu().numpy().tofile(fp) 344 | 345 | fp.close() 346 | -------------------------------------------------------------------------------- /output/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/1.png -------------------------------------------------------------------------------- /output/1010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/1010.png -------------------------------------------------------------------------------- /output/1027.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/1027.png -------------------------------------------------------------------------------- /output/104.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/104.png -------------------------------------------------------------------------------- /output/1041.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/1041.png -------------------------------------------------------------------------------- /output/1048.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/1048.png -------------------------------------------------------------------------------- /output/1063.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/1063.png 
-------------------------------------------------------------------------------- /output/120.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/120.png -------------------------------------------------------------------------------- /output/1345.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/1345.png -------------------------------------------------------------------------------- /output/200.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/200.png -------------------------------------------------------------------------------- /output/22.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/22.png -------------------------------------------------------------------------------- /output/29.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zigangzhao-ai/Pytorch-Yolov3--Remote-sensing-image/a69cd67fc18ccd27961c9c8889c085cc922584c2/output/29.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | torch>=1.0 3 | torchvision 4 | matplotlib 5 | tensorflow 6 | tensorboard 7 | terminaltables 8 | pillow 9 | tqdm 10 | -------------------------------------------------------------------------------- /scripts/generate_labels.py: -------------------------------------------------------------------------------- 1 | # coding:utf-8 2 | # 生成darknet需要的标签 3 | 4 | import os 5 | import os.path as path 6 | import json 7 | 8 | # 保存所有类别对应的id的字典 9 | class_id_dict = {} 10 | 11 | # 判断是不是图片 12 | def isPic(basename): 13 | file_type = basename.split('.')[-1] 14 | pic_file_list = ['png', 'jpg', 'jpeg', 'BMP', 'JPEG', 'JPG', 'JPeG', 'Jpeg', 'PNG', 'TIF', 'bmp', 'tif'] 15 | if file_type in pic_file_list: 16 | return True 17 | return False 18 | 19 | # 判断这个图片有没有对应的json文件 20 | def has_json(img_file): 21 | # 得到json文件名 22 | base_name = path.basename(img_file) 23 | dir_name = img_file[:len(img_file) - len(base_name)] 24 | json_name = base_name.split('.')[0] 25 | json_name = json_name + '.json' 26 | json_name = path.join(dir_name, json_name) 27 | if path.isfile(json_name): 28 | return json_name 29 | return None 30 | 31 | # 生成file_name的label文件,并重新写入 content_list 中内容 32 | def rewrite_labels_file(file_name, content_list): 33 | with open(file_name, 'w') as f: 34 | for line in content_list: 35 | curr_line_str = '' 36 | for element in line: 37 | curr_line_str += str(element) + ' ' 38 | f.write(curr_line_str + '\n') 39 | return 40 | 41 | # 生成file_name的训练图片路径文件 42 | def rewrite_train_name_file(file_name, content_list): 43 | with open(file_name, 'w') as f: 44 | for line in content_list: 45 | f.write(str(line) + '\n') 46 | return 47 | 48 | # 读取文件 49 | def read_file(file_name): 50 | if not path.exists(file_name): 51 | print("warning:不存在文件"+str(file_name)) 52 | return None 
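    # File exists: read it and return its lines with trailing newlines stripped.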
53 | with open(file_name, 'r', encoding='utf-8') as f: 54 | result = [] 55 | for line in f.readlines(): 56 | result.append(line.strip('\n')) 57 | return result 58 | 59 | # 加载class_id 60 | def load_class_id(class_name_file): 61 | global class_id_dict 62 | class_list = read_file(class_name_file) 63 | for i in range(len(class_list)): 64 | class_id_dict[str(class_list[i])] = i 65 | return class_id_dict 66 | 67 | # 得到分类的id,未分类是-1 68 | def get_id(class_name, class_name_file): 69 | global class_id_dict 70 | if len(class_id_dict) < 1: 71 | class_id_dict = load_class_id(class_name_file) 72 | print("分类 id 加载完成") 73 | # 补丁:替换掉汉字 "局""段" 74 | class_name = get_id_patch(class_name) 75 | if class_name in class_id_dict.keys(): 76 | return class_id_dict[class_name] 77 | return -1 78 | # 去掉汉字'段'和'局' 79 | def get_id_patch(class_name): 80 | if class_name.strip() == '段': 81 | return 'duan' 82 | if class_name.strip() == '局': 83 | return 'ju' 84 | return class_name 85 | 86 | # 解析一个points,得到坐标序列 87 | def get_relative_point(img_width, img_height, point_list): 88 | # point_list是一个包含两个坐标的list 89 | 90 | dh = 1.0/ img_height 91 | x_min = min(point_list[0][0], point_list[1][0]) 92 | y_min = min(point_list[0][1], point_list[1][1]) 93 | x_max = max(point_list[0][0], point_list[1][0]) 94 | y_max = max(point_list[0][1], point_list[1][1]) 95 | dw = 1.0 / img_width 96 | dh = 1.0/ img_height 97 | # 中心坐标 98 | x = (x_min + x_max)/2.0 99 | y = (y_min + y_max)/2.0 100 | w = x_max - x_min 101 | h = y_max - y_min 102 | x = x*dw 103 | w = w*dw 104 | y = y*dh 105 | h = h*dh 106 | return [x, y, w, h] 107 | 108 | # 解析json文件 109 | def paras_json(json_file, class_name_file): 110 | if not path.exists(json_file): 111 | print("warning:不存在json文件" + str(json_file)) 112 | assert(0) 113 | # 读取json文件拿到基本信息, encoding要注意一下 114 | # try: 115 | # f = open(json_file, encoding="gbk") 116 | # setting = json.loads(f.read()) 117 | # except: 118 | # f = open(json_file, encoding='utf-8') 119 | # setting = json.loads(f.read()) 120 | f = open(json_file) 121 | setting = json.loads(f.read()) 122 | # print(setting) 123 | # f.close() 124 | shapes = setting['annotation'] 125 | height = float(setting['annotation']['size']['height']) ##1280 126 | width = float(setting['annotation']['size']['width']) ##659 127 | # 拿到标签坐标 128 | result = [] 129 | flag = 0 130 | count = 0 131 | 132 | if "object" not in [x for x in shapes]: 133 | return [[0,0,0,0,0]] 134 | 135 | for shape in shapes['object']: 136 | count += 1 137 | a = [x for x in shapes['object']] 138 | # print(shape['name']) 139 | if count == 5 and a[0] == 'name' : 140 | flag = 1 141 | else: 142 | flag = count 143 | 144 | if flag == 1: 145 | shape = shapes['object'] 146 | class_name = shape['name'] #得到分类名 147 | class_id = get_id(class_name, class_name_file) 148 | 149 | point = [] 150 | a = [] 151 | b = [] 152 | a.append(float(shape['bndbox']['xmin'])) 153 | a.append(float(shape['bndbox']['ymin'])) 154 | 155 | b.append(float(shape['bndbox']['xmax'])) 156 | b.append(float(shape['bndbox']['ymax'])) 157 | point.append(a) 158 | point.append(b) 159 | print(point) 160 | locate_result = get_relative_point(1280, 659, point) 161 | locate_result.insert(0, class_id) 162 | result.append(locate_result) 163 | return result 164 | 165 | if flag == count: 166 | 167 | for shape in shapes['object']: 168 | 169 | class_name = shape['name'] #得到分类名 170 | class_id = get_id(class_name, class_name_file) 171 | 172 | point = [] 173 | a = [] 174 | b = [] 175 | a.append(float(shape['bndbox']['xmin'])) 176 | a.append(float(shape['bndbox']['ymin'])) 
177 | 178 | b.append(float(shape['bndbox']['xmax'])) 179 | b.append(float(shape['bndbox']['ymax'])) 180 | 181 | point.append(a) 182 | point.append(b) 183 | print(point) 184 | 185 | locate_result = get_relative_point(1280, 659, point) 186 | # 插入id 187 | locate_result.insert(0, class_id) 188 | result.append(locate_result) 189 | print(result) 190 | return result 191 | # else: 192 | # return None 193 | 194 | # 得到文件夹下所有的图片文件 195 | def get_pic_file_from_dir(dir_name): 196 | ''' 197 | return:所有的图片文件名 198 | ''' 199 | if not path.isdir(dir_name): 200 | print("warning:路径 %s 不是文件夹" %dir_name) 201 | return [] 202 | result = [] 203 | for f in os.listdir(dir_name): 204 | curr_file = path.join(dir_name, f) 205 | if not path.isfile(curr_file): 206 | continue 207 | if not isPic(curr_file): 208 | continue 209 | result.append(f) 210 | return result 211 | 212 | def main(class_name="classes.names", img_dir="images/", train_txt='train.txt', labels_dir='labels'): 213 | 214 | cwd = os.getcwd() 215 | img_dir = path.join(cwd, img_dir) 216 | 217 | # print(img_dir) 218 | labels_dir = path.join(cwd, labels_dir) 219 | 220 | if not path.exists(img_dir): 221 | print("error:没有发现图片文件夹 ", img_dir) 222 | 223 | if not path.exists(labels_dir): 224 | os.mkdir(labels_dir) 225 | 226 | count = 0 227 | dir_len = len(os.listdir(img_dir)) # 进度条 228 | # print(dir_len) 229 | imgs = [] 230 | for f in os.listdir(img_dir): 231 | # print(f) 232 | curr_path = path.join(img_dir, f) 233 | # print(curr_path) 234 | if not path.isdir(curr_path): # 不是文件夹就先跳过 235 | continue 236 | curr_train_dir = curr_path 237 | # print(curr_train_dir) 238 | # 是文件夹就创建labels对应的文件夹 239 | curr_labels_dir = path.join(labels_dir, f) 240 | if not path.isdir(curr_labels_dir): 241 | os.mkdir(curr_labels_dir) 242 | # 拿到文件夹下所有的图片文件 243 | curr_dir_imgs = get_pic_file_from_dir(curr_train_dir) 244 | # print(curr_dir_imgs) 245 | # 解析这些图片的json文件 246 | for img_file in curr_dir_imgs: 247 | curr_img_file = path.join(curr_train_dir, img_file) 248 | # print(curr_img_file) 249 | json_file = has_json(curr_img_file) 250 | print(json_file) 251 | if json_file: 252 | # 保存图片路径 253 | imgs.append(curr_img_file) 254 | # 得到json信息 list 255 | json_inf = paras_json(json_file, class_name) 256 | # print(json_inf) 257 | # 标签文件名 258 | label_name = img_file.split('/')[-1].split('.')[0] + '.txt' 259 | curr_labels_file = path.join(curr_labels_dir, label_name) 260 | # 写入标签 261 | rewrite_labels_file(curr_labels_file, json_inf) 262 | count += 1 263 | print("\r当前进度: {:02f} %".format(count/dir_len * 100.0), end='') 264 | print("\n 保存训练图片路径到: ", train_txt) 265 | rewrite_train_name_file(train_txt, imgs) 266 | return 267 | 268 | if __name__ == "__main__": 269 | main() -------------------------------------------------------------------------------- /scripts/k_means.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # 获得anchors 3 | from __future__ import division, print_function 4 | 5 | import os 6 | import os.path as path 7 | import json 8 | import math 9 | import numpy as np 10 | import generate_labels as tool 11 | 12 | 13 | # 生成file_name文件,并追加写入name_list中内容 14 | def write_file(file_name, name_list): 15 | base_name = path.basename(file_name) 16 | dir_name = file_name[:len(file_name)-len(base_name)] 17 | if not path.exists(dir_name): 18 | os.mkdir(dir_name) 19 | with open(file_name, 'a') as f: 20 | for name in name_list: 21 | f.write(name+'\n') 22 | return 23 | 24 | # 生成file_name文件,并重新写入name_list中内容 25 | def rewrite_file(file_name, name_list): 26 | 
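    # (English gloss of the comment above) Recreate file_name, creating its
    # directory if missing, and overwrite it with one "index name" entry per line.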
base_name = path.basename(file_name) 27 | dir_name = file_name[:len(file_name)-len(base_name)] 28 | if not path.exists(dir_name): 29 | os.mkdir(dir_name) 30 | with open(file_name, 'w') as f: 31 | for i in range(len(name_list)): 32 | f.write(str(i) + ' ' + name_list[i] +'\n') 33 | return 34 | 35 | # 解析一个points,得到坐标序列 36 | def get_point(point_list): 37 | # point_list是一个包含两个坐标的list 38 | x_min = min(point_list[0][0], point_list[1][0]) 39 | y_min = min(point_list[0][1], point_list[1][1]) 40 | x_max = max(point_list[0][0], point_list[1][0]) 41 | y_max = max(point_list[0][1], point_list[1][1]) 42 | result = str(x_min) + ' ' + str(y_min) + ' ' + str(x_max) + ' ' + str(y_max) 43 | return result 44 | 45 | # 解析json文件 46 | def paras_json(json_file, class_name_file): 47 | if not path.exists(json_file): 48 | print("warning:不存在json文件" + str(json_file)) 49 | assert(0) 50 | f = open(json_file) 51 | setting = json.loads(f.read()) 52 | # print(setting) 53 | # f.close() 54 | shapes = setting['annotation'] 55 | height = setting['annotation']['size']['height'] 56 | width = setting['annotation']['size']['width'] 57 | # 拿到标签坐标 58 | result = "" 59 | flag = 0 60 | count = 0 61 | 62 | if "object" not in [x for x in shapes]: 63 | 64 | result += ' ' + '0' + ' ' + get_point([[0,0],[0,0]]) 65 | return str(width) + ' ' + str(height) + result 66 | 67 | for shape in shapes['object']: 68 | count += 1 69 | a = [x for x in shapes['object']] 70 | # print(shape['name']) 71 | if count == 5 and a[0] == 'name' : 72 | flag = 1 73 | else: 74 | flag = count 75 | 76 | if flag == 1: 77 | shape = shapes['object'] 78 | class_name = shape['name'] #得到分类名 79 | 80 | class_id = tool.get_id(class_name, class_name_file) 81 | point = [] 82 | a = [] 83 | b = [] 84 | a.append(float(shape['bndbox']['xmin'])) 85 | a.append(float(shape['bndbox']['ymin'])) 86 | b.append(float(shape['bndbox']['xmax'])) 87 | b.append(float(shape['bndbox']['ymax'])) 88 | point.append(a) 89 | point.append(b) 90 | # print(point) 91 | locate_result = get_point(point) 92 | 93 | result += ' ' + str(class_id) + ' ' + locate_result 94 | return str(width) + ' ' + str(height) + result 95 | 96 | if flag == count: 97 | for shape in shapes['object']: 98 | 99 | class_name = shape['name'] #得到分类名 100 | class_id = tool.get_id(class_name, class_name_file) 101 | 102 | point = [] 103 | a = [] 104 | b = [] 105 | a.append(float(shape['bndbox']['xmin'])) 106 | a.append(float(shape['bndbox']['ymin'])) 107 | b.append(float(shape['bndbox']['xmax'])) 108 | b.append(float(shape['bndbox']['ymax'])) 109 | 110 | point.append(a) 111 | point.append(b) 112 | # print(point) 113 | locate_result = get_point(point) 114 | result += ' ' + str(class_id) + ' ' + locate_result 115 | return str(width) + ' ' + str(height) + result 116 | 117 | # 得到文件夹里面所有的图片路径 118 | def get_pic_file(dir_path): 119 | result = [] 120 | if not path.isdir(dir_path): 121 | print("exception: 路径%s不是文件夹" %(dir_path)) 122 | return result 123 | # 读取图片路径 124 | for f in os.listdir(dir_path): 125 | curr_file = path.join(dir_path, f) 126 | if not path.isfile(curr_file): 127 | continue 128 | if not tool.isPic(f): 129 | continue 130 | result.append(curr_file) 131 | return result 132 | 133 | # 生成k_means需要的数据 134 | def generate_k_means_data(class_name='train.names', train_dir='./JPEGImages'): 135 | train_list = [] 136 | img_index = 0 137 | dir_list = os.listdir(train_dir) 138 | for i in range(len(dir_list)): 139 | f = dir_list[i] 140 | curr_path = os.path.join(train_dir, f) 141 | if not path.isdir(curr_path): 142 | continue 143 | curr_dir_imgs = 
get_pic_file(curr_path) 144 | # 判断有没有对应的json文件,并解析json文件保存到list中 145 | for img_file in curr_dir_imgs: 146 | json_file = tool.has_json(img_file) 147 | if json_file: 148 | # 有这个json文件就保存这个img 149 | json_inf = paras_json(json_file, class_name) 150 | train_list.append(str(img_index) + ' ' + str(img_file) + ' ' + str(json_inf)) 151 | img_index += 1 152 | print("\r文件夹处理进度:{:2f}%".format( (i+1)*100 / dir_list.__len__() ), end='') 153 | print() 154 | return train_list 155 | 156 | 157 | # ################ k_means ############# 158 | 159 | def iou(box, clusters): 160 | """ 161 | Calculates the Intersection over Union (IoU) between a box and k clusters. 162 | param: 163 | box: tuple or array, shifted to the origin (i. e. width and height) 164 | clusters: numpy array of shape (k, 2) where k is the number of clusters 165 | return: 166 | numpy array of shape (k, 0) where k is the number of clusters 167 | """ 168 | x = np.minimum(clusters[:, 0], box[0]) 169 | y = np.minimum(clusters[:, 1], box[1]) 170 | if np.count_nonzero(x == 0) > 0 or np.count_nonzero(y == 0) > 0: 171 | raise ValueError("Box has no area") 172 | 173 | intersection = x * y 174 | box_area = box[0] * box[1] 175 | cluster_area = clusters[:, 0] * clusters[:, 1] 176 | 177 | iou_ = np.true_divide(intersection, box_area + cluster_area - intersection + 1e-10) 178 | # iou_ = intersection / (box_area + cluster_area - intersection + 1e-10) 179 | 180 | return iou_ 181 | 182 | 183 | def avg_iou(boxes, clusters): 184 | """ 185 | Calculates the average Intersection over Union (IoU) between a numpy array of boxes and k clusters. 186 | param: 187 | boxes: numpy array of shape (r, 2), where r is the number of rows 188 | clusters: numpy array of shape (k, 2) where k is the number of clusters 189 | return: 190 | average IoU as a single float 191 | """ 192 | return np.mean([np.max(iou(boxes[i], clusters)) for i in range(boxes.shape[0])]) 193 | 194 | 195 | def translate_boxes(boxes): 196 | """ 197 | Translates all the boxes to the origin. 198 | param: 199 | boxes: numpy array of shape (r, 4) 200 | return: 201 | numpy array of shape (r, 2) 202 | """ 203 | new_boxes = boxes.copy() 204 | for row in range(new_boxes.shape[0]): 205 | new_boxes[row][2] = np.abs(new_boxes[row][2] - new_boxes[row][0]) 206 | new_boxes[row][3] = np.abs(new_boxes[row][3] - new_boxes[row][1]) 207 | return np.delete(new_boxes, [0, 1], axis=1) 208 | 209 | 210 | def kmeans(boxes, k, dist=np.median): 211 | """ 212 | Calculates k-means clustering with the Intersection over Union (IoU) metric. 
213 | param: 214 | boxes: numpy array of shape (r, 2), where r is the number of rows 215 | k: number of clusters 216 | dist: distance function 217 | return: 218 | numpy array of shape (k, 2) 219 | """ 220 | rows = boxes.shape[0] 221 | 222 | distances = np.empty((rows, k)) 223 | last_clusters = np.zeros((rows,)) 224 | 225 | np.random.seed() 226 | 227 | # the Forgy method will fail if the whole array contains the same rows 228 | clusters = boxes[np.random.choice(rows, k, replace=False)] 229 | 230 | while True: 231 | for row in range(rows): 232 | distances[row] = 1 - iou(boxes[row], clusters) 233 | 234 | nearest_clusters = np.argmin(distances, axis=1) 235 | 236 | if (last_clusters == nearest_clusters).all(): 237 | break 238 | 239 | for cluster in range(k): 240 | clusters[cluster] = dist(boxes[nearest_clusters == cluster], axis=0) 241 | 242 | last_clusters = nearest_clusters 243 | 244 | return clusters 245 | 246 | def parse_anno(data, target_size=None): 247 | result = [] 248 | for line in data: 249 | 250 | s = line.strip().split(' ') 251 | 252 | if len(s[2])<=4: 253 | 254 | # img_w = int(s[2]) 255 | # img_h = int(s[3]) 256 | img_w = 1280 257 | img_h = 659 258 | s = s[4:] 259 | box_cnt = len(s) // 5 260 | if len(s)%5 != 0: 261 | print(s) 262 | for i in range(box_cnt): 263 | x_min, y_min, x_max, y_max = float(s[i*5+1]), float(s[i*5+2]), float(s[i*5+3]), float(s[i*5+4]) 264 | width = x_max - x_min 265 | height = y_max - y_min 266 | if width == 0 or height == 0: 267 | continue 268 | # assert width >= 0 269 | # assert height >= 0 270 | # use letterbox resize, i.e. keep the original aspect ratio 271 | # get k-means anchors on the resized target image size 272 | if target_size is not None and img_w != 0 and img_h != 0: 273 | resize_ratio = min(target_size[0] / img_w, target_size[1] / img_h) 274 | width *= resize_ratio 275 | height *= resize_ratio 276 | result.append([width, height]) 277 | # get k-means anchors on the original image size 278 | else: 279 | result.append([width, height]) 280 | 281 | # print(s) 282 | else: 283 | # img_w = int(s[3]) 284 | # img_h = int(s[4]) 285 | img_w = 1280 286 | img_h = 659 287 | s = s[5:] 288 | 289 | box_cnt = len(s) // 5 290 | for i in range(box_cnt): 291 | x_min, y_min, x_max, y_max = float(s[i*5+1]), float(s[i*5+2]), float(s[i*5+3]), float(s[i*5+4]) 292 | width = x_max - x_min 293 | height = y_max - y_min 294 | 295 | if width == 0 or height == 0: 296 | continue 297 | # assert width >= 0 298 | # assert height >= 0 299 | # use letterbox resize, i.e. 
keep the original aspect ratio 300 | # get k-means anchors on the resized target image size 301 | if target_size is not None and img_w != 0 and img_h != 0: 302 | resize_ratio = min(target_size[0] / img_w, target_size[1] / img_h) 303 | width *= resize_ratio 304 | height *= resize_ratio 305 | result.append([width, height]) 306 | # get k-means anchors on the original image size 307 | else: 308 | result.append([width, height]) 309 | result = np.asarray(result) 310 | return result 311 | 312 | 313 | def get_kmeans(anno, cluster_num=9): 314 | 315 | anchors = kmeans(anno, cluster_num) 316 | ave_iou = avg_iou(anno, anchors) 317 | 318 | anchors = anchors.astype('int').tolist() 319 | 320 | anchors = sorted(anchors, key=lambda x: x[0] * x[1]) 321 | 322 | return anchors, ave_iou 323 | 324 | # ############################ 325 | 326 | if __name__ == "__main__": 327 | # target resize format: [width, height] 328 | # if target_resize is speficied, the anchors are on the resized image scale 329 | # if target_resize is set to None, the anchors are on the original image scale 330 | # target_size = [416, 416] 331 | target_size = [416, 416] 332 | data = generate_k_means_data() 333 | # print(data) 334 | anno_result = parse_anno(data, target_size=target_size) 335 | 336 | anchors, ave_iou = get_kmeans(anno_result, 9) 337 | 338 | anchor_string = '' 339 | for anchor in anchors: 340 | anchor_string += '{},{}, '.format(anchor[0], anchor[1]) 341 | anchor_string = anchor_string[:-2] 342 | 343 | print('anchors are:') 344 | print(anchor_string) 345 | print('the average iou is:') 346 | print(ave_iou) 347 | # generate_val_txt() 348 | pass 349 | -------------------------------------------------------------------------------- /scripts/xml_to_json.py: -------------------------------------------------------------------------------- 1 | ''' 2 | code by zzg 2020-05-13 3 | ''' 4 | 5 | #批量修改文件夹下的xml为json并存储到另一个文件夹 6 | 7 | import glob 8 | import xmltodict 9 | import json 10 | 11 | path = 'xml/' 12 | path2 = 'json/' 13 | 14 | xml_dir = glob.glob(path + '*.xml') 15 | print(xml_dir) 16 | 17 | def pythonXmlToJson(path): 18 | 19 | xml_dir = glob.glob(path + '*.xml') 20 | # print(len(xml_dir)) 21 | for x in xml_dir: 22 | with open(x) as fd: 23 | convertedDict = xmltodict.parse(fd.read()) 24 | jsonStr = json.dumps(convertedDict, indent=1) 25 | print("jsonStr=",jsonStr) 26 | print(x.split('.')[0]) 27 | json_file = x.split('.')[0].split('/')[-1] +'.json' 28 | with open(path2 + '/' + json_file, 'w') as json_file: 29 | json_file.write(jsonStr) 30 | print("xml_json finished!") 31 | print(len(xml_dir)) 32 | pythonXmlToJson(path) 33 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | from models import * 4 | from utils.utils import * 5 | from utils.datasets import * 6 | from utils.parse_config import * 7 | 8 | import os 9 | import sys 10 | import time 11 | import datetime 12 | import argparse 13 | import tqdm 14 | 15 | import torch 16 | from torch.utils.data import DataLoader 17 | from torchvision import datasets 18 | from torchvision import transforms 19 | from torch.autograd import Variable 20 | import torch.optim as optim 21 | 22 | 23 | def evaluate(model, path, iou_thres, conf_thres, nms_thres, img_size, batch_size): 24 | model.eval() 25 | 26 | # Get dataloader 27 | dataset = ListDataset(path, img_size=img_size, augment=False, multiscale=False) 28 | dataloader = 
torch.utils.data.DataLoader( 29 | dataset, batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn 30 | ) 31 | 32 | Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor 33 | 34 | labels = [] 35 | sample_metrics = [] # List of tuples (TP, confs, pred) 36 | for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")): 37 | 38 | # Extract labels 39 | labels += targets[:, 1].tolist() 40 | # Rescale target 41 | targets[:, 2:] = xywh2xyxy(targets[:, 2:]) 42 | targets[:, 2:] *= img_size 43 | 44 | imgs = Variable(imgs.type(Tensor), requires_grad=False) 45 | 46 | with torch.no_grad(): 47 | outputs = model(imgs) 48 | outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres) 49 | 50 | sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres) 51 | 52 | # Concatenate sample statistics 53 | true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))] 54 | precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels) 55 | 56 | return precision, recall, AP, f1, ap_class 57 | 58 | 59 | if __name__ == "__main__": 60 | parser = argparse.ArgumentParser() 61 | parser.add_argument("--batch_size", type=int, default=8, help="size of each image batch") 62 | parser.add_argument("--model_def", type=str, default="config/yolov3-custom.cfg", help="path to model definition file") 63 | parser.add_argument("--data_config", type=str, default="config/custom.data", help="path to data config file") 64 | parser.add_argument("--weights_path", type=str, default="checkpoints/yolov3_ckpt_195.pth", help="path to weights file") 65 | parser.add_argument("--class_path", type=str, default="data/custom/classes.names", help="path to class label file") 66 | parser.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected") 67 | parser.add_argument("--conf_thres", type=float, default=0.001, help="object confidence threshold") 68 | parser.add_argument("--nms_thres", type=float, default=0.5, help="iou thresshold for non-maximum suppression") 69 | parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation") 70 | parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension") 71 | opt = parser.parse_args() 72 | print(opt) 73 | 74 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 75 | 76 | data_config = parse_data_config(opt.data_config) 77 | valid_path = data_config["valid"] 78 | class_names = load_classes(data_config["names"]) 79 | 80 | # Initiate model 81 | model = Darknet(opt.model_def).to(device) 82 | if opt.weights_path.endswith(".weights"): 83 | # Load darknet weights 84 | model.load_darknet_weights(opt.weights_path) 85 | else: 86 | # Load checkpoint weights 87 | model.load_state_dict(torch.load(opt.weights_path)) 88 | 89 | print("Compute mAP...") 90 | 91 | precision, recall, AP, f1, ap_class = evaluate( 92 | model, 93 | path=valid_path, 94 | iou_thres=opt.iou_thres, 95 | conf_thres=opt.conf_thres, 96 | nms_thres=opt.nms_thres, 97 | img_size=opt.img_size, 98 | batch_size=8, 99 | ) 100 | 101 | print("Average Precisions:") 102 | for i, c in enumerate(ap_class): 103 | print(f"+ Class '{c}' ({class_names[c]}) - AP: {AP[i]}") 104 | 105 | print(f"mAP: {AP.mean()}") 106 | -------------------------------------------------------------------------------- /train.py: 
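The training script below steps the optimizer only once every `--gradient_accumulations` batches, letting gradients from several small batches build up before a single update. A minimal, self-contained sketch of the conventional accumulation pattern follows; every name in it (`accum_steps`, the dummy linear model and random tensors) is illustrative and not an identifier from this repository:

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters())
accum_steps = 2  # analogous in spirit to --gradient_accumulations

optimizer.zero_grad()
for batch_i in range(8):
    imgs = torch.randn(3, 4)
    targets = torch.randn(3, 2)
    loss = nn.functional.mse_loss(model(imgs), targets)
    loss.backward()                       # gradients keep accumulating in .grad
    if (batch_i + 1) % accum_steps == 0:  # step once every accum_steps batches
        optimizer.step()
        optimizer.zero_grad()
```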
-------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | from models import * 4 | # from utils.logger import * 5 | from utils.utils import * 6 | from utils.datasets import * 7 | from utils.parse_config import * 8 | from test import evaluate 9 | 10 | from terminaltables import AsciiTable 11 | 12 | 13 | import os 14 | import sys 15 | import time 16 | import datetime 17 | import argparse 18 | import warnings 19 | warnings.filterwarnings("ignore") 20 | 21 | import torch 22 | from torch.utils.data import DataLoader 23 | from torchvision import datasets 24 | from torchvision import transforms 25 | from torch.autograd import Variable 26 | import torch.optim as optim 27 | 28 | if __name__ == "__main__": 29 | parser = argparse.ArgumentParser() 30 | parser.add_argument("--epochs", type=int, default=300, help="number of epochs") 31 | parser.add_argument("--batch_size", type=int, default=8, help="size of each image batch") 32 | parser.add_argument("--gradient_accumulations", type=int, default=2, help="number of gradient accums before step") 33 | parser.add_argument("--model_def", type=str, default="config/yolov3-custom.cfg", help="path to model definition file") 34 | parser.add_argument("--data_config", type=str, default="config/custom.data", help="path to data config file") 35 | parser.add_argument("--pretrained_weights", type=str, help="if specified starts from checkpoint model") 36 | parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation") 37 | parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension") 38 | parser.add_argument("--checkpoint_interval", type=int, default=5, help="interval between saving model weights") 39 | parser.add_argument("--evaluation_interval", type=int, default=1, help="interval evaluations on validation set") 40 | parser.add_argument("--compute_map", default=False, help="if True computes mAP every tenth batch") 41 | parser.add_argument("--multiscale_training", default=True, help="allow for multi-scale training") 42 | opt = parser.parse_args() 43 | print(opt) 44 | 45 | # logger = Logger("logs") 46 | 47 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 48 | 49 | os.makedirs("output", exist_ok=True) 50 | os.makedirs("checkpoints", exist_ok=True) 51 | 52 | # Get data configuration 53 | data_config = parse_data_config(opt.data_config) 54 | train_path = data_config["train"] 55 | valid_path = data_config["valid"] 56 | class_names = load_classes(data_config["names"]) 57 | 58 | # Initiate model 59 | model = Darknet(opt.model_def).to(device) 60 | model.apply(weights_init_normal) 61 | 62 | # If specified we start from checkpoint 63 | if opt.pretrained_weights: 64 | if opt.pretrained_weights.endswith(".pth"): 65 | model.load_state_dict(torch.load(opt.pretrained_weights)) 66 | else: 67 | model.load_darknet_weights(opt.pretrained_weights) 68 | 69 | # Get dataloader 70 | dataset = ListDataset(train_path, augment=True, multiscale=opt.multiscale_training) 71 | dataloader = torch.utils.data.DataLoader( 72 | dataset, 73 | batch_size=opt.batch_size, 74 | shuffle=True, 75 | num_workers=opt.n_cpu, 76 | pin_memory=True, 77 | collate_fn=dataset.collate_fn, 78 | ) 79 | 80 | optimizer = torch.optim.Adam(model.parameters()) 81 | 82 | metrics = [ 83 | "grid_size", 84 | "loss", 85 | "x", 86 | "y", 87 | "w", 88 | "h", 89 | "conf", 90 | "cls", 91 | "cls_acc", 92 | "recall50", 93 | "recall75", 94 | "precision", 95 | "conf_obj", 
96 | "conf_noobj", 97 | ] 98 | 99 | for epoch in range(opt.epochs): 100 | model.train() 101 | start_time = time.time() 102 | for batch_i, (_, imgs, targets) in enumerate(dataloader): 103 | 104 | # print(imgs) 105 | batches_done = len(dataloader) * epoch + batch_i 106 | 107 | # imgs = Variable(imgs.to(device)) 108 | # targets = Variable(targets.to(device), requires_grad=False) 109 | 110 | imgs = imgs.to(device) 111 | targets = targets.to(device) 112 | 113 | loss, outputs = model(imgs, targets) 114 | loss.backward() 115 | 116 | if batches_done % opt.gradient_accumulations: 117 | # Accumulates gradient before each step 118 | optimizer.step() 119 | optimizer.zero_grad() 120 | 121 | # ---------------- 122 | # Log progress 123 | # ---------------- 124 | 125 | log_str = "\n---- [Epoch %d/%d, Batch %d/%d] ----\n" % (epoch, opt.epochs, batch_i, len(dataloader)) 126 | 127 | metric_table = [["Metrics", *[f"YOLO Layer {i}" for i in range(len(model.yolo_layers))]]] 128 | 129 | # Log metrics at each YOLO layer 130 | for i, metric in enumerate(metrics): 131 | formats = {m: "%.6f" for m in metrics} 132 | formats["grid_size"] = "%2d" 133 | formats["cls_acc"] = "%.2f%%" 134 | row_metrics = [formats[metric] % yolo.metrics.get(metric, 0) for yolo in model.yolo_layers] 135 | metric_table += [[metric, *row_metrics]] 136 | 137 | # Tensorboard logging 138 | tensorboard_log = [] 139 | for j, yolo in enumerate(model.yolo_layers): 140 | for name, metric in yolo.metrics.items(): 141 | if name != "grid_size": 142 | tensorboard_log += [(f"{name}_{j+1}", metric)] 143 | tensorboard_log += [("loss", loss.item())] 144 | # logger.list_of_scalars_summary(tensorboard_log, batches_done) 145 | 146 | log_str += AsciiTable(metric_table).table 147 | log_str += f"\nTotal loss {loss.item()}" 148 | 149 | # Determine approximate time left for epoch 150 | epoch_batches_left = len(dataloader) - (batch_i + 1) 151 | time_left = datetime.timedelta(seconds=epoch_batches_left * (time.time() - start_time) / (batch_i + 1)) 152 | log_str += f"\n---- ETA {time_left}" 153 | 154 | print(log_str) 155 | 156 | model.seen += imgs.size(0) 157 | 158 | if epoch % opt.evaluation_interval == 1: 159 | 160 | print("\n---- Evaluating Model ----") 161 | # Evaluate the model on the validation set 162 | precision, recall, AP, f1, ap_class = evaluate( 163 | model, 164 | path=valid_path, 165 | iou_thres=0.5, 166 | conf_thres=0.5, 167 | nms_thres=0.5, 168 | img_size=opt.img_size, 169 | batch_size=8, 170 | ) 171 | evaluation_metrics = [ 172 | ("val_precision", precision.mean()), 173 | ("val_recall", recall.mean()), 174 | ("val_mAP", AP.mean()), 175 | ("val_f1", f1.mean()), 176 | ] 177 | # logger.list_of_scalars_summary(evaluation_metrics, epoch) 178 | 179 | # Print class APs and mAP 180 | ap_table = [["Index", "Class name", "AP"]] 181 | for i, c in enumerate(ap_class): 182 | ap_table += [[c, class_names[c], "%.5f" % AP[i]]] 183 | print(AsciiTable(ap_table).table) 184 | print(f"---- mAP {AP.mean()}") 185 | 186 | if epoch % opt.checkpoint_interval == 0: 187 | torch.save(model.state_dict(), f"checkpoints/yolov3_ckpt_%d.pth" % epoch) 188 | -------------------------------------------------------------------------------- /utils/augmentations.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | import numpy as np 4 | 5 | 6 | def horisontal_flip(images, targets): 7 | 8 | images = torch.flip(images, [-1]) 9 | targets[:, 2] = 1 - targets[:, 2] 10 | return images, targets 
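The `horisontal_flip` augmentation above mirrors each image along its width and reflects only the normalized x-centre of every target (`1 - x`); widths, heights and y-centres are untouched, which is what keeps the YOLO labels consistent with the flipped pixels. A small self-contained check of that invariant — the tensors here are made up for illustration, but the target layout `(sample idx, class, x, y, w, h)` matches the one built in `utils/datasets.py`:

```python
import torch

images = torch.arange(2 * 3 * 4 * 4, dtype=torch.float32).view(2, 3, 4, 4)
targets = torch.tensor([[0.0, 1.0, 0.25, 0.40, 0.10, 0.20]])  # (idx, class, x, y, w, h), normalized

flipped = torch.flip(images, [-1])   # mirror along the width axis
targets[:, 2] = 1 - targets[:, 2]    # only the x-centre moves: 0.25 -> 0.75

assert torch.allclose(flipped[..., 0], images[..., -1])  # last column becomes the first
assert targets[0, 2] == 0.75
```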
-------------------------------------------------------------------------------- /utils/datasets.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import random 3 | import os 4 | import sys 5 | import numpy as np 6 | from PIL import Image 7 | import torch 8 | import torch.nn.functional as F 9 | 10 | from utils.augmentations import horisontal_flip 11 | from torch.utils.data import Dataset 12 | import torchvision.transforms as transforms 13 | 14 | 15 | def pad_to_square(img, pad_value): 16 | """ 17 | input:四维或者五维的tensor Variabe 18 | pad:不同Tensor的填充方式 19 | 1.四维Tensor:传入四元素tuple(pad_l, pad_r, pad_t, pad_b), 20 | 指的是(左填充,右填充,上填充,下填充),其数值代表填充次数 21 | 2.六维Tensor:传入六元素tuple(pleft, pright, ptop, pbottom, pfront, pback), 22 | 指的是(左填充,右填充,上填充,下填充,前填充,后填充),其数值代表填充次数 23 | mode: ’constant‘, ‘reflect’ or ‘replicate’三种模式,指的是常量,反射,复制三种模式 24 | value:填充的数值,在"contant"模式下默认填充0,mode="reflect" or "replicate"时没有 25 | value参数 26 | 27 | """ 28 | 29 | c, h, w = img.shape 30 | dim_diff = np.abs(h - w) 31 | # (upper / left) padding and (lower / right) padding 32 | pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2 33 | # Determine padding 34 | pad = (0, 0, pad1, pad2) if h <= w else (pad1, pad2, 0, 0) 35 | # Add padding 36 | img = F.pad(img, pad, "constant", value=pad_value) 37 | 38 | return img, pad 39 | 40 | 41 | def resize(image, size): 42 | image = F.interpolate(image.unsqueeze(0), size=size, mode="nearest").squeeze(0) 43 | return image 44 | 45 | 46 | def random_resize(images, min_size=288, max_size=448): 47 | new_size = random.sample(list(range(min_size, max_size + 1, 32)), 1)[0] 48 | images = F.interpolate(images, size=new_size, mode="nearest") 49 | return images 50 | 51 | 52 | class ImageFolder(Dataset): 53 | def __init__(self, folder_path, img_size=416): 54 | self.files = sorted(glob.glob("%s/*.*" % folder_path)) 55 | self.img_size = img_size 56 | 57 | def __getitem__(self, index): 58 | img_path = self.files[index % len(self.files)] 59 | # Extract image as PyTorch tensor 60 | img = transforms.ToTensor()(Image.open(img_path).convert('RGB')) 61 | # Pad to square resolution 62 | img, _ = pad_to_square(img, 0) 63 | # Resize 64 | img = resize(img, self.img_size) 65 | 66 | return img_path, img 67 | 68 | def __len__(self): 69 | return len(self.files) 70 | 71 | 72 | class ListDataset(Dataset): 73 | def __init__(self, list_path, img_size=416, augment=True, multiscale=True, normalized_labels=True): 74 | with open(list_path, "r") as file: 75 | self.img_files = file.readlines() 76 | # print(self.img_files) 77 | 78 | self.label_files = [ 79 | path.replace("images", "labels").replace(".png", ".txt").replace(".jpg", ".txt") 80 | for path in self.img_files 81 | ] 82 | # print(self.label_files ) 83 | # print(self.label_files) 84 | self.img_size = img_size 85 | self.max_objects = 100 86 | self.augment = augment 87 | self.multiscale = multiscale 88 | self.normalized_labels = normalized_labels 89 | self.min_size = self.img_size - 3 * 32 90 | self.max_size = self.img_size + 3 * 32 91 | self.batch_count = 0 92 | 93 | def __getitem__(self, index): 94 | 95 | # --------- 96 | # Image 97 | # --------- 98 | 99 | img_path = self.img_files[index % len(self.img_files)].rstrip() 100 | # print(img_path) 101 | 102 | # Extract image as PyTorch tensor 103 | img = transforms.ToTensor()(Image.open(img_path).convert('RGB')) 104 | # print(img) 105 | # Handle images with less than three channels 106 | if len(img.shape) != 3: 107 | img = img.unsqueeze(0) 108 | img = img.expand((3, 
img.shape[1:])) 109 | 110 | _, h, w = img.shape 111 | h_factor, w_factor = (h, w) if self.normalized_labels else (1, 1) 112 | # Pad to square resolution 113 | img, pad = pad_to_square(img, 0) 114 | _, padded_h, padded_w = img.shape 115 | 116 | # --------- 117 | # Label 118 | # --------- 119 | 120 | label_path = self.label_files[index % len(self.img_files)].rstrip() 121 | # print(label_path) 122 | 123 | targets = None 124 | if os.path.exists(label_path): 125 | boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5)) 126 | # Extract coordinates for unpadded + unscaled image 127 | x1 = w_factor * (boxes[:, 1] - boxes[:, 3] / 2) 128 | y1 = h_factor * (boxes[:, 2] - boxes[:, 4] / 2) 129 | x2 = w_factor * (boxes[:, 1] + boxes[:, 3] / 2) 130 | y2 = h_factor * (boxes[:, 2] + boxes[:, 4] / 2) 131 | # Adjust for added padding 132 | x1 += pad[0] 133 | y1 += pad[2] 134 | x2 += pad[1] 135 | y2 += pad[3] 136 | # Returns (x, y, w, h) 137 | boxes[:, 1] = ((x1 + x2) / 2) / padded_w 138 | boxes[:, 2] = ((y1 + y2) / 2) / padded_h 139 | boxes[:, 3] *= w_factor / padded_w 140 | boxes[:, 4] *= h_factor / padded_h 141 | 142 | targets = torch.zeros((len(boxes), 6)) 143 | targets[:, 1:] = boxes 144 | # print(targets) 145 | 146 | # Apply augmentations 147 | if self.augment: 148 | if np.random.random() < 0.5: 149 | img, targets = horisontal_flip(img, targets) 150 | 151 | return img_path, img, targets 152 | 153 | def collate_fn(self, batch): 154 | paths, imgs, targets = list(zip(*batch)) 155 | # Remove empty placeholder targets 156 | targets = [boxes for boxes in targets if boxes is not None] 157 | # Add sample index to targets 158 | for i, boxes in enumerate(targets): 159 | boxes[:, 0] = i 160 | if targets is None: 161 | return False 162 | targets = torch.cat(targets, 0) 163 | # Selects new image size every tenth batch 164 | if self.multiscale and self.batch_count % 10 == 0: 165 | self.img_size = random.choice(range(self.min_size, self.max_size + 1, 32)) 166 | # Resize images to input shape 167 | imgs = torch.stack([resize(img, self.img_size) for img in imgs]) 168 | self.batch_count += 1 169 | return paths, imgs, targets 170 | 171 | def __len__(self): 172 | return len(self.img_files) 173 | -------------------------------------------------------------------------------- /utils/parse_config.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | def parse_model_config(path): 4 | """Parses the yolo-v3 layer configuration file and returns module definitions""" 5 | file = open(path, 'r') 6 | lines = file.read().split('\n') 7 | lines = [x for x in lines if x and not x.startswith('#')] 8 | lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces 9 | module_defs = [] 10 | for line in lines: 11 | if line.startswith('['): # This marks the start of a new block 12 | module_defs.append({}) 13 | module_defs[-1]['type'] = line[1:-1].rstrip() 14 | if module_defs[-1]['type'] == 'convolutional': 15 | module_defs[-1]['batch_normalize'] = 0 16 | else: 17 | key, value = line.split("=") 18 | value = value.strip() 19 | module_defs[-1][key.rstrip()] = value.strip() 20 | 21 | return module_defs 22 | 23 | def parse_data_config(path): 24 | """Parses the data configuration file""" 25 | options = dict() 26 | options['gpus'] = '0,1,2,3' 27 | options['num_workers'] = '10' 28 | with open(path, 'r') as fp: 29 | lines = fp.readlines() 30 | for line in lines: 31 | line = line.strip() 32 | if line == '' or line.startswith('#'): 33 | continue 34 | key, value = line.split('=') 35 | 
options[key.strip()] = value.strip() 36 | return options 37 | -------------------------------------------------------------------------------- /utils/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import math 3 | import time 4 | import tqdm 5 | import torch 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | from torch.autograd import Variable 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | import matplotlib.patches as patches 12 | 13 | 14 | def to_cpu(tensor): 15 | return tensor.detach().cpu() 16 | 17 | 18 | def load_classes(path): 19 | """ 20 | Loads class labels at 'path' 21 | """ 22 | fp = open(path, "r") 23 | names = fp.read().split("\n")[:-1] 24 | return names 25 | 26 | 27 | def weights_init_normal(m): 28 | classname = m.__class__.__name__ 29 | if classname.find("Conv") != -1: 30 | torch.nn.init.normal_(m.weight.data, 0.0, 0.02) 31 | elif classname.find("BatchNorm2d") != -1: 32 | torch.nn.init.normal_(m.weight.data, 1.0, 0.02) 33 | torch.nn.init.constant_(m.bias.data, 0.0) 34 | 35 | 36 | def rescale_boxes(boxes, current_dim, original_shape): 37 | """ Rescales bounding boxes to the original shape """ 38 | orig_h, orig_w = original_shape 39 | # The amount of padding that was added 40 | pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape)) 41 | pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape)) 42 | # Image height and width after padding is removed 43 | unpad_h = current_dim - pad_y 44 | unpad_w = current_dim - pad_x 45 | # Rescale bounding boxes to dimension of original image 46 | boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w 47 | boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h 48 | boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w 49 | boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h 50 | return boxes 51 | 52 | 53 | def xywh2xyxy(x): 54 | y = x.new(x.shape) 55 | y[..., 0] = x[..., 0] - x[..., 2] / 2 56 | y[..., 1] = x[..., 1] - x[..., 3] / 2 57 | y[..., 2] = x[..., 0] + x[..., 2] / 2 58 | y[..., 3] = x[..., 1] + x[..., 3] / 2 59 | return y 60 | 61 | 62 | def ap_per_class(tp, conf, pred_cls, target_cls): 63 | """ Compute the average precision, given the recall and precision curves. 64 | Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. 65 | # Arguments 66 | tp: True positives (list). 67 | conf: Objectness value from 0-1 (list). 68 | pred_cls: Predicted object classes (list). 69 | target_cls: True object classes (list). 70 | # Returns 71 | The average precision as computed in py-faster-rcnn. 
72 | """ 73 | 74 | # Sort by objectness 75 | i = np.argsort(-conf) 76 | tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] 77 | 78 | # Find unique classes 79 | unique_classes = np.unique(target_cls) 80 | 81 | # Create Precision-Recall curve and compute AP for each class 82 | ap, p, r = [], [], [] 83 | for c in tqdm.tqdm(unique_classes, desc="Computing AP"): 84 | i = pred_cls == c 85 | n_gt = (target_cls == c).sum() # Number of ground truth objects 86 | n_p = i.sum() # Number of predicted objects 87 | 88 | if n_p == 0 and n_gt == 0: 89 | continue 90 | elif n_p == 0 or n_gt == 0: 91 | ap.append(0) 92 | r.append(0) 93 | p.append(0) 94 | else: 95 | # Accumulate FPs and TPs 96 | fpc = (1 - tp[i]).cumsum() 97 | tpc = (tp[i]).cumsum() 98 | 99 | # Recall 100 | recall_curve = tpc / (n_gt + 1e-16) 101 | r.append(recall_curve[-1]) 102 | 103 | # Precision 104 | precision_curve = tpc / (tpc + fpc) 105 | p.append(precision_curve[-1]) 106 | 107 | # AP from recall-precision curve 108 | ap.append(compute_ap(recall_curve, precision_curve)) 109 | 110 | # Compute F1 score (harmonic mean of precision and recall) 111 | p, r, ap = np.array(p), np.array(r), np.array(ap) 112 | f1 = 2 * p * r / (p + r + 1e-16) 113 | 114 | return p, r, ap, f1, unique_classes.astype("int32") 115 | 116 | 117 | def compute_ap(recall, precision): 118 | """ Compute the average precision, given the recall and precision curves. 119 | Code originally from https://github.com/rbgirshick/py-faster-rcnn. 120 | 121 | # Arguments 122 | recall: The recall curve (list). 123 | precision: The precision curve (list). 124 | # Returns 125 | The average precision as computed in py-faster-rcnn. 126 | """ 127 | # correct AP calculation 128 | # first append sentinel values at the end 129 | mrec = np.concatenate(([0.0], recall, [1.0])) 130 | mpre = np.concatenate(([0.0], precision, [0.0])) 131 | 132 | # compute the precision envelope 133 | for i in range(mpre.size - 1, 0, -1): 134 | mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) 135 | 136 | # to calculate area under PR curve, look for points 137 | # where X axis (recall) changes value 138 | i = np.where(mrec[1:] != mrec[:-1])[0] 139 | 140 | # and sum (\Delta recall) * prec 141 | ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) 142 | return ap 143 | 144 | 145 | def get_batch_statistics(outputs, targets, iou_threshold): 146 | """ Compute true positives, predicted scores and predicted labels per sample """ 147 | batch_metrics = [] 148 | for sample_i in range(len(outputs)): 149 | 150 | if outputs[sample_i] is None: 151 | continue 152 | 153 | output = outputs[sample_i] 154 | pred_boxes = output[:, :4] 155 | pred_scores = output[:, 4] 156 | pred_labels = output[:, -1] 157 | 158 | true_positives = np.zeros(pred_boxes.shape[0]) 159 | 160 | annotations = targets[targets[:, 0] == sample_i][:, 1:] 161 | target_labels = annotations[:, 0] if len(annotations) else [] 162 | if len(annotations): 163 | detected_boxes = [] 164 | target_boxes = annotations[:, 1:] 165 | 166 | for pred_i, (pred_box, pred_label) in enumerate(zip(pred_boxes, pred_labels)): 167 | 168 | # If targets are found break 169 | if len(detected_boxes) == len(annotations): 170 | break 171 | 172 | # Ignore if label is not one of the target labels 173 | if pred_label not in target_labels: 174 | continue 175 | 176 | iou, box_index = bbox_iou(pred_box.unsqueeze(0), target_boxes).max(0) 177 | if iou >= iou_threshold and box_index not in detected_boxes: 178 | true_positives[pred_i] = 1 179 | detected_boxes += [box_index] 180 | batch_metrics.append([true_positives, 
145 | def get_batch_statistics(outputs, targets, iou_threshold): 146 | """ Compute true positives, predicted scores, and predicted labels per sample """ 147 | batch_metrics = [] 148 | for sample_i in range(len(outputs)): 149 | 150 | if outputs[sample_i] is None: 151 | continue 152 | 153 | output = outputs[sample_i] 154 | pred_boxes = output[:, :4] 155 | pred_scores = output[:, 4] 156 | pred_labels = output[:, -1] 157 | 158 | true_positives = np.zeros(pred_boxes.shape[0]) 159 | 160 | annotations = targets[targets[:, 0] == sample_i][:, 1:] 161 | target_labels = annotations[:, 0] if len(annotations) else [] 162 | if len(annotations): 163 | detected_boxes = [] 164 | target_boxes = annotations[:, 1:] 165 | 166 | for pred_i, (pred_box, pred_label) in enumerate(zip(pred_boxes, pred_labels)): 167 | 168 | # If all targets have already been matched, stop early 169 | if len(detected_boxes) == len(annotations): 170 | break 171 | 172 | # Ignore if label is not one of the target labels 173 | if pred_label not in target_labels: 174 | continue 175 | 176 | iou, box_index = bbox_iou(pred_box.unsqueeze(0), target_boxes).max(0) 177 | if iou >= iou_threshold and box_index not in detected_boxes: 178 | true_positives[pred_i] = 1 179 | detected_boxes += [box_index] 180 | batch_metrics.append([true_positives, pred_scores, pred_labels]) 181 | return batch_metrics 182 | 183 | 184 | def bbox_wh_iou(wh1, wh2): 185 | wh2 = wh2.t() 186 | w1, h1 = wh1[0], wh1[1] 187 | w2, h2 = wh2[0], wh2[1] 188 | inter_area = torch.min(w1, w2) * torch.min(h1, h2) 189 | union_area = (w1 * h1 + 1e-16) + w2 * h2 - inter_area 190 | return inter_area / union_area 191 | 192 | 193 | def bbox_iou(box1, box2, x1y1x2y2=True): 194 | """ 195 | Returns the IoU of two bounding boxes 196 | """ 197 | if not x1y1x2y2: 198 | # Transform from center and width to exact coordinates 199 | b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2 200 | b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2 201 | b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2 202 | b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2 203 | else: 204 | # Get the coordinates of bounding boxes 205 | b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3] 206 | b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3] 207 | 208 | # Get the coordinates of the intersection rectangle 209 | inter_rect_x1 = torch.max(b1_x1, b2_x1) 210 | inter_rect_y1 = torch.max(b1_y1, b2_y1) 211 | inter_rect_x2 = torch.min(b1_x2, b2_x2) 212 | inter_rect_y2 = torch.min(b1_y2, b2_y2) 213 | # Intersection area 214 | inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp( 215 | inter_rect_y2 - inter_rect_y1 + 1, min=0 216 | ) 217 | # Union area 218 | b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1) 219 | b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1) 220 | 221 | iou = inter_area / (b1_area + b2_area - inter_area + 1e-16) 222 | 223 | return iou 224 | 225 |
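# A minimal sketch of bbox_iou above on hand-made corner-format boxes (x1y1x2y2=True);
# the coordinates are invented for illustration only.
import torch

box_a = torch.tensor([[10.0, 10.0, 50.0, 50.0]])     # one box as (x1, y1, x2, y2)
boxes_b = torch.tensor([[30.0, 30.0, 70.0, 70.0],
                        [60.0, 60.0, 80.0, 80.0]])    # two candidate boxes
print(bbox_iou(box_a, boxes_b))  # IoU of box_a against each row, roughly [0.15, 0.00]
# Note the "+ 1" pixel convention in the widths/heights, inherited from the original implementation.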
226 | def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4): 227 | """ 228 | Removes detections with lower object confidence score than 'conf_thres' and performs 229 | Non-Maximum Suppression to further filter detections. 230 | Returns detections with shape: 231 | (x1, y1, x2, y2, object_conf, class_score, class_pred) 232 | """ 233 | 234 | # From (center x, center y, width, height) to (x1, y1, x2, y2) 235 | prediction[..., :4] = xywh2xyxy(prediction[..., :4]) 236 | output = [None for _ in range(len(prediction))] 237 | for image_i, image_pred in enumerate(prediction): 238 | # Filter out confidence scores below threshold 239 | image_pred = image_pred[image_pred[:, 4] >= conf_thres] 240 | # If none are remaining => process next image 241 | if not image_pred.size(0): 242 | continue 243 | # Object confidence times class confidence 244 | score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0] 245 | # Sort by this combined score 246 | image_pred = image_pred[(-score).argsort()] 247 | class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True) 248 | detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1) 249 | # Perform non-maximum suppression 250 | keep_boxes = [] 251 | while detections.size(0): 252 | large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres 253 | label_match = detections[0, -1] == detections[:, -1] 254 | # Indices of boxes with lower confidence scores, large IOUs and matching labels 255 | invalid = large_overlap & label_match 256 | weights = detections[invalid, 4:5] 257 | # Merge overlapping bboxes by order of confidence 258 | detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum() 259 | keep_boxes += [detections[0]] 260 | detections = detections[~invalid] 261 | if keep_boxes: 262 | output[image_i] = torch.stack(keep_boxes) 263 | 264 | return output 265 | 266 |
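# A rough sketch of how non_max_suppression is typically driven at inference time;
# `model` and `imgs` are placeholders, and the (batch, num_boxes, 5 + num_classes)
# output layout is an assumption about the Darknet model rather than something shown here.
import torch

with torch.no_grad():
    raw = model(imgs)
    detections = non_max_suppression(raw, conf_thres=0.8, nms_thres=0.4)
for img_detections in detections:
    if img_detections is None:  # nothing survived the confidence filter for this image
        continue
    for x1, y1, x2, y2, obj_conf, cls_score, cls_pred in img_detections:
        pass  # draw or record the box here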
267 | def build_targets(pred_boxes, pred_cls, target, anchors, ignore_thres): 268 | # Builds the per-anchor training targets (masks, offsets, one-hot classes) for one YOLO layer 269 | ByteTensor = torch.cuda.ByteTensor if pred_boxes.is_cuda else torch.ByteTensor 270 | FloatTensor = torch.cuda.FloatTensor if pred_boxes.is_cuda else torch.FloatTensor 271 | 272 | nB = pred_boxes.size(0) 273 | nA = pred_boxes.size(1) 274 | nC = pred_cls.size(-1) 275 | nG = pred_boxes.size(2) 276 | 277 | # Output tensors 278 | obj_mask = ByteTensor(nB, nA, nG, nG).fill_(0) 279 | noobj_mask = ByteTensor(nB, nA, nG, nG).fill_(1) 280 | class_mask = FloatTensor(nB, nA, nG, nG).fill_(0) 281 | iou_scores = FloatTensor(nB, nA, nG, nG).fill_(0) 282 | tx = FloatTensor(nB, nA, nG, nG).fill_(0) 283 | ty = FloatTensor(nB, nA, nG, nG).fill_(0) 284 | tw = FloatTensor(nB, nA, nG, nG).fill_(0) 285 | th = FloatTensor(nB, nA, nG, nG).fill_(0) 286 | tcls = FloatTensor(nB, nA, nG, nG, nC).fill_(0) 287 | 288 | # Scale normalized target boxes to grid-cell coordinates 289 | target_boxes = target[:, 2:6] * nG 290 | gxy = target_boxes[:, :2] 291 | gwh = target_boxes[:, 2:] 292 | # Get anchors with best iou 293 | ious = torch.stack([bbox_wh_iou(anchor, gwh) for anchor in anchors]) 294 | best_ious, best_n = ious.max(0) 295 | # Separate target values 296 | b, target_labels = target[:, :2].long().t() 297 | gx, gy = gxy.t() 298 | gw, gh = gwh.t() 299 | gi, gj = gxy.long().t() 300 | # Clamp grid-cell indices so boxes at the image border stay inside the feature-map grid 301 | gi[gi < 0] = 0 302 | gj[gj < 0] = 0 303 | gi[gi > nG - 1] = nG - 1 304 | gj[gj > nG - 1] = nG - 1 305 | # Set masks 306 | obj_mask[b, best_n, gj, gi] = 1 307 | noobj_mask[b, best_n, gj, gi] = 0 308 | 309 | # Set noobj mask to zero where iou exceeds ignore threshold 310 | for i, anchor_ious in enumerate(ious.t()): 311 | noobj_mask[b[i], anchor_ious > ignore_thres, gj[i], gi[i]] = 0 312 | 313 | # Coordinates 314 | tx[b, best_n, gj, gi] = gx - gx.floor() 315 | ty[b, best_n, gj, gi] = gy - gy.floor() 316 | # Width and height 317 | tw[b, best_n, gj, gi] = torch.log(gw / anchors[best_n][:, 0] + 1e-16) 318 | th[b, best_n, gj, gi] = torch.log(gh / anchors[best_n][:, 1] + 1e-16) 319 | # One-hot encoding of label 320 | tcls[b, best_n, gj, gi, target_labels] = 1 321 | # Compute label correctness and iou at best anchor 322 | class_mask[b, best_n, gj, gi] = (pred_cls[b, best_n, gj, gi].argmax(-1) == target_labels).float() 323 | iou_scores[b, best_n, gj, gi] = bbox_iou(pred_boxes[b, best_n, gj, gi], target_boxes, x1y1x2y2=False) 324 | 325 | tconf = obj_mask.float() 326 | return iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf 327 | -------------------------------------------------------------------------------- /weights/download_weights.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Download weights for vanilla YOLOv3 3 | wget -c https://pjreddie.com/media/files/yolov3.weights 4 | # Download weights for tiny YOLOv3 5 | wget -c https://pjreddie.com/media/files/yolov3-tiny.weights 6 | # Download weights for backbone network 7 | wget -c https://pjreddie.com/media/files/darknet53.conv.74 8 | --------------------------------------------------------------------------------
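Taken together, the helpers in utils/utils.py above are meant to be chained at evaluation time: run non_max_suppression on the raw model output, score the surviving boxes with get_batch_statistics, then reduce everything with the per-class AP routine. The sketch below shows that flow under several assumptions: `model` and `loader` are placeholders, the per-class AP helper defined at the top of utils.py is assumed to be named ap_per_class (as in the upstream PyTorch-YOLOv3 code this repo follows), and targets are assumed to already be corner-format boxes in input-image pixels with columns (sample_idx, class, x1, y1, x2, y2).

import numpy as np
import torch
from utils.utils import non_max_suppression, get_batch_statistics, ap_per_class

labels, sample_metrics = [], []
for imgs, targets in loader:                  # placeholder dataloader
    labels += targets[:, 1].tolist()          # collect all ground-truth class ids
    with torch.no_grad():
        outputs = non_max_suppression(model(imgs), conf_thres=0.5, nms_thres=0.4)
    sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=0.5)

# Concatenate per-image statistics and compute precision/recall/AP/F1 per class
true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)
print("mAP:", AP.mean())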