├── IRG.prototxt ├── IRG_Transformation.prototxt ├── LICENSE ├── README.md ├── ResNet-20.prototxt ├── batch_euclidean_map_layer.cpp ├── batch_euclidean_map_layer.cu ├── batch_euclidean_map_layer.hpp ├── batch_euclidean_vector_layer.cpp ├── batch_euclidean_vector_layer.cu ├── batch_euclidean_vector_layer.hpp └── img ├── Exp_results.png └── framework.png /IRG.prototxt: -------------------------------------------------------------------------------- 1 | name: "resnet_cifar10_IRG" 2 | layer { 3 | name: "Data1" 4 | type: "Data" 5 | top: "Data1" 6 | top: "Data2" 7 | include { 8 | phase: TRAIN 9 | } 10 | transform_param { 11 | mean_file: "yourpath/mean_pad.binaryproto" 12 | crop_size: 32 13 | mirror:true 14 | } 15 | data_param { 16 | source: "yourpath/cifar-10-train_lmdb" 17 | batch_size: 64 18 | backend: LMDB 19 | } 20 | } 21 | layer { 22 | name: "Data1" 23 | type: "Data" 24 | top: "Data1" 25 | top: "Data2" 26 | include { 27 | phase: TEST 28 | } 29 | transform_param { 30 | mean_file: "yourpath/mean.binaryproto" 31 | } 32 | data_param { 33 | source: "yourpath/cifar10_test_lmdb" 34 | batch_size: 100 35 | backend: LMDB 36 | } 37 | } 38 | layer { 39 | name: "Convolution1" 40 | type: "Convolution" 41 | bottom: "Data1" 42 | top: "Convolution1" 43 | param { 44 | lr_mult: 0 45 | decay_mult: 0 46 | } 47 | param { 48 | lr_mult: 0 49 | decay_mult: 0 50 | } 51 | convolution_param { 52 | num_output: 16 53 | pad: 1 54 | kernel_size: 3 55 | stride: 1 56 | weight_filler { 57 | type: "msra" 58 | } 59 | bias_filler { 60 | type: "constant" 61 | value: 0 62 | } 63 | } 64 | } 65 | layer { 66 | name: "BatchNorm1" 67 | type: "BatchNorm" 68 | bottom: "Convolution1" 69 | top: "Convolution1" 70 | param { 71 | lr_mult: 0 72 | decay_mult: 0 73 | } 74 | param { 75 | lr_mult: 0 76 | decay_mult: 0 77 | } 78 | param { 79 | lr_mult: 0 80 | decay_mult: 0 81 | } 82 | batch_norm_param { 83 | use_global_stats: true 84 | } 85 | } 86 | layer { 87 | name: "Scale1" 88 | type: "Scale" 89 | bottom: "Convolution1" 
90 | top: "Convolution1" 91 | param { 92 | lr_mult: 0 93 | decay_mult: 0 94 | } 95 | param { 96 | lr_mult: 0 97 | decay_mult: 0 98 | } 99 | scale_param { 100 | bias_term: true 101 | } 102 | } 103 | layer { 104 | name: "ReLU1" 105 | type: "ReLU" 106 | bottom: "Convolution1" 107 | top: "Convolution1" 108 | } 109 | layer { 110 | name: "Convolution2" 111 | type: "Convolution" 112 | bottom: "Convolution1" 113 | top: "Convolution2" 114 | param { 115 | lr_mult: 0 116 | decay_mult: 0 117 | } 118 | param { 119 | lr_mult: 0 120 | decay_mult: 0 121 | } 122 | convolution_param { 123 | num_output: 16 124 | pad: 1 125 | kernel_size: 3 126 | stride: 1 127 | weight_filler { 128 | type: "msra" 129 | } 130 | bias_filler { 131 | type: "constant" 132 | value: 0 133 | } 134 | } 135 | } 136 | layer { 137 | name: "BatchNorm2" 138 | type: "BatchNorm" 139 | bottom: "Convolution2" 140 | top: "Convolution2" 141 | param { 142 | lr_mult: 0 143 | decay_mult: 0 144 | } 145 | param { 146 | lr_mult: 0 147 | decay_mult: 0 148 | } 149 | param { 150 | lr_mult: 0 151 | decay_mult: 0 152 | } 153 | batch_norm_param { 154 | use_global_stats: true 155 | } 156 | } 157 | layer { 158 | name: "Scale2" 159 | type: "Scale" 160 | bottom: "Convolution2" 161 | top: "Convolution2" 162 | param { 163 | lr_mult: 0 164 | decay_mult: 0 165 | } 166 | param { 167 | lr_mult: 0 168 | decay_mult: 0 169 | } 170 | scale_param { 171 | bias_term: true 172 | } 173 | } 174 | layer { 175 | name: "ReLU2" 176 | type: "ReLU" 177 | bottom: "Convolution2" 178 | top: "Convolution2" 179 | } 180 | layer { 181 | name: "Convolution3" 182 | type: "Convolution" 183 | bottom: "Convolution2" 184 | top: "Convolution3" 185 | param { 186 | lr_mult: 0 187 | decay_mult: 0 188 | } 189 | param { 190 | lr_mult: 0 191 | decay_mult: 0 192 | } 193 | convolution_param { 194 | num_output: 16 195 | pad: 1 196 | kernel_size: 3 197 | stride: 1 198 | weight_filler { 199 | type: "msra" 200 | } 201 | bias_filler { 202 | type: "constant" 203 | value: 0 204 | } 205 
| } 206 | } 207 | layer { 208 | name: "BatchNorm3" 209 | type: "BatchNorm" 210 | bottom: "Convolution3" 211 | top: "Convolution3" 212 | param { 213 | lr_mult: 0 214 | decay_mult: 0 215 | } 216 | param { 217 | lr_mult: 0 218 | decay_mult: 0 219 | } 220 | param { 221 | lr_mult: 0 222 | decay_mult: 0 223 | } 224 | batch_norm_param { 225 | use_global_stats: true 226 | } 227 | } 228 | layer { 229 | name: "Scale3" 230 | type: "Scale" 231 | bottom: "Convolution3" 232 | top: "Convolution3" 233 | param { 234 | lr_mult: 0 235 | decay_mult: 0 236 | } 237 | param { 238 | lr_mult: 0 239 | decay_mult: 0 240 | } 241 | scale_param { 242 | bias_term: true 243 | } 244 | } 245 | layer { 246 | name: "Eltwise1" 247 | type: "Eltwise" 248 | bottom: "Convolution1" 249 | bottom: "Convolution3" 250 | top: "Eltwise1" 251 | eltwise_param { 252 | operation: SUM 253 | } 254 | } 255 | layer { 256 | name: "ReLU3" 257 | type: "ReLU" 258 | bottom: "Eltwise1" 259 | top: "Eltwise1" 260 | } 261 | layer { 262 | name: "Convolution4" 263 | type: "Convolution" 264 | bottom: "Eltwise1" 265 | top: "Convolution4" 266 | param { 267 | lr_mult: 0 268 | decay_mult: 0 269 | } 270 | param { 271 | lr_mult: 0 272 | decay_mult: 0 273 | } 274 | convolution_param { 275 | num_output: 16 276 | pad: 1 277 | kernel_size: 3 278 | stride: 1 279 | weight_filler { 280 | type: "msra" 281 | } 282 | bias_filler { 283 | type: "constant" 284 | value: 0 285 | } 286 | } 287 | } 288 | layer { 289 | name: "BatchNorm4" 290 | type: "BatchNorm" 291 | bottom: "Convolution4" 292 | top: "Convolution4" 293 | param { 294 | lr_mult: 0 295 | decay_mult: 0 296 | } 297 | param { 298 | lr_mult: 0 299 | decay_mult: 0 300 | } 301 | param { 302 | lr_mult: 0 303 | decay_mult: 0 304 | } 305 | batch_norm_param { 306 | use_global_stats: true 307 | } 308 | } 309 | layer { 310 | name: "Scale4" 311 | type: "Scale" 312 | bottom: "Convolution4" 313 | top: "Convolution4" 314 | param { 315 | lr_mult: 0 316 | decay_mult: 0 317 | } 318 | param { 319 | lr_mult: 0 
320 | decay_mult: 0 321 | } 322 | scale_param { 323 | bias_term: true 324 | } 325 | } 326 | layer { 327 | name: "ReLU4" 328 | type: "ReLU" 329 | bottom: "Convolution4" 330 | top: "Convolution4" 331 | } 332 | layer { 333 | name: "Convolution5" 334 | type: "Convolution" 335 | bottom: "Convolution4" 336 | top: "Convolution5" 337 | param { 338 | lr_mult: 0 339 | decay_mult: 0 340 | } 341 | param { 342 | lr_mult: 0 343 | decay_mult: 0 344 | } 345 | convolution_param { 346 | num_output: 16 347 | pad: 1 348 | kernel_size: 3 349 | stride: 1 350 | weight_filler { 351 | type: "msra" 352 | } 353 | bias_filler { 354 | type: "constant" 355 | value: 0 356 | } 357 | } 358 | } 359 | layer { 360 | name: "BatchNorm5" 361 | type: "BatchNorm" 362 | bottom: "Convolution5" 363 | top: "Convolution5" 364 | param { 365 | lr_mult: 0 366 | decay_mult: 0 367 | } 368 | param { 369 | lr_mult: 0 370 | decay_mult: 0 371 | } 372 | param { 373 | lr_mult: 0 374 | decay_mult: 0 375 | } 376 | batch_norm_param { 377 | use_global_stats: true 378 | } 379 | } 380 | layer { 381 | name: "Scale5" 382 | type: "Scale" 383 | bottom: "Convolution5" 384 | top: "Convolution5" 385 | param { 386 | lr_mult: 0 387 | decay_mult: 0 388 | } 389 | param { 390 | lr_mult: 0 391 | decay_mult: 0 392 | } 393 | scale_param { 394 | bias_term: true 395 | } 396 | } 397 | layer { 398 | name: "Eltwise2" 399 | type: "Eltwise" 400 | bottom: "Eltwise1" 401 | bottom: "Convolution5" 402 | top: "Eltwise2" 403 | eltwise_param { 404 | operation: SUM 405 | } 406 | } 407 | layer { 408 | name: "ReLU5" 409 | type: "ReLU" 410 | bottom: "Eltwise2" 411 | top: "Eltwise2" 412 | } 413 | layer { 414 | name: "Convolution6" 415 | type: "Convolution" 416 | bottom: "Eltwise2" 417 | top: "Convolution6" 418 | param { 419 | lr_mult: 0 420 | decay_mult: 0 421 | } 422 | param { 423 | lr_mult: 0 424 | decay_mult: 0 425 | } 426 | convolution_param { 427 | num_output: 16 428 | pad: 1 429 | kernel_size: 3 430 | stride: 1 431 | weight_filler { 432 | type: "msra" 
433 | } 434 | bias_filler { 435 | type: "constant" 436 | value: 0 437 | } 438 | } 439 | } 440 | layer { 441 | name: "BatchNorm6" 442 | type: "BatchNorm" 443 | bottom: "Convolution6" 444 | top: "Convolution6" 445 | param { 446 | lr_mult: 0 447 | decay_mult: 0 448 | } 449 | param { 450 | lr_mult: 0 451 | decay_mult: 0 452 | } 453 | param { 454 | lr_mult: 0 455 | decay_mult: 0 456 | } 457 | batch_norm_param { 458 | use_global_stats: true 459 | } 460 | } 461 | layer { 462 | name: "Scale6" 463 | type: "Scale" 464 | bottom: "Convolution6" 465 | top: "Convolution6" 466 | param { 467 | lr_mult: 0 468 | decay_mult: 0 469 | } 470 | param { 471 | lr_mult: 0 472 | decay_mult: 0 473 | } 474 | scale_param { 475 | bias_term: true 476 | } 477 | } 478 | layer { 479 | name: "ReLU6" 480 | type: "ReLU" 481 | bottom: "Convolution6" 482 | top: "Convolution6" 483 | } 484 | layer { 485 | name: "Convolution7" 486 | type: "Convolution" 487 | bottom: "Convolution6" 488 | top: "Convolution7" 489 | param { 490 | lr_mult: 0 491 | decay_mult: 0 492 | } 493 | param { 494 | lr_mult: 0 495 | decay_mult: 0 496 | } 497 | convolution_param { 498 | num_output: 16 499 | pad: 1 500 | kernel_size: 3 501 | stride: 1 502 | weight_filler { 503 | type: "msra" 504 | } 505 | bias_filler { 506 | type: "constant" 507 | value: 0 508 | } 509 | } 510 | } 511 | layer { 512 | name: "BatchNorm7" 513 | type: "BatchNorm" 514 | bottom: "Convolution7" 515 | top: "Convolution7" 516 | param { 517 | lr_mult: 0 518 | decay_mult: 0 519 | } 520 | param { 521 | lr_mult: 0 522 | decay_mult: 0 523 | } 524 | param { 525 | lr_mult: 0 526 | decay_mult: 0 527 | } 528 | batch_norm_param { 529 | use_global_stats: true 530 | } 531 | } 532 | layer { 533 | name: "Scale7" 534 | type: "Scale" 535 | bottom: "Convolution7" 536 | top: "Convolution7" 537 | param { 538 | lr_mult: 0 539 | decay_mult: 0 540 | } 541 | param { 542 | lr_mult: 0 543 | decay_mult: 0 544 | } 545 | scale_param { 546 | bias_term: true 547 | } 548 | } 549 | layer { 550 | 
name: "Eltwise3" 551 | type: "Eltwise" 552 | bottom: "Eltwise2" 553 | bottom: "Convolution7" 554 | top: "Eltwise3" 555 | eltwise_param { 556 | operation: SUM 557 | } 558 | } 559 | layer { 560 | name: "ReLU7" 561 | type: "ReLU" 562 | bottom: "Eltwise3" 563 | top: "Eltwise3" 564 | } 565 | layer { 566 | name: "Convolution8" 567 | type: "Convolution" 568 | bottom: "Eltwise3" 569 | top: "Convolution8" 570 | param { 571 | lr_mult: 0 572 | decay_mult: 0 573 | } 574 | param { 575 | lr_mult: 0 576 | decay_mult: 0 577 | } 578 | convolution_param { 579 | num_output: 32 580 | pad: 0 581 | kernel_size: 1 582 | stride: 2 583 | weight_filler { 584 | type: "msra" 585 | } 586 | bias_filler { 587 | type: "constant" 588 | value: 0 589 | } 590 | } 591 | } 592 | layer { 593 | name: "BatchNorm8" 594 | type: "BatchNorm" 595 | bottom: "Convolution8" 596 | top: "Convolution8" 597 | param { 598 | lr_mult: 0 599 | decay_mult: 0 600 | } 601 | param { 602 | lr_mult: 0 603 | decay_mult: 0 604 | } 605 | param { 606 | lr_mult: 0 607 | decay_mult: 0 608 | } 609 | batch_norm_param { 610 | use_global_stats: true 611 | } 612 | } 613 | layer { 614 | name: "Scale8" 615 | type: "Scale" 616 | bottom: "Convolution8" 617 | top: "Convolution8" 618 | param { 619 | lr_mult: 0 620 | decay_mult: 0 621 | } 622 | param { 623 | lr_mult: 0 624 | decay_mult: 0 625 | } 626 | scale_param { 627 | bias_term: true 628 | } 629 | } 630 | layer { 631 | name: "Convolution9" 632 | type: "Convolution" 633 | bottom: "Eltwise3" 634 | top: "Convolution9" 635 | param { 636 | lr_mult: 0 637 | decay_mult: 0 638 | } 639 | param { 640 | lr_mult: 0 641 | decay_mult: 0 642 | } 643 | convolution_param { 644 | num_output: 32 645 | pad: 1 646 | kernel_size: 3 647 | stride: 2 648 | weight_filler { 649 | type: "msra" 650 | } 651 | bias_filler { 652 | type: "constant" 653 | value: 0 654 | } 655 | } 656 | } 657 | layer { 658 | name: "BatchNorm9" 659 | type: "BatchNorm" 660 | bottom: "Convolution9" 661 | top: "Convolution9" 662 | param { 663 | 
lr_mult: 0 664 | decay_mult: 0 665 | } 666 | param { 667 | lr_mult: 0 668 | decay_mult: 0 669 | } 670 | param { 671 | lr_mult: 0 672 | decay_mult: 0 673 | } 674 | batch_norm_param { 675 | use_global_stats: true 676 | } 677 | } 678 | layer { 679 | name: "Scale9" 680 | type: "Scale" 681 | bottom: "Convolution9" 682 | top: "Convolution9" 683 | param { 684 | lr_mult: 0 685 | decay_mult: 0 686 | } 687 | param { 688 | lr_mult: 0 689 | decay_mult: 0 690 | } 691 | scale_param { 692 | bias_term: true 693 | } 694 | } 695 | layer { 696 | name: "ReLU8" 697 | type: "ReLU" 698 | bottom: "Convolution9" 699 | top: "Convolution9" 700 | } 701 | layer { 702 | name: "Convolution10" 703 | type: "Convolution" 704 | bottom: "Convolution9" 705 | top: "Convolution10" 706 | param { 707 | lr_mult: 0 708 | decay_mult: 0 709 | } 710 | param { 711 | lr_mult: 0 712 | decay_mult: 0 713 | } 714 | convolution_param { 715 | num_output: 32 716 | pad: 1 717 | kernel_size: 3 718 | stride: 1 719 | weight_filler { 720 | type: "msra" 721 | } 722 | bias_filler { 723 | type: "constant" 724 | value: 0 725 | } 726 | } 727 | } 728 | layer { 729 | name: "BatchNorm10" 730 | type: "BatchNorm" 731 | bottom: "Convolution10" 732 | top: "Convolution10" 733 | param { 734 | lr_mult: 0 735 | decay_mult: 0 736 | } 737 | param { 738 | lr_mult: 0 739 | decay_mult: 0 740 | } 741 | param { 742 | lr_mult: 0 743 | decay_mult: 0 744 | } 745 | batch_norm_param { 746 | use_global_stats: true 747 | } 748 | } 749 | layer { 750 | name: "Scale10" 751 | type: "Scale" 752 | bottom: "Convolution10" 753 | top: "Convolution10" 754 | param { 755 | lr_mult: 0 756 | decay_mult: 0 757 | } 758 | param { 759 | lr_mult: 0 760 | decay_mult: 0 761 | } 762 | scale_param { 763 | bias_term: true 764 | } 765 | } 766 | layer { 767 | name: "Eltwise4" 768 | type: "Eltwise" 769 | bottom: "Convolution8" 770 | bottom: "Convolution10" 771 | top: "Eltwise4" 772 | eltwise_param { 773 | operation: SUM 774 | } 775 | } 776 | layer { 777 | name: "ReLU9" 778 | 
type: "ReLU" 779 | bottom: "Eltwise4" 780 | top: "Eltwise4" 781 | } 782 | layer { 783 | name: "Convolution11" 784 | type: "Convolution" 785 | bottom: "Eltwise4" 786 | top: "Convolution11" 787 | param { 788 | lr_mult: 0 789 | decay_mult: 0 790 | } 791 | param { 792 | lr_mult: 0 793 | decay_mult: 0 794 | } 795 | convolution_param { 796 | num_output: 32 797 | pad: 1 798 | kernel_size: 3 799 | stride: 1 800 | weight_filler { 801 | type: "msra" 802 | } 803 | bias_filler { 804 | type: "constant" 805 | value: 0 806 | } 807 | } 808 | } 809 | layer { 810 | name: "BatchNorm11" 811 | type: "BatchNorm" 812 | bottom: "Convolution11" 813 | top: "Convolution11" 814 | param { 815 | lr_mult: 0 816 | decay_mult: 0 817 | } 818 | param { 819 | lr_mult: 0 820 | decay_mult: 0 821 | } 822 | param { 823 | lr_mult: 0 824 | decay_mult: 0 825 | } 826 | batch_norm_param { 827 | use_global_stats: true 828 | } 829 | } 830 | layer { 831 | name: "Scale11" 832 | type: "Scale" 833 | bottom: "Convolution11" 834 | top: "Convolution11" 835 | param { 836 | lr_mult: 0 837 | decay_mult: 0 838 | } 839 | param { 840 | lr_mult: 0 841 | decay_mult: 0 842 | } 843 | scale_param { 844 | bias_term: true 845 | } 846 | } 847 | layer { 848 | name: "ReLU10" 849 | type: "ReLU" 850 | bottom: "Convolution11" 851 | top: "Convolution11" 852 | } 853 | layer { 854 | name: "Convolution12" 855 | type: "Convolution" 856 | bottom: "Convolution11" 857 | top: "Convolution12" 858 | param { 859 | lr_mult: 0 860 | decay_mult: 0 861 | } 862 | param { 863 | lr_mult: 0 864 | decay_mult: 0 865 | } 866 | convolution_param { 867 | num_output: 32 868 | pad: 1 869 | kernel_size: 3 870 | stride: 1 871 | weight_filler { 872 | type: "msra" 873 | } 874 | bias_filler { 875 | type: "constant" 876 | value: 0 877 | } 878 | } 879 | } 880 | layer { 881 | name: "BatchNorm12" 882 | type: "BatchNorm" 883 | bottom: "Convolution12" 884 | top: "Convolution12" 885 | param { 886 | lr_mult: 0 887 | decay_mult: 0 888 | } 889 | param { 890 | lr_mult: 0 891 | 
decay_mult: 0 892 | } 893 | param { 894 | lr_mult: 0 895 | decay_mult: 0 896 | } 897 | batch_norm_param { 898 | use_global_stats: true 899 | } 900 | } 901 | layer { 902 | name: "Scale12" 903 | type: "Scale" 904 | bottom: "Convolution12" 905 | top: "Convolution12" 906 | param { 907 | lr_mult: 0 908 | decay_mult: 0 909 | } 910 | param { 911 | lr_mult: 0 912 | decay_mult: 0 913 | } 914 | scale_param { 915 | bias_term: true 916 | } 917 | } 918 | layer { 919 | name: "Eltwise5" 920 | type: "Eltwise" 921 | bottom: "Eltwise4" 922 | bottom: "Convolution12" 923 | top: "Eltwise5" 924 | eltwise_param { 925 | operation: SUM 926 | } 927 | } 928 | layer { 929 | name: "ReLU11" 930 | type: "ReLU" 931 | bottom: "Eltwise5" 932 | top: "Eltwise5" 933 | } 934 | layer { 935 | name: "Convolution13" 936 | type: "Convolution" 937 | bottom: "Eltwise5" 938 | top: "Convolution13" 939 | param { 940 | lr_mult: 0 941 | decay_mult: 0 942 | } 943 | param { 944 | lr_mult: 0 945 | decay_mult: 0 946 | } 947 | convolution_param { 948 | num_output: 32 949 | pad: 1 950 | kernel_size: 3 951 | stride: 1 952 | weight_filler { 953 | type: "msra" 954 | } 955 | bias_filler { 956 | type: "constant" 957 | value: 0 958 | } 959 | } 960 | } 961 | layer { 962 | name: "BatchNorm13" 963 | type: "BatchNorm" 964 | bottom: "Convolution13" 965 | top: "Convolution13" 966 | param { 967 | lr_mult: 0 968 | decay_mult: 0 969 | } 970 | param { 971 | lr_mult: 0 972 | decay_mult: 0 973 | } 974 | param { 975 | lr_mult: 0 976 | decay_mult: 0 977 | } 978 | batch_norm_param { 979 | use_global_stats: true 980 | } 981 | } 982 | layer { 983 | name: "Scale13" 984 | type: "Scale" 985 | bottom: "Convolution13" 986 | top: "Convolution13" 987 | param { 988 | lr_mult: 0 989 | decay_mult: 0 990 | } 991 | param { 992 | lr_mult: 0 993 | decay_mult: 0 994 | } 995 | scale_param { 996 | bias_term: true 997 | } 998 | } 999 | layer { 1000 | name: "ReLU12" 1001 | type: "ReLU" 1002 | bottom: "Convolution13" 1003 | top: "Convolution13" 1004 | } 1005 | 
layer { 1006 | name: "Convolution14" 1007 | type: "Convolution" 1008 | bottom: "Convolution13" 1009 | top: "Convolution14" 1010 | param { 1011 | lr_mult: 0 1012 | decay_mult: 0 1013 | } 1014 | param { 1015 | lr_mult: 0 1016 | decay_mult: 0 1017 | } 1018 | convolution_param { 1019 | num_output: 32 1020 | pad: 1 1021 | kernel_size: 3 1022 | stride: 1 1023 | weight_filler { 1024 | type: "msra" 1025 | } 1026 | bias_filler { 1027 | type: "constant" 1028 | value: 0 1029 | } 1030 | } 1031 | } 1032 | layer { 1033 | name: "BatchNorm14" 1034 | type: "BatchNorm" 1035 | bottom: "Convolution14" 1036 | top: "Convolution14" 1037 | param { 1038 | lr_mult: 0 1039 | decay_mult: 0 1040 | } 1041 | param { 1042 | lr_mult: 0 1043 | decay_mult: 0 1044 | } 1045 | param { 1046 | lr_mult: 0 1047 | decay_mult: 0 1048 | } 1049 | batch_norm_param { 1050 | use_global_stats: true 1051 | } 1052 | } 1053 | layer { 1054 | name: "Scale14" 1055 | type: "Scale" 1056 | bottom: "Convolution14" 1057 | top: "Convolution14" 1058 | param { 1059 | lr_mult: 0 1060 | decay_mult: 0 1061 | } 1062 | param { 1063 | lr_mult: 0 1064 | decay_mult: 0 1065 | } 1066 | scale_param { 1067 | bias_term: true 1068 | } 1069 | } 1070 | layer { 1071 | name: "Eltwise6" 1072 | type: "Eltwise" 1073 | bottom: "Eltwise5" 1074 | bottom: "Convolution14" 1075 | top: "Eltwise6" 1076 | eltwise_param { 1077 | operation: SUM 1078 | } 1079 | } 1080 | layer { 1081 | name: "ReLU13" 1082 | type: "ReLU" 1083 | bottom: "Eltwise6" 1084 | top: "Eltwise6" 1085 | } 1086 | layer { 1087 | name: "Convolution15" 1088 | type: "Convolution" 1089 | bottom: "Eltwise6" 1090 | top: "Convolution15" 1091 | param { 1092 | lr_mult: 0 1093 | decay_mult: 0 1094 | } 1095 | param { 1096 | lr_mult: 0 1097 | decay_mult: 0 1098 | } 1099 | convolution_param { 1100 | num_output: 64 1101 | pad: 0 1102 | kernel_size: 1 1103 | stride: 2 1104 | weight_filler { 1105 | type: "msra" 1106 | } 1107 | bias_filler { 1108 | type: "constant" 1109 | value: 0 1110 | } 1111 | } 1112 | } 
1113 | layer { 1114 | name: "BatchNorm15" 1115 | type: "BatchNorm" 1116 | bottom: "Convolution15" 1117 | top: "Convolution15" 1118 | param { 1119 | lr_mult: 0 1120 | decay_mult: 0 1121 | } 1122 | param { 1123 | lr_mult: 0 1124 | decay_mult: 0 1125 | } 1126 | param { 1127 | lr_mult: 0 1128 | decay_mult: 0 1129 | } 1130 | batch_norm_param { 1131 | use_global_stats: true 1132 | } 1133 | } 1134 | layer { 1135 | name: "Scale15" 1136 | type: "Scale" 1137 | bottom: "Convolution15" 1138 | top: "Convolution15" 1139 | param { 1140 | lr_mult: 0 1141 | decay_mult: 0 1142 | } 1143 | param { 1144 | lr_mult: 0 1145 | decay_mult: 0 1146 | } 1147 | scale_param { 1148 | bias_term: true 1149 | } 1150 | } 1151 | layer { 1152 | name: "Convolution16" 1153 | type: "Convolution" 1154 | bottom: "Eltwise6" 1155 | top: "Convolution16" 1156 | param { 1157 | lr_mult: 0 1158 | decay_mult: 0 1159 | } 1160 | param { 1161 | lr_mult: 0 1162 | decay_mult: 0 1163 | } 1164 | convolution_param { 1165 | num_output: 64 1166 | pad: 1 1167 | kernel_size: 3 1168 | stride: 2 1169 | weight_filler { 1170 | type: "msra" 1171 | } 1172 | bias_filler { 1173 | type: "constant" 1174 | value: 0 1175 | } 1176 | } 1177 | } 1178 | layer { 1179 | name: "BatchNorm16" 1180 | type: "BatchNorm" 1181 | bottom: "Convolution16" 1182 | top: "Convolution16" 1183 | param { 1184 | lr_mult: 0 1185 | decay_mult: 0 1186 | } 1187 | param { 1188 | lr_mult: 0 1189 | decay_mult: 0 1190 | } 1191 | param { 1192 | lr_mult: 0 1193 | decay_mult: 0 1194 | } 1195 | batch_norm_param { 1196 | use_global_stats: true 1197 | } 1198 | } 1199 | layer { 1200 | name: "Scale16" 1201 | type: "Scale" 1202 | bottom: "Convolution16" 1203 | top: "Convolution16" 1204 | param { 1205 | lr_mult: 0 1206 | decay_mult: 0 1207 | } 1208 | param { 1209 | lr_mult: 0 1210 | decay_mult: 0 1211 | } 1212 | scale_param { 1213 | bias_term: true 1214 | } 1215 | } 1216 | layer { 1217 | name: "ReLU14" 1218 | type: "ReLU" 1219 | bottom: "Convolution16" 1220 | top: "Convolution16" 
1221 | } 1222 | layer { 1223 | name: "Convolution17" 1224 | type: "Convolution" 1225 | bottom: "Convolution16" 1226 | top: "Convolution17" 1227 | param { 1228 | lr_mult: 0 1229 | decay_mult: 0 1230 | } 1231 | param { 1232 | lr_mult: 0 1233 | decay_mult: 0 1234 | } 1235 | convolution_param { 1236 | num_output: 64 1237 | pad: 1 1238 | kernel_size: 3 1239 | stride: 1 1240 | weight_filler { 1241 | type: "msra" 1242 | } 1243 | bias_filler { 1244 | type: "constant" 1245 | value: 0 1246 | } 1247 | } 1248 | } 1249 | layer { 1250 | name: "BatchNorm17" 1251 | type: "BatchNorm" 1252 | bottom: "Convolution17" 1253 | top: "Convolution17" 1254 | param { 1255 | lr_mult: 0 1256 | decay_mult: 0 1257 | } 1258 | param { 1259 | lr_mult: 0 1260 | decay_mult: 0 1261 | } 1262 | param { 1263 | lr_mult: 0 1264 | decay_mult: 0 1265 | } 1266 | batch_norm_param { 1267 | use_global_stats: true 1268 | } 1269 | } 1270 | layer { 1271 | name: "Scale17" 1272 | type: "Scale" 1273 | bottom: "Convolution17" 1274 | top: "Convolution17" 1275 | param { 1276 | lr_mult: 0 1277 | decay_mult: 0 1278 | } 1279 | param { 1280 | lr_mult: 0 1281 | decay_mult: 0 1282 | } 1283 | scale_param { 1284 | bias_term: true 1285 | } 1286 | } 1287 | layer { 1288 | name: "Eltwise7" 1289 | type: "Eltwise" 1290 | bottom: "Convolution15" 1291 | bottom: "Convolution17" 1292 | top: "Eltwise7" 1293 | eltwise_param { 1294 | operation: SUM 1295 | } 1296 | } 1297 | layer { 1298 | name: "ReLU15" 1299 | type: "ReLU" 1300 | bottom: "Eltwise7" 1301 | top: "Eltwise7" 1302 | } 1303 | layer { 1304 | name: "Convolution18" 1305 | type: "Convolution" 1306 | bottom: "Eltwise7" 1307 | top: "Convolution18" 1308 | param { 1309 | lr_mult: 0 1310 | decay_mult: 0 1311 | } 1312 | param { 1313 | lr_mult: 0 1314 | decay_mult: 0 1315 | } 1316 | convolution_param { 1317 | num_output: 64 1318 | pad: 1 1319 | kernel_size: 3 1320 | stride: 1 1321 | weight_filler { 1322 | type: "msra" 1323 | } 1324 | bias_filler { 1325 | type: "constant" 1326 | value: 0 1327 | 
} 1328 | } 1329 | } 1330 | layer { 1331 | name: "BatchNorm18" 1332 | type: "BatchNorm" 1333 | bottom: "Convolution18" 1334 | top: "Convolution18" 1335 | param { 1336 | lr_mult: 0 1337 | decay_mult: 0 1338 | } 1339 | param { 1340 | lr_mult: 0 1341 | decay_mult: 0 1342 | } 1343 | param { 1344 | lr_mult: 0 1345 | decay_mult: 0 1346 | } 1347 | batch_norm_param { 1348 | use_global_stats: true 1349 | } 1350 | } 1351 | layer { 1352 | name: "Scale18" 1353 | type: "Scale" 1354 | bottom: "Convolution18" 1355 | top: "Convolution18" 1356 | param { 1357 | lr_mult: 0 1358 | decay_mult: 0 1359 | } 1360 | param { 1361 | lr_mult: 0 1362 | decay_mult: 0 1363 | } 1364 | scale_param { 1365 | bias_term: true 1366 | } 1367 | } 1368 | layer { 1369 | name: "ReLU16" 1370 | type: "ReLU" 1371 | bottom: "Convolution18" 1372 | top: "Convolution18" 1373 | } 1374 | layer { 1375 | name: "Convolution19" 1376 | type: "Convolution" 1377 | bottom: "Convolution18" 1378 | top: "Convolution19" 1379 | param { 1380 | lr_mult: 0 1381 | decay_mult: 0 1382 | } 1383 | param { 1384 | lr_mult: 0 1385 | decay_mult: 0 1386 | } 1387 | convolution_param { 1388 | num_output: 64 1389 | pad: 1 1390 | kernel_size: 3 1391 | stride: 1 1392 | weight_filler { 1393 | type: "msra" 1394 | } 1395 | bias_filler { 1396 | type: "constant" 1397 | value: 0 1398 | } 1399 | } 1400 | } 1401 | layer { 1402 | name: "BatchNorm19" 1403 | type: "BatchNorm" 1404 | bottom: "Convolution19" 1405 | top: "Convolution19" 1406 | param { 1407 | lr_mult: 0 1408 | decay_mult: 0 1409 | } 1410 | param { 1411 | lr_mult: 0 1412 | decay_mult: 0 1413 | } 1414 | param { 1415 | lr_mult: 0 1416 | decay_mult: 0 1417 | } 1418 | batch_norm_param { 1419 | use_global_stats: true 1420 | } 1421 | } 1422 | layer { 1423 | name: "Scale19" 1424 | type: "Scale" 1425 | bottom: "Convolution19" 1426 | top: "Convolution19" 1427 | param { 1428 | lr_mult: 0 1429 | decay_mult: 0 1430 | } 1431 | param { 1432 | lr_mult: 0 1433 | decay_mult: 0 1434 | } 1435 | scale_param { 1436 | 
bias_term: true 1437 | } 1438 | } 1439 | layer { 1440 | name: "Eltwise8" 1441 | type: "Eltwise" 1442 | bottom: "Eltwise7" 1443 | bottom: "Convolution19" 1444 | top: "Eltwise8" 1445 | eltwise_param { 1446 | operation: SUM 1447 | } 1448 | } 1449 | layer { 1450 | name: "ReLU17" 1451 | type: "ReLU" 1452 | bottom: "Eltwise8" 1453 | top: "Eltwise8" 1454 | } 1455 | layer { 1456 | name: "Convolution20" 1457 | type: "Convolution" 1458 | bottom: "Eltwise8" 1459 | top: "Convolution20" 1460 | param { 1461 | lr_mult: 0 1462 | decay_mult: 0 1463 | } 1464 | param { 1465 | lr_mult: 0 1466 | decay_mult: 0 1467 | } 1468 | convolution_param { 1469 | num_output: 64 1470 | pad: 1 1471 | kernel_size: 3 1472 | stride: 1 1473 | weight_filler { 1474 | type: "msra" 1475 | } 1476 | bias_filler { 1477 | type: "constant" 1478 | value: 0 1479 | } 1480 | } 1481 | } 1482 | layer { 1483 | name: "BatchNorm20" 1484 | type: "BatchNorm" 1485 | bottom: "Convolution20" 1486 | top: "Convolution20" 1487 | param { 1488 | lr_mult: 0 1489 | decay_mult: 0 1490 | } 1491 | param { 1492 | lr_mult: 0 1493 | decay_mult: 0 1494 | } 1495 | param { 1496 | lr_mult: 0 1497 | decay_mult: 0 1498 | } 1499 | batch_norm_param { 1500 | use_global_stats: true 1501 | } 1502 | } 1503 | layer { 1504 | name: "Scale20" 1505 | type: "Scale" 1506 | bottom: "Convolution20" 1507 | top: "Convolution20" 1508 | param { 1509 | lr_mult: 0 1510 | decay_mult: 0 1511 | } 1512 | param { 1513 | lr_mult: 0 1514 | decay_mult: 0 1515 | } 1516 | scale_param { 1517 | bias_term: true 1518 | } 1519 | } 1520 | layer { 1521 | name: "ReLU18" 1522 | type: "ReLU" 1523 | bottom: "Convolution20" 1524 | top: "Convolution20" 1525 | } 1526 | layer { 1527 | name: "Convolution21" 1528 | type: "Convolution" 1529 | bottom: "Convolution20" 1530 | top: "Convolution21" 1531 | param { 1532 | lr_mult: 0 1533 | decay_mult: 0 1534 | } 1535 | param { 1536 | lr_mult: 0 1537 | decay_mult: 0 1538 | } 1539 | convolution_param { 1540 | num_output: 64 1541 | pad: 1 1542 | 
kernel_size: 3 1543 | stride: 1 1544 | weight_filler { 1545 | type: "msra" 1546 | } 1547 | bias_filler { 1548 | type: "constant" 1549 | value: 0 1550 | } 1551 | } 1552 | } 1553 | layer { 1554 | name: "BatchNorm21" 1555 | type: "BatchNorm" 1556 | bottom: "Convolution21" 1557 | top: "Convolution21" 1558 | param { 1559 | lr_mult: 0 1560 | decay_mult: 0 1561 | } 1562 | param { 1563 | lr_mult: 0 1564 | decay_mult: 0 1565 | } 1566 | param { 1567 | lr_mult: 0 1568 | decay_mult: 0 1569 | } 1570 | batch_norm_param { 1571 | use_global_stats: true 1572 | } 1573 | } 1574 | layer { 1575 | name: "Scale21" 1576 | type: "Scale" 1577 | bottom: "Convolution21" 1578 | top: "Convolution21" 1579 | param { 1580 | lr_mult: 0 1581 | decay_mult: 0 1582 | } 1583 | param { 1584 | lr_mult: 0 1585 | decay_mult: 0 1586 | } 1587 | scale_param { 1588 | bias_term: true 1589 | } 1590 | } 1591 | layer { 1592 | name: "Eltwise9" 1593 | type: "Eltwise" 1594 | bottom: "Eltwise8" 1595 | bottom: "Convolution21" 1596 | top: "Eltwise9" 1597 | eltwise_param { 1598 | operation: SUM 1599 | } 1600 | } 1601 | layer { 1602 | name: "ReLU19" 1603 | type: "ReLU" 1604 | bottom: "Eltwise9" 1605 | top: "Eltwise9" 1606 | } 1607 | layer { 1608 | name: "Pooling1" 1609 | type: "Pooling" 1610 | bottom: "Eltwise9" 1611 | top: "Pooling1" 1612 | pooling_param { 1613 | pool: AVE 1614 | global_pooling: true 1615 | } 1616 | } 1617 | layer { 1618 | name: "bem" 1619 | type: "BatchEuclideanMap" 1620 | bottom: "Pooling1" 1621 | top: "bem" 1622 | } 1623 | layer { 1624 | name: "InnerProduct1" 1625 | type: "InnerProduct" 1626 | bottom: "Pooling1" 1627 | top: "InnerProduct1" 1628 | param { 1629 | lr_mult: 0 1630 | decay_mult: 0 1631 | } 1632 | param { 1633 | lr_mult: 0 1634 | decay_mult: 0 1635 | } 1636 | inner_product_param { 1637 | num_output: 10 1638 | weight_filler { 1639 | type: "msra" 1640 | } 1641 | bias_filler { 1642 | type: "constant" 1643 | value: 0 1644 | } 1645 | } 1646 | } 1647 | layer { 1648 | name: "SoftmaxWithLoss1" 1649 
| type: "SoftmaxWithLoss" 1650 | bottom: "InnerProduct1" 1651 | bottom: "Data2" 1652 | top: "SoftmaxWithLoss1" 1653 | loss_weight: 0 1654 | } 1655 | layer { 1656 | name: "Accuracy1" 1657 | type: "Accuracy" 1658 | bottom: "InnerProduct1" 1659 | bottom: "Data2" 1660 | top: "Accuracy1" 1661 | include { 1662 | phase: TEST 1663 | } 1664 | } 1665 | 1666 | layer { 1667 | name: "stu_Convolution1" 1668 | type: "Convolution" 1669 | bottom: "Data1" 1670 | top: "stu_Convolution1" 1671 | param { 1672 | lr_mult: 1 1673 | decay_mult: 1 1674 | } 1675 | param { 1676 | lr_mult: 2 1677 | decay_mult: 0 1678 | } 1679 | convolution_param { 1680 | num_output: 8 1681 | pad: 1 1682 | kernel_size: 3 1683 | stride: 1 1684 | weight_filler { 1685 | type: "msra" 1686 | } 1687 | bias_filler { 1688 | type: "constant" 1689 | value: 0 1690 | } 1691 | } 1692 | } 1693 | layer { 1694 | name: "stu_BatchNorm1" 1695 | type: "BatchNorm" 1696 | bottom: "stu_Convolution1" 1697 | top: "stu_Convolution1" 1698 | param { 1699 | lr_mult: 0 1700 | decay_mult: 0 1701 | } 1702 | param { 1703 | lr_mult: 0 1704 | decay_mult: 0 1705 | } 1706 | param { 1707 | lr_mult: 0 1708 | decay_mult: 0 1709 | } 1710 | } 1711 | layer { 1712 | name: "stu_Scale1" 1713 | type: "Scale" 1714 | bottom: "stu_Convolution1" 1715 | top: "stu_Convolution1" 1716 | scale_param { 1717 | bias_term: true 1718 | } 1719 | } 1720 | layer { 1721 | name: "stu_ReLU1" 1722 | type: "ReLU" 1723 | bottom: "stu_Convolution1" 1724 | top: "stu_Convolution1" 1725 | } 1726 | layer { 1727 | name: "stu_Convolution2" 1728 | type: "Convolution" 1729 | bottom: "stu_Convolution1" 1730 | top: "stu_Convolution2" 1731 | param { 1732 | lr_mult: 1 1733 | decay_mult: 1 1734 | } 1735 | param { 1736 | lr_mult: 2 1737 | decay_mult: 0 1738 | } 1739 | convolution_param { 1740 | num_output: 8 1741 | pad: 1 1742 | kernel_size: 3 1743 | stride: 1 1744 | weight_filler { 1745 | type: "msra" 1746 | } 1747 | bias_filler { 1748 | type: "constant" 1749 | value: 0 1750 | } 1751 | } 1752 | 
} 1753 | layer { 1754 | name: "stu_BatchNorm2" 1755 | type: "BatchNorm" 1756 | bottom: "stu_Convolution2" 1757 | top: "stu_Convolution2" 1758 | param { 1759 | lr_mult: 0 1760 | decay_mult: 0 1761 | } 1762 | param { 1763 | lr_mult: 0 1764 | decay_mult: 0 1765 | } 1766 | param { 1767 | lr_mult: 0 1768 | decay_mult: 0 1769 | } 1770 | } 1771 | layer { 1772 | name: "stu_Scale2" 1773 | type: "Scale" 1774 | bottom: "stu_Convolution2" 1775 | top: "stu_Convolution2" 1776 | scale_param { 1777 | bias_term: true 1778 | } 1779 | } 1780 | layer { 1781 | name: "stu_ReLU2" 1782 | type: "ReLU" 1783 | bottom: "stu_Convolution2" 1784 | top: "stu_Convolution2" 1785 | } 1786 | layer { 1787 | name: "stu_Convolution3" 1788 | type: "Convolution" 1789 | bottom: "stu_Convolution2" 1790 | top: "stu_Convolution3" 1791 | param { 1792 | lr_mult: 1 1793 | decay_mult: 1 1794 | } 1795 | param { 1796 | lr_mult: 2 1797 | decay_mult: 0 1798 | } 1799 | convolution_param { 1800 | num_output: 8 1801 | pad: 1 1802 | kernel_size: 3 1803 | stride: 1 1804 | weight_filler { 1805 | type: "msra" 1806 | } 1807 | bias_filler { 1808 | type: "constant" 1809 | value: 0 1810 | } 1811 | } 1812 | } 1813 | layer { 1814 | name: "stu_BatchNorm3" 1815 | type: "BatchNorm" 1816 | bottom: "stu_Convolution3" 1817 | top: "stu_Convolution3" 1818 | param { 1819 | lr_mult: 0 1820 | decay_mult: 0 1821 | } 1822 | param { 1823 | lr_mult: 0 1824 | decay_mult: 0 1825 | } 1826 | param { 1827 | lr_mult: 0 1828 | decay_mult: 0 1829 | } 1830 | } 1831 | layer { 1832 | name: "stu_Scale3" 1833 | type: "Scale" 1834 | bottom: "stu_Convolution3" 1835 | top: "stu_Convolution3" 1836 | scale_param { 1837 | bias_term: true 1838 | } 1839 | } 1840 | layer { 1841 | name: "stu_Eltwise1" 1842 | type: "Eltwise" 1843 | bottom: "stu_Convolution1" 1844 | bottom: "stu_Convolution3" 1845 | top: "stu_Eltwise1" 1846 | eltwise_param { 1847 | operation: SUM 1848 | } 1849 | } 1850 | layer { 1851 | name: "stu_ReLU3" 1852 | type: "ReLU" 1853 | bottom: "stu_Eltwise1" 
1854 | top: "stu_Eltwise1" 1855 | } 1856 | layer { 1857 | name: "stu_Convolution4" 1858 | type: "Convolution" 1859 | bottom: "stu_Eltwise1" 1860 | top: "stu_Convolution4" 1861 | param { 1862 | lr_mult: 1 1863 | decay_mult: 1 1864 | } 1865 | param { 1866 | lr_mult: 2 1867 | decay_mult: 0 1868 | } 1869 | convolution_param { 1870 | num_output: 8 1871 | pad: 1 1872 | kernel_size: 3 1873 | stride: 1 1874 | weight_filler { 1875 | type: "msra" 1876 | } 1877 | bias_filler { 1878 | type: "constant" 1879 | value: 0 1880 | } 1881 | } 1882 | } 1883 | layer { 1884 | name: "stu_BatchNorm4" 1885 | type: "BatchNorm" 1886 | bottom: "stu_Convolution4" 1887 | top: "stu_Convolution4" 1888 | param { 1889 | lr_mult: 0 1890 | decay_mult: 0 1891 | } 1892 | param { 1893 | lr_mult: 0 1894 | decay_mult: 0 1895 | } 1896 | param { 1897 | lr_mult: 0 1898 | decay_mult: 0 1899 | } 1900 | } 1901 | layer { 1902 | name: "stu_Scale4" 1903 | type: "Scale" 1904 | bottom: "stu_Convolution4" 1905 | top: "stu_Convolution4" 1906 | scale_param { 1907 | bias_term: true 1908 | } 1909 | } 1910 | layer { 1911 | name: "stu_ReLU4" 1912 | type: "ReLU" 1913 | bottom: "stu_Convolution4" 1914 | top: "stu_Convolution4" 1915 | } 1916 | layer { 1917 | name: "stu_Convolution5" 1918 | type: "Convolution" 1919 | bottom: "stu_Convolution4" 1920 | top: "stu_Convolution5" 1921 | param { 1922 | lr_mult: 1 1923 | decay_mult: 1 1924 | } 1925 | param { 1926 | lr_mult: 2 1927 | decay_mult: 0 1928 | } 1929 | convolution_param { 1930 | num_output: 8 1931 | pad: 1 1932 | kernel_size: 3 1933 | stride: 1 1934 | weight_filler { 1935 | type: "msra" 1936 | } 1937 | bias_filler { 1938 | type: "constant" 1939 | value: 0 1940 | } 1941 | } 1942 | } 1943 | layer { 1944 | name: "stu_BatchNorm5" 1945 | type: "BatchNorm" 1946 | bottom: "stu_Convolution5" 1947 | top: "stu_Convolution5" 1948 | param { 1949 | lr_mult: 0 1950 | decay_mult: 0 1951 | } 1952 | param { 1953 | lr_mult: 0 1954 | decay_mult: 0 1955 | } 1956 | param { 1957 | lr_mult: 0 1958 
| decay_mult: 0 1959 | } 1960 | } 1961 | layer { 1962 | name: "stu_Scale5" 1963 | type: "Scale" 1964 | bottom: "stu_Convolution5" 1965 | top: "stu_Convolution5" 1966 | scale_param { 1967 | bias_term: true 1968 | } 1969 | } 1970 | layer { 1971 | name: "stu_Eltwise2" 1972 | type: "Eltwise" 1973 | bottom: "stu_Eltwise1" 1974 | bottom: "stu_Convolution5" 1975 | top: "stu_Eltwise2" 1976 | eltwise_param { 1977 | operation: SUM 1978 | } 1979 | } 1980 | layer { 1981 | name: "stu_ReLU5" 1982 | type: "ReLU" 1983 | bottom: "stu_Eltwise2" 1984 | top: "stu_Eltwise2" 1985 | } 1986 | layer { 1987 | name: "stu_Convolution6" 1988 | type: "Convolution" 1989 | bottom: "stu_Eltwise2" 1990 | top: "stu_Convolution6" 1991 | param { 1992 | lr_mult: 1 1993 | decay_mult: 1 1994 | } 1995 | param { 1996 | lr_mult: 2 1997 | decay_mult: 0 1998 | } 1999 | convolution_param { 2000 | num_output: 8 2001 | pad: 1 2002 | kernel_size: 3 2003 | stride: 1 2004 | weight_filler { 2005 | type: "msra" 2006 | } 2007 | bias_filler { 2008 | type: "constant" 2009 | value: 0 2010 | } 2011 | } 2012 | } 2013 | layer { 2014 | name: "stu_BatchNorm6" 2015 | type: "BatchNorm" 2016 | bottom: "stu_Convolution6" 2017 | top: "stu_Convolution6" 2018 | param { 2019 | lr_mult: 0 2020 | decay_mult: 0 2021 | } 2022 | param { 2023 | lr_mult: 0 2024 | decay_mult: 0 2025 | } 2026 | param { 2027 | lr_mult: 0 2028 | decay_mult: 0 2029 | } 2030 | } 2031 | layer { 2032 | name: "stu_Scale6" 2033 | type: "Scale" 2034 | bottom: "stu_Convolution6" 2035 | top: "stu_Convolution6" 2036 | scale_param { 2037 | bias_term: true 2038 | } 2039 | } 2040 | layer { 2041 | name: "stu_ReLU6" 2042 | type: "ReLU" 2043 | bottom: "stu_Convolution6" 2044 | top: "stu_Convolution6" 2045 | } 2046 | layer { 2047 | name: "stu_Convolution7" 2048 | type: "Convolution" 2049 | bottom: "stu_Convolution6" 2050 | top: "stu_Convolution7" 2051 | param { 2052 | lr_mult: 1 2053 | decay_mult: 1 2054 | } 2055 | param { 2056 | lr_mult: 2 2057 | decay_mult: 0 2058 | } 2059 | 
convolution_param { 2060 | num_output: 8 2061 | pad: 1 2062 | kernel_size: 3 2063 | stride: 1 2064 | weight_filler { 2065 | type: "msra" 2066 | } 2067 | bias_filler { 2068 | type: "constant" 2069 | value: 0 2070 | } 2071 | } 2072 | } 2073 | layer { 2074 | name: "stu_BatchNorm7" 2075 | type: "BatchNorm" 2076 | bottom: "stu_Convolution7" 2077 | top: "stu_Convolution7" 2078 | param { 2079 | lr_mult: 0 2080 | decay_mult: 0 2081 | } 2082 | param { 2083 | lr_mult: 0 2084 | decay_mult: 0 2085 | } 2086 | param { 2087 | lr_mult: 0 2088 | decay_mult: 0 2089 | } 2090 | } 2091 | layer { 2092 | name: "stu_Scale7" 2093 | type: "Scale" 2094 | bottom: "stu_Convolution7" 2095 | top: "stu_Convolution7" 2096 | scale_param { 2097 | bias_term: true 2098 | } 2099 | } 2100 | layer { 2101 | name: "stu_Eltwise3" 2102 | type: "Eltwise" 2103 | bottom: "stu_Eltwise2" 2104 | bottom: "stu_Convolution7" 2105 | top: "stu_Eltwise3" 2106 | eltwise_param { 2107 | operation: SUM 2108 | } 2109 | } 2110 | layer { 2111 | name: "stu_ReLU7" 2112 | type: "ReLU" 2113 | bottom: "stu_Eltwise3" 2114 | top: "stu_Eltwise3" 2115 | } 2116 | layer { 2117 | name: "stu_Convolution8" 2118 | type: "Convolution" 2119 | bottom: "stu_Eltwise3" 2120 | top: "stu_Convolution8" 2121 | param { 2122 | lr_mult: 1 2123 | decay_mult: 1 2124 | } 2125 | param { 2126 | lr_mult: 2 2127 | decay_mult: 0 2128 | } 2129 | convolution_param { 2130 | num_output: 16 2131 | pad: 0 2132 | kernel_size: 1 2133 | stride: 2 2134 | weight_filler { 2135 | type: "msra" 2136 | } 2137 | bias_filler { 2138 | type: "constant" 2139 | value: 0 2140 | } 2141 | } 2142 | } 2143 | layer { 2144 | name: "stu_BatchNorm8" 2145 | type: "BatchNorm" 2146 | bottom: "stu_Convolution8" 2147 | top: "stu_Convolution8" 2148 | param { 2149 | lr_mult: 0 2150 | decay_mult: 0 2151 | } 2152 | param { 2153 | lr_mult: 0 2154 | decay_mult: 0 2155 | } 2156 | param { 2157 | lr_mult: 0 2158 | decay_mult: 0 2159 | } 2160 | } 2161 | layer { 2162 | name: "stu_Scale8" 2163 | type: "Scale" 
2164 | bottom: "stu_Convolution8" 2165 | top: "stu_Convolution8" 2166 | scale_param { 2167 | bias_term: true 2168 | } 2169 | } 2170 | layer { 2171 | name: "stu_Convolution9" 2172 | type: "Convolution" 2173 | bottom: "stu_Eltwise3" 2174 | top: "stu_Convolution9" 2175 | param { 2176 | lr_mult: 1 2177 | decay_mult: 1 2178 | } 2179 | param { 2180 | lr_mult: 2 2181 | decay_mult: 0 2182 | } 2183 | convolution_param { 2184 | num_output: 16 2185 | pad: 1 2186 | kernel_size: 3 2187 | stride: 2 2188 | weight_filler { 2189 | type: "msra" 2190 | } 2191 | bias_filler { 2192 | type: "constant" 2193 | value: 0 2194 | } 2195 | } 2196 | } 2197 | layer { 2198 | name: "stu_BatchNorm9" 2199 | type: "BatchNorm" 2200 | bottom: "stu_Convolution9" 2201 | top: "stu_Convolution9" 2202 | param { 2203 | lr_mult: 0 2204 | decay_mult: 0 2205 | } 2206 | param { 2207 | lr_mult: 0 2208 | decay_mult: 0 2209 | } 2210 | param { 2211 | lr_mult: 0 2212 | decay_mult: 0 2213 | } 2214 | } 2215 | layer { 2216 | name: "stu_Scale9" 2217 | type: "Scale" 2218 | bottom: "stu_Convolution9" 2219 | top: "stu_Convolution9" 2220 | scale_param { 2221 | bias_term: true 2222 | } 2223 | } 2224 | layer { 2225 | name: "stu_ReLU8" 2226 | type: "ReLU" 2227 | bottom: "stu_Convolution9" 2228 | top: "stu_Convolution9" 2229 | } 2230 | layer { 2231 | name: "stu_Convolution10" 2232 | type: "Convolution" 2233 | bottom: "stu_Convolution9" 2234 | top: "stu_Convolution10" 2235 | param { 2236 | lr_mult: 1 2237 | decay_mult: 1 2238 | } 2239 | param { 2240 | lr_mult: 2 2241 | decay_mult: 0 2242 | } 2243 | convolution_param { 2244 | num_output: 16 2245 | pad: 1 2246 | kernel_size: 3 2247 | stride: 1 2248 | weight_filler { 2249 | type: "msra" 2250 | } 2251 | bias_filler { 2252 | type: "constant" 2253 | value: 0 2254 | } 2255 | } 2256 | } 2257 | layer { 2258 | name: "stu_BatchNorm10" 2259 | type: "BatchNorm" 2260 | bottom: "stu_Convolution10" 2261 | top: "stu_Convolution10" 2262 | param { 2263 | lr_mult: 0 2264 | decay_mult: 0 2265 | } 
2266 | param { 2267 | lr_mult: 0 2268 | decay_mult: 0 2269 | } 2270 | param { 2271 | lr_mult: 0 2272 | decay_mult: 0 2273 | } 2274 | } 2275 | layer { 2276 | name: "stu_Scale10" 2277 | type: "Scale" 2278 | bottom: "stu_Convolution10" 2279 | top: "stu_Convolution10" 2280 | scale_param { 2281 | bias_term: true 2282 | } 2283 | } 2284 | layer { 2285 | name: "stu_Eltwise4" 2286 | type: "Eltwise" 2287 | bottom: "stu_Convolution8" 2288 | bottom: "stu_Convolution10" 2289 | top: "stu_Eltwise4" 2290 | eltwise_param { 2291 | operation: SUM 2292 | } 2293 | } 2294 | layer { 2295 | name: "stu_ReLU9" 2296 | type: "ReLU" 2297 | bottom: "stu_Eltwise4" 2298 | top: "stu_Eltwise4" 2299 | } 2300 | layer { 2301 | name: "stu_Convolution11" 2302 | type: "Convolution" 2303 | bottom: "stu_Eltwise4" 2304 | top: "stu_Convolution11" 2305 | param { 2306 | lr_mult: 1 2307 | decay_mult: 1 2308 | } 2309 | param { 2310 | lr_mult: 2 2311 | decay_mult: 0 2312 | } 2313 | convolution_param { 2314 | num_output: 16 2315 | pad: 1 2316 | kernel_size: 3 2317 | stride: 1 2318 | weight_filler { 2319 | type: "msra" 2320 | } 2321 | bias_filler { 2322 | type: "constant" 2323 | value: 0 2324 | } 2325 | } 2326 | } 2327 | layer { 2328 | name: "stu_BatchNorm11" 2329 | type: "BatchNorm" 2330 | bottom: "stu_Convolution11" 2331 | top: "stu_Convolution11" 2332 | param { 2333 | lr_mult: 0 2334 | decay_mult: 0 2335 | } 2336 | param { 2337 | lr_mult: 0 2338 | decay_mult: 0 2339 | } 2340 | param { 2341 | lr_mult: 0 2342 | decay_mult: 0 2343 | } 2344 | } 2345 | layer { 2346 | name: "stu_Scale11" 2347 | type: "Scale" 2348 | bottom: "stu_Convolution11" 2349 | top: "stu_Convolution11" 2350 | scale_param { 2351 | bias_term: true 2352 | } 2353 | } 2354 | layer { 2355 | name: "stu_ReLU10" 2356 | type: "ReLU" 2357 | bottom: "stu_Convolution11" 2358 | top: "stu_Convolution11" 2359 | } 2360 | layer { 2361 | name: "stu_Convolution12" 2362 | type: "Convolution" 2363 | bottom: "stu_Convolution11" 2364 | top: "stu_Convolution12" 2365 | 
param { 2366 | lr_mult: 1 2367 | decay_mult: 1 2368 | } 2369 | param { 2370 | lr_mult: 2 2371 | decay_mult: 0 2372 | } 2373 | convolution_param { 2374 | num_output: 16 2375 | pad: 1 2376 | kernel_size: 3 2377 | stride: 1 2378 | weight_filler { 2379 | type: "msra" 2380 | } 2381 | bias_filler { 2382 | type: "constant" 2383 | value: 0 2384 | } 2385 | } 2386 | } 2387 | layer { 2388 | name: "stu_BatchNorm12" 2389 | type: "BatchNorm" 2390 | bottom: "stu_Convolution12" 2391 | top: "stu_Convolution12" 2392 | param { 2393 | lr_mult: 0 2394 | decay_mult: 0 2395 | } 2396 | param { 2397 | lr_mult: 0 2398 | decay_mult: 0 2399 | } 2400 | param { 2401 | lr_mult: 0 2402 | decay_mult: 0 2403 | } 2404 | } 2405 | layer { 2406 | name: "stu_Scale12" 2407 | type: "Scale" 2408 | bottom: "stu_Convolution12" 2409 | top: "stu_Convolution12" 2410 | scale_param { 2411 | bias_term: true 2412 | } 2413 | } 2414 | layer { 2415 | name: "stu_Eltwise5" 2416 | type: "Eltwise" 2417 | bottom: "stu_Eltwise4" 2418 | bottom: "stu_Convolution12" 2419 | top: "stu_Eltwise5" 2420 | eltwise_param { 2421 | operation: SUM 2422 | } 2423 | } 2424 | layer { 2425 | name: "stu_ReLU11" 2426 | type: "ReLU" 2427 | bottom: "stu_Eltwise5" 2428 | top: "stu_Eltwise5" 2429 | } 2430 | layer { 2431 | name: "stu_Convolution13" 2432 | type: "Convolution" 2433 | bottom: "stu_Eltwise5" 2434 | top: "stu_Convolution13" 2435 | param { 2436 | lr_mult: 1 2437 | decay_mult: 1 2438 | } 2439 | param { 2440 | lr_mult: 2 2441 | decay_mult: 0 2442 | } 2443 | convolution_param { 2444 | num_output: 16 2445 | pad: 1 2446 | kernel_size: 3 2447 | stride: 1 2448 | weight_filler { 2449 | type: "msra" 2450 | } 2451 | bias_filler { 2452 | type: "constant" 2453 | value: 0 2454 | } 2455 | } 2456 | } 2457 | layer { 2458 | name: "stu_BatchNorm13" 2459 | type: "BatchNorm" 2460 | bottom: "stu_Convolution13" 2461 | top: "stu_Convolution13" 2462 | param { 2463 | lr_mult: 0 2464 | decay_mult: 0 2465 | } 2466 | param { 2467 | lr_mult: 0 2468 | decay_mult: 0 
2469 | } 2470 | param { 2471 | lr_mult: 0 2472 | decay_mult: 0 2473 | } 2474 | } 2475 | layer { 2476 | name: "stu_Scale13" 2477 | type: "Scale" 2478 | bottom: "stu_Convolution13" 2479 | top: "stu_Convolution13" 2480 | scale_param { 2481 | bias_term: true 2482 | } 2483 | } 2484 | layer { 2485 | name: "stu_ReLU12" 2486 | type: "ReLU" 2487 | bottom: "stu_Convolution13" 2488 | top: "stu_Convolution13" 2489 | } 2490 | layer { 2491 | name: "stu_Convolution14" 2492 | type: "Convolution" 2493 | bottom: "stu_Convolution13" 2494 | top: "stu_Convolution14" 2495 | param { 2496 | lr_mult: 1 2497 | decay_mult: 1 2498 | } 2499 | param { 2500 | lr_mult: 2 2501 | decay_mult: 0 2502 | } 2503 | convolution_param { 2504 | num_output: 16 2505 | pad: 1 2506 | kernel_size: 3 2507 | stride: 1 2508 | weight_filler { 2509 | type: "msra" 2510 | } 2511 | bias_filler { 2512 | type: "constant" 2513 | value: 0 2514 | } 2515 | } 2516 | } 2517 | layer { 2518 | name: "stu_BatchNorm14" 2519 | type: "BatchNorm" 2520 | bottom: "stu_Convolution14" 2521 | top: "stu_Convolution14" 2522 | param { 2523 | lr_mult: 0 2524 | decay_mult: 0 2525 | } 2526 | param { 2527 | lr_mult: 0 2528 | decay_mult: 0 2529 | } 2530 | param { 2531 | lr_mult: 0 2532 | decay_mult: 0 2533 | } 2534 | } 2535 | layer { 2536 | name: "stu_Scale14" 2537 | type: "Scale" 2538 | bottom: "stu_Convolution14" 2539 | top: "stu_Convolution14" 2540 | scale_param { 2541 | bias_term: true 2542 | } 2543 | } 2544 | layer { 2545 | name: "stu_Eltwise6" 2546 | type: "Eltwise" 2547 | bottom: "stu_Eltwise5" 2548 | bottom: "stu_Convolution14" 2549 | top: "stu_Eltwise6" 2550 | eltwise_param { 2551 | operation: SUM 2552 | } 2553 | } 2554 | layer { 2555 | name: "stu_ReLU13" 2556 | type: "ReLU" 2557 | bottom: "stu_Eltwise6" 2558 | top: "stu_Eltwise6" 2559 | } 2560 | layer { 2561 | name: "stu_Convolution15" 2562 | type: "Convolution" 2563 | bottom: "stu_Eltwise6" 2564 | top: "stu_Convolution15" 2565 | param { 2566 | lr_mult: 1 2567 | decay_mult: 1 2568 | } 
2569 | param { 2570 | lr_mult: 2 2571 | decay_mult: 0 2572 | } 2573 | convolution_param { 2574 | num_output: 32 2575 | pad: 0 2576 | kernel_size: 1 2577 | stride: 2 2578 | weight_filler { 2579 | type: "msra" 2580 | } 2581 | bias_filler { 2582 | type: "constant" 2583 | value: 0 2584 | } 2585 | } 2586 | } 2587 | layer { 2588 | name: "stu_BatchNorm15" 2589 | type: "BatchNorm" 2590 | bottom: "stu_Convolution15" 2591 | top: "stu_Convolution15" 2592 | param { 2593 | lr_mult: 0 2594 | decay_mult: 0 2595 | } 2596 | param { 2597 | lr_mult: 0 2598 | decay_mult: 0 2599 | } 2600 | param { 2601 | lr_mult: 0 2602 | decay_mult: 0 2603 | } 2604 | } 2605 | layer { 2606 | name: "stu_Scale15" 2607 | type: "Scale" 2608 | bottom: "stu_Convolution15" 2609 | top: "stu_Convolution15" 2610 | scale_param { 2611 | bias_term: true 2612 | } 2613 | } 2614 | layer { 2615 | name: "stu_Convolution16" 2616 | type: "Convolution" 2617 | bottom: "stu_Eltwise6" 2618 | top: "stu_Convolution16" 2619 | param { 2620 | lr_mult: 1 2621 | decay_mult: 1 2622 | } 2623 | param { 2624 | lr_mult: 2 2625 | decay_mult: 0 2626 | } 2627 | convolution_param { 2628 | num_output: 32 2629 | pad: 1 2630 | kernel_size: 3 2631 | stride: 2 2632 | weight_filler { 2633 | type: "msra" 2634 | } 2635 | bias_filler { 2636 | type: "constant" 2637 | value: 0 2638 | } 2639 | } 2640 | } 2641 | layer { 2642 | name: "stu_BatchNorm16" 2643 | type: "BatchNorm" 2644 | bottom: "stu_Convolution16" 2645 | top: "stu_Convolution16" 2646 | param { 2647 | lr_mult: 0 2648 | decay_mult: 0 2649 | } 2650 | param { 2651 | lr_mult: 0 2652 | decay_mult: 0 2653 | } 2654 | param { 2655 | lr_mult: 0 2656 | decay_mult: 0 2657 | } 2658 | } 2659 | layer { 2660 | name: "stu_Scale16" 2661 | type: "Scale" 2662 | bottom: "stu_Convolution16" 2663 | top: "stu_Convolution16" 2664 | scale_param { 2665 | bias_term: true 2666 | } 2667 | } 2668 | layer { 2669 | name: "stu_ReLU14" 2670 | type: "ReLU" 2671 | bottom: "stu_Convolution16" 2672 | top: "stu_Convolution16" 2673 
| } 2674 | layer { 2675 | name: "stu_Convolution17" 2676 | type: "Convolution" 2677 | bottom: "stu_Convolution16" 2678 | top: "stu_Convolution17" 2679 | param { 2680 | lr_mult: 1 2681 | decay_mult: 1 2682 | } 2683 | param { 2684 | lr_mult: 2 2685 | decay_mult: 0 2686 | } 2687 | convolution_param { 2688 | num_output: 32 2689 | pad: 1 2690 | kernel_size: 3 2691 | stride: 1 2692 | weight_filler { 2693 | type: "msra" 2694 | } 2695 | bias_filler { 2696 | type: "constant" 2697 | value: 0 2698 | } 2699 | } 2700 | } 2701 | layer { 2702 | name: "stu_BatchNorm17" 2703 | type: "BatchNorm" 2704 | bottom: "stu_Convolution17" 2705 | top: "stu_Convolution17" 2706 | param { 2707 | lr_mult: 0 2708 | decay_mult: 0 2709 | } 2710 | param { 2711 | lr_mult: 0 2712 | decay_mult: 0 2713 | } 2714 | param { 2715 | lr_mult: 0 2716 | decay_mult: 0 2717 | } 2718 | } 2719 | layer { 2720 | name: "stu_Scale17" 2721 | type: "Scale" 2722 | bottom: "stu_Convolution17" 2723 | top: "stu_Convolution17" 2724 | scale_param { 2725 | bias_term: true 2726 | } 2727 | } 2728 | layer { 2729 | name: "stu_Eltwise7" 2730 | type: "Eltwise" 2731 | bottom: "stu_Convolution15" 2732 | bottom: "stu_Convolution17" 2733 | top: "stu_Eltwise7" 2734 | eltwise_param { 2735 | operation: SUM 2736 | } 2737 | } 2738 | layer { 2739 | name: "stu_ReLU15" 2740 | type: "ReLU" 2741 | bottom: "stu_Eltwise7" 2742 | top: "stu_Eltwise7" 2743 | } 2744 | layer { 2745 | name: "stu_Convolution18" 2746 | type: "Convolution" 2747 | bottom: "stu_Eltwise7" 2748 | top: "stu_Convolution18" 2749 | param { 2750 | lr_mult: 1 2751 | decay_mult: 1 2752 | } 2753 | param { 2754 | lr_mult: 2 2755 | decay_mult: 0 2756 | } 2757 | convolution_param { 2758 | num_output: 32 2759 | pad: 1 2760 | kernel_size: 3 2761 | stride: 1 2762 | weight_filler { 2763 | type: "msra" 2764 | } 2765 | bias_filler { 2766 | type: "constant" 2767 | value: 0 2768 | } 2769 | } 2770 | } 2771 | layer { 2772 | name: "stu_BatchNorm18" 2773 | type: "BatchNorm" 2774 | bottom: 
"stu_Convolution18" 2775 | top: "stu_Convolution18" 2776 | param { 2777 | lr_mult: 0 2778 | decay_mult: 0 2779 | } 2780 | param { 2781 | lr_mult: 0 2782 | decay_mult: 0 2783 | } 2784 | param { 2785 | lr_mult: 0 2786 | decay_mult: 0 2787 | } 2788 | } 2789 | layer { 2790 | name: "stu_Scale18" 2791 | type: "Scale" 2792 | bottom: "stu_Convolution18" 2793 | top: "stu_Convolution18" 2794 | scale_param { 2795 | bias_term: true 2796 | } 2797 | } 2798 | layer { 2799 | name: "stu_ReLU16" 2800 | type: "ReLU" 2801 | bottom: "stu_Convolution18" 2802 | top: "stu_Convolution18" 2803 | } 2804 | layer { 2805 | name: "stu_Convolution19" 2806 | type: "Convolution" 2807 | bottom: "stu_Convolution18" 2808 | top: "stu_Convolution19" 2809 | param { 2810 | lr_mult: 1 2811 | decay_mult: 1 2812 | } 2813 | param { 2814 | lr_mult: 2 2815 | decay_mult: 0 2816 | } 2817 | convolution_param { 2818 | num_output: 32 2819 | pad: 1 2820 | kernel_size: 3 2821 | stride: 1 2822 | weight_filler { 2823 | type: "msra" 2824 | } 2825 | bias_filler { 2826 | type: "constant" 2827 | value: 0 2828 | } 2829 | } 2830 | } 2831 | layer { 2832 | name: "stu_BatchNorm19" 2833 | type: "BatchNorm" 2834 | bottom: "stu_Convolution19" 2835 | top: "stu_Convolution19" 2836 | param { 2837 | lr_mult: 0 2838 | decay_mult: 0 2839 | } 2840 | param { 2841 | lr_mult: 0 2842 | decay_mult: 0 2843 | } 2844 | param { 2845 | lr_mult: 0 2846 | decay_mult: 0 2847 | } 2848 | } 2849 | layer { 2850 | name: "stu_Scale19" 2851 | type: "Scale" 2852 | bottom: "stu_Convolution19" 2853 | top: "stu_Convolution19" 2854 | scale_param { 2855 | bias_term: true 2856 | } 2857 | } 2858 | layer { 2859 | name: "stu_Eltwise8" 2860 | type: "Eltwise" 2861 | bottom: "stu_Eltwise7" 2862 | bottom: "stu_Convolution19" 2863 | top: "stu_Eltwise8" 2864 | eltwise_param { 2865 | operation: SUM 2866 | } 2867 | } 2868 | layer { 2869 | name: "stu_ReLU17" 2870 | type: "ReLU" 2871 | bottom: "stu_Eltwise8" 2872 | top: "stu_Eltwise8" 2873 | } 2874 | layer { 2875 | name: 
"stu_Convolution20" 2876 | type: "Convolution" 2877 | bottom: "stu_Eltwise8" 2878 | top: "stu_Convolution20" 2879 | param { 2880 | lr_mult: 1 2881 | decay_mult: 1 2882 | } 2883 | param { 2884 | lr_mult: 2 2885 | decay_mult: 0 2886 | } 2887 | convolution_param { 2888 | num_output: 32 2889 | pad: 1 2890 | kernel_size: 3 2891 | stride: 1 2892 | weight_filler { 2893 | type: "msra" 2894 | } 2895 | bias_filler { 2896 | type: "constant" 2897 | value: 0 2898 | } 2899 | } 2900 | } 2901 | layer { 2902 | name: "stu_BatchNorm20" 2903 | type: "BatchNorm" 2904 | bottom: "stu_Convolution20" 2905 | top: "stu_Convolution20" 2906 | param { 2907 | lr_mult: 0 2908 | decay_mult: 0 2909 | } 2910 | param { 2911 | lr_mult: 0 2912 | decay_mult: 0 2913 | } 2914 | param { 2915 | lr_mult: 0 2916 | decay_mult: 0 2917 | } 2918 | } 2919 | layer { 2920 | name: "stu_Scale20" 2921 | type: "Scale" 2922 | bottom: "stu_Convolution20" 2923 | top: "stu_Convolution20" 2924 | scale_param { 2925 | bias_term: true 2926 | } 2927 | } 2928 | layer { 2929 | name: "stu_ReLU18" 2930 | type: "ReLU" 2931 | bottom: "stu_Convolution20" 2932 | top: "stu_Convolution20" 2933 | } 2934 | layer { 2935 | name: "stu_Convolution21" 2936 | type: "Convolution" 2937 | bottom: "stu_Convolution20" 2938 | top: "stu_Convolution21" 2939 | param { 2940 | lr_mult: 1 2941 | decay_mult: 1 2942 | } 2943 | param { 2944 | lr_mult: 2 2945 | decay_mult: 0 2946 | } 2947 | convolution_param { 2948 | num_output: 32 2949 | pad: 1 2950 | kernel_size: 3 2951 | stride: 1 2952 | weight_filler { 2953 | type: "msra" 2954 | } 2955 | bias_filler { 2956 | type: "constant" 2957 | value: 0 2958 | } 2959 | } 2960 | } 2961 | layer { 2962 | name: "stu_BatchNorm21" 2963 | type: "BatchNorm" 2964 | bottom: "stu_Convolution21" 2965 | top: "stu_Convolution21" 2966 | param { 2967 | lr_mult: 0 2968 | decay_mult: 0 2969 | } 2970 | param { 2971 | lr_mult: 0 2972 | decay_mult: 0 2973 | } 2974 | param { 2975 | lr_mult: 0 2976 | decay_mult: 0 2977 | } 2978 | } 2979 | 
layer { 2980 | name: "stu_Scale21" 2981 | type: "Scale" 2982 | bottom: "stu_Convolution21" 2983 | top: "stu_Convolution21" 2984 | scale_param { 2985 | bias_term: true 2986 | } 2987 | } 2988 | layer { 2989 | name: "stu_Eltwise9" 2990 | type: "Eltwise" 2991 | bottom: "stu_Eltwise8" 2992 | bottom: "stu_Convolution21" 2993 | top: "stu_Eltwise9" 2994 | eltwise_param { 2995 | operation: SUM 2996 | } 2997 | } 2998 | layer { 2999 | name: "stu_ReLU19" 3000 | type: "ReLU" 3001 | bottom: "stu_Eltwise9" 3002 | top: "stu_Eltwise9" 3003 | } 3004 | layer { 3005 | name: "stu_Pooling1" 3006 | type: "Pooling" 3007 | bottom: "stu_Eltwise9" 3008 | top: "stu_Pooling1" 3009 | pooling_param { 3010 | pool: AVE 3011 | global_pooling: true 3012 | } 3013 | } 3014 | layer { 3015 | name: "stu_bem" 3016 | type: "BatchEuclideanMap" 3017 | bottom: "stu_Pooling1" 3018 | top: "stu_bem" 3019 | } 3020 | layer { 3021 | name: "bem_loss" 3022 | type: "EuclideanLoss" 3023 | bottom: "bem" 3024 | bottom: "stu_bem" 3025 | top: "bem_loss" 3026 | loss_weight: 0.005 3027 | } 3028 | layer { 3029 | name: "stu_InnerProduct1" 3030 | type: "InnerProduct" 3031 | bottom: "stu_Pooling1" 3032 | top: "stu_InnerProduct1" 3033 | param { 3034 | lr_mult: 1 3035 | decay_mult: 1 3036 | } 3037 | param { 3038 | lr_mult: 2 3039 | decay_mult: 1 3040 | } 3041 | inner_product_param { 3042 | num_output: 10 3043 | weight_filler { 3044 | type: "msra" 3045 | } 3046 | bias_filler { 3047 | type: "constant" 3048 | value: 0 3049 | } 3050 | } 3051 | } 3052 | layer { 3053 | name: "stu_SoftmaxWithLoss1" 3054 | type: "SoftmaxWithLoss" 3055 | bottom: "stu_InnerProduct1" 3056 | bottom: "Data2" 3057 | top: "stu_SoftmaxWithLoss1" 3058 | } 3059 | layer { 3060 | name: "stu_Accuracy1" 3061 | type: "Accuracy" 3062 | bottom: "stu_InnerProduct1" 3063 | bottom: "Data2" 3064 | top: "stu_Accuracy1" 3065 | include { 3066 | phase: TEST 3067 | } 3068 | } 3069 | -------------------------------------------------------------------------------- 
/IRG_Transformation.prototxt: -------------------------------------------------------------------------------- 1 | name: "resnet_cifar10_IRGTransformation" 2 | layer { 3 | name: "Data1" 4 | type: "Data" 5 | top: "Data1" 6 | top: "Data2" 7 | include { 8 | phase: TRAIN 9 | } 10 | transform_param { 11 | mean_file: "yourpath/mean_pad.binaryproto" 12 | crop_size: 32 13 | mirror:true 14 | } 15 | data_param { 16 | source: "yourpath/cifar-10-train_lmdb" 17 | batch_size: 64 18 | backend: LMDB 19 | } 20 | } 21 | layer { 22 | name: "Data1" 23 | type: "Data" 24 | top: "Data1" 25 | top: "Data2" 26 | include { 27 | phase: TEST 28 | } 29 | transform_param { 30 | mean_file: "yourpath/mean.binaryproto" 31 | } 32 | data_param { 33 | source: "yourpath/cifar10_test_lmdb" 34 | batch_size: 100 35 | backend: LMDB 36 | } 37 | } 38 | layer { 39 | name: "Convolution1" 40 | type: "Convolution" 41 | bottom: "Data1" 42 | top: "Convolution1" 43 | param { 44 | lr_mult: 0 45 | decay_mult: 0 46 | } 47 | param { 48 | lr_mult: 0 49 | decay_mult: 0 50 | } 51 | convolution_param { 52 | num_output: 16 53 | pad: 1 54 | kernel_size: 3 55 | stride: 1 56 | weight_filler { 57 | type: "msra" 58 | } 59 | bias_filler { 60 | type: "constant" 61 | value: 0 62 | } 63 | } 64 | } 65 | layer { 66 | name: "BatchNorm1" 67 | type: "BatchNorm" 68 | bottom: "Convolution1" 69 | top: "Convolution1" 70 | param { 71 | lr_mult: 0 72 | decay_mult: 0 73 | } 74 | param { 75 | lr_mult: 0 76 | decay_mult: 0 77 | } 78 | param { 79 | lr_mult: 0 80 | decay_mult: 0 81 | } 82 | batch_norm_param { 83 | use_global_stats: true 84 | } 85 | } 86 | layer { 87 | name: "Scale1" 88 | type: "Scale" 89 | bottom: "Convolution1" 90 | top: "Convolution1" 91 | param { 92 | lr_mult: 0 93 | decay_mult: 0 94 | } 95 | param { 96 | lr_mult: 0 97 | decay_mult: 0 98 | } 99 | scale_param { 100 | bias_term: true 101 | } 102 | } 103 | layer { 104 | name: "ReLU1" 105 | type: "ReLU" 106 | bottom: "Convolution1" 107 | top: "Convolution1" 108 | } 109 | layer { 110 
| name: "Convolution2" 111 | type: "Convolution" 112 | bottom: "Convolution1" 113 | top: "Convolution2" 114 | param { 115 | lr_mult: 0 116 | decay_mult: 0 117 | } 118 | param { 119 | lr_mult: 0 120 | decay_mult: 0 121 | } 122 | convolution_param { 123 | num_output: 16 124 | pad: 1 125 | kernel_size: 3 126 | stride: 1 127 | weight_filler { 128 | type: "msra" 129 | } 130 | bias_filler { 131 | type: "constant" 132 | value: 0 133 | } 134 | } 135 | } 136 | layer { 137 | name: "BatchNorm2" 138 | type: "BatchNorm" 139 | bottom: "Convolution2" 140 | top: "Convolution2" 141 | param { 142 | lr_mult: 0 143 | decay_mult: 0 144 | } 145 | param { 146 | lr_mult: 0 147 | decay_mult: 0 148 | } 149 | param { 150 | lr_mult: 0 151 | decay_mult: 0 152 | } 153 | batch_norm_param { 154 | use_global_stats: true 155 | } 156 | } 157 | layer { 158 | name: "Scale2" 159 | type: "Scale" 160 | bottom: "Convolution2" 161 | top: "Convolution2" 162 | param { 163 | lr_mult: 0 164 | decay_mult: 0 165 | } 166 | param { 167 | lr_mult: 0 168 | decay_mult: 0 169 | } 170 | scale_param { 171 | bias_term: true 172 | } 173 | } 174 | layer { 175 | name: "ReLU2" 176 | type: "ReLU" 177 | bottom: "Convolution2" 178 | top: "Convolution2" 179 | } 180 | layer { 181 | name: "Convolution3" 182 | type: "Convolution" 183 | bottom: "Convolution2" 184 | top: "Convolution3" 185 | param { 186 | lr_mult: 0 187 | decay_mult: 0 188 | } 189 | param { 190 | lr_mult: 0 191 | decay_mult: 0 192 | } 193 | convolution_param { 194 | num_output: 16 195 | pad: 1 196 | kernel_size: 3 197 | stride: 1 198 | weight_filler { 199 | type: "msra" 200 | } 201 | bias_filler { 202 | type: "constant" 203 | value: 0 204 | } 205 | } 206 | } 207 | layer { 208 | name: "BatchNorm3" 209 | type: "BatchNorm" 210 | bottom: "Convolution3" 211 | top: "Convolution3" 212 | param { 213 | lr_mult: 0 214 | decay_mult: 0 215 | } 216 | param { 217 | lr_mult: 0 218 | decay_mult: 0 219 | } 220 | param { 221 | lr_mult: 0 222 | decay_mult: 0 223 | } 224 | 
batch_norm_param { 225 | use_global_stats: true 226 | } 227 | } 228 | layer { 229 | name: "Scale3" 230 | type: "Scale" 231 | bottom: "Convolution3" 232 | top: "Convolution3" 233 | param { 234 | lr_mult: 0 235 | decay_mult: 0 236 | } 237 | param { 238 | lr_mult: 0 239 | decay_mult: 0 240 | } 241 | scale_param { 242 | bias_term: true 243 | } 244 | } 245 | layer { 246 | name: "Eltwise1" 247 | type: "Eltwise" 248 | bottom: "Convolution1" 249 | bottom: "Convolution3" 250 | top: "Eltwise1" 251 | eltwise_param { 252 | operation: SUM 253 | } 254 | } 255 | layer { 256 | name: "ReLU3" 257 | type: "ReLU" 258 | bottom: "Eltwise1" 259 | top: "Eltwise1" 260 | } 261 | layer { 262 | name: "Convolution4" 263 | type: "Convolution" 264 | bottom: "Eltwise1" 265 | top: "Convolution4" 266 | param { 267 | lr_mult: 0 268 | decay_mult: 0 269 | } 270 | param { 271 | lr_mult: 0 272 | decay_mult: 0 273 | } 274 | convolution_param { 275 | num_output: 16 276 | pad: 1 277 | kernel_size: 3 278 | stride: 1 279 | weight_filler { 280 | type: "msra" 281 | } 282 | bias_filler { 283 | type: "constant" 284 | value: 0 285 | } 286 | } 287 | } 288 | layer { 289 | name: "BatchNorm4" 290 | type: "BatchNorm" 291 | bottom: "Convolution4" 292 | top: "Convolution4" 293 | param { 294 | lr_mult: 0 295 | decay_mult: 0 296 | } 297 | param { 298 | lr_mult: 0 299 | decay_mult: 0 300 | } 301 | param { 302 | lr_mult: 0 303 | decay_mult: 0 304 | } 305 | batch_norm_param { 306 | use_global_stats: true 307 | } 308 | } 309 | layer { 310 | name: "Scale4" 311 | type: "Scale" 312 | bottom: "Convolution4" 313 | top: "Convolution4" 314 | param { 315 | lr_mult: 0 316 | decay_mult: 0 317 | } 318 | param { 319 | lr_mult: 0 320 | decay_mult: 0 321 | } 322 | scale_param { 323 | bias_term: true 324 | } 325 | } 326 | layer { 327 | name: "ReLU4" 328 | type: "ReLU" 329 | bottom: "Convolution4" 330 | top: "Convolution4" 331 | } 332 | layer { 333 | name: "Convolution5" 334 | type: "Convolution" 335 | bottom: "Convolution4" 336 | top: 
"Convolution5" 337 | param { 338 | lr_mult: 0 339 | decay_mult: 0 340 | } 341 | param { 342 | lr_mult: 0 343 | decay_mult: 0 344 | } 345 | convolution_param { 346 | num_output: 16 347 | pad: 1 348 | kernel_size: 3 349 | stride: 1 350 | weight_filler { 351 | type: "msra" 352 | } 353 | bias_filler { 354 | type: "constant" 355 | value: 0 356 | } 357 | } 358 | } 359 | layer { 360 | name: "BatchNorm5" 361 | type: "BatchNorm" 362 | bottom: "Convolution5" 363 | top: "Convolution5" 364 | param { 365 | lr_mult: 0 366 | decay_mult: 0 367 | } 368 | param { 369 | lr_mult: 0 370 | decay_mult: 0 371 | } 372 | param { 373 | lr_mult: 0 374 | decay_mult: 0 375 | } 376 | batch_norm_param { 377 | use_global_stats: true 378 | } 379 | } 380 | layer { 381 | name: "Scale5" 382 | type: "Scale" 383 | bottom: "Convolution5" 384 | top: "Convolution5" 385 | param { 386 | lr_mult: 0 387 | decay_mult: 0 388 | } 389 | param { 390 | lr_mult: 0 391 | decay_mult: 0 392 | } 393 | scale_param { 394 | bias_term: true 395 | } 396 | } 397 | layer { 398 | name: "Eltwise2" 399 | type: "Eltwise" 400 | bottom: "Eltwise1" 401 | bottom: "Convolution5" 402 | top: "Eltwise2" 403 | eltwise_param { 404 | operation: SUM 405 | } 406 | } 407 | layer { 408 | name: "ReLU5" 409 | type: "ReLU" 410 | bottom: "Eltwise2" 411 | top: "Eltwise2" 412 | } 413 | layer { 414 | name: "Convolution6" 415 | type: "Convolution" 416 | bottom: "Eltwise2" 417 | top: "Convolution6" 418 | param { 419 | lr_mult: 0 420 | decay_mult: 0 421 | } 422 | param { 423 | lr_mult: 0 424 | decay_mult: 0 425 | } 426 | convolution_param { 427 | num_output: 16 428 | pad: 1 429 | kernel_size: 3 430 | stride: 1 431 | weight_filler { 432 | type: "msra" 433 | } 434 | bias_filler { 435 | type: "constant" 436 | value: 0 437 | } 438 | } 439 | } 440 | layer { 441 | name: "BatchNorm6" 442 | type: "BatchNorm" 443 | bottom: "Convolution6" 444 | top: "Convolution6" 445 | param { 446 | lr_mult: 0 447 | decay_mult: 0 448 | } 449 | param { 450 | lr_mult: 0 451 | 
decay_mult: 0 452 | } 453 | param { 454 | lr_mult: 0 455 | decay_mult: 0 456 | } 457 | batch_norm_param { 458 | use_global_stats: true 459 | } 460 | } 461 | layer { 462 | name: "Scale6" 463 | type: "Scale" 464 | bottom: "Convolution6" 465 | top: "Convolution6" 466 | param { 467 | lr_mult: 0 468 | decay_mult: 0 469 | } 470 | param { 471 | lr_mult: 0 472 | decay_mult: 0 473 | } 474 | scale_param { 475 | bias_term: true 476 | } 477 | } 478 | layer { 479 | name: "ReLU6" 480 | type: "ReLU" 481 | bottom: "Convolution6" 482 | top: "Convolution6" 483 | } 484 | layer { 485 | name: "Convolution7" 486 | type: "Convolution" 487 | bottom: "Convolution6" 488 | top: "Convolution7" 489 | param { 490 | lr_mult: 0 491 | decay_mult: 0 492 | } 493 | param { 494 | lr_mult: 0 495 | decay_mult: 0 496 | } 497 | convolution_param { 498 | num_output: 16 499 | pad: 1 500 | kernel_size: 3 501 | stride: 1 502 | weight_filler { 503 | type: "msra" 504 | } 505 | bias_filler { 506 | type: "constant" 507 | value: 0 508 | } 509 | } 510 | } 511 | layer { 512 | name: "BatchNorm7" 513 | type: "BatchNorm" 514 | bottom: "Convolution7" 515 | top: "Convolution7" 516 | param { 517 | lr_mult: 0 518 | decay_mult: 0 519 | } 520 | param { 521 | lr_mult: 0 522 | decay_mult: 0 523 | } 524 | param { 525 | lr_mult: 0 526 | decay_mult: 0 527 | } 528 | batch_norm_param { 529 | use_global_stats: true 530 | } 531 | } 532 | layer { 533 | name: "Scale7" 534 | type: "Scale" 535 | bottom: "Convolution7" 536 | top: "Convolution7" 537 | param { 538 | lr_mult: 0 539 | decay_mult: 0 540 | } 541 | param { 542 | lr_mult: 0 543 | decay_mult: 0 544 | } 545 | scale_param { 546 | bias_term: true 547 | } 548 | } 549 | layer { 550 | name: "Eltwise3" 551 | type: "Eltwise" 552 | bottom: "Eltwise2" 553 | bottom: "Convolution7" 554 | top: "Eltwise3" 555 | eltwise_param { 556 | operation: SUM 557 | } 558 | } 559 | layer { 560 | name: "ReLU7" 561 | type: "ReLU" 562 | bottom: "Eltwise3" 563 | top: "Eltwise3" 564 | } 565 | layer { 566 | name: 
"Convolution8" 567 | type: "Convolution" 568 | bottom: "Eltwise3" 569 | top: "Convolution8" 570 | param { 571 | lr_mult: 0 572 | decay_mult: 0 573 | } 574 | param { 575 | lr_mult: 0 576 | decay_mult: 0 577 | } 578 | convolution_param { 579 | num_output: 32 580 | pad: 0 581 | kernel_size: 1 582 | stride: 2 583 | weight_filler { 584 | type: "msra" 585 | } 586 | bias_filler { 587 | type: "constant" 588 | value: 0 589 | } 590 | } 591 | } 592 | layer { 593 | name: "BatchNorm8" 594 | type: "BatchNorm" 595 | bottom: "Convolution8" 596 | top: "Convolution8" 597 | param { 598 | lr_mult: 0 599 | decay_mult: 0 600 | } 601 | param { 602 | lr_mult: 0 603 | decay_mult: 0 604 | } 605 | param { 606 | lr_mult: 0 607 | decay_mult: 0 608 | } 609 | batch_norm_param { 610 | use_global_stats: true 611 | } 612 | } 613 | layer { 614 | name: "Scale8" 615 | type: "Scale" 616 | bottom: "Convolution8" 617 | top: "Convolution8" 618 | param { 619 | lr_mult: 0 620 | decay_mult: 0 621 | } 622 | param { 623 | lr_mult: 0 624 | decay_mult: 0 625 | } 626 | scale_param { 627 | bias_term: true 628 | } 629 | } 630 | layer { 631 | name: "Convolution9" 632 | type: "Convolution" 633 | bottom: "Eltwise3" 634 | top: "Convolution9" 635 | param { 636 | lr_mult: 0 637 | decay_mult: 0 638 | } 639 | param { 640 | lr_mult: 0 641 | decay_mult: 0 642 | } 643 | convolution_param { 644 | num_output: 32 645 | pad: 1 646 | kernel_size: 3 647 | stride: 2 648 | weight_filler { 649 | type: "msra" 650 | } 651 | bias_filler { 652 | type: "constant" 653 | value: 0 654 | } 655 | } 656 | } 657 | layer { 658 | name: "BatchNorm9" 659 | type: "BatchNorm" 660 | bottom: "Convolution9" 661 | top: "Convolution9" 662 | param { 663 | lr_mult: 0 664 | decay_mult: 0 665 | } 666 | param { 667 | lr_mult: 0 668 | decay_mult: 0 669 | } 670 | param { 671 | lr_mult: 0 672 | decay_mult: 0 673 | } 674 | batch_norm_param { 675 | use_global_stats: true 676 | } 677 | } 678 | layer { 679 | name: "Scale9" 680 | type: "Scale" 681 | bottom: 
"Convolution9" 682 | top: "Convolution9" 683 | param { 684 | lr_mult: 0 685 | decay_mult: 0 686 | } 687 | param { 688 | lr_mult: 0 689 | decay_mult: 0 690 | } 691 | scale_param { 692 | bias_term: true 693 | } 694 | } 695 | layer { 696 | name: "ReLU8" 697 | type: "ReLU" 698 | bottom: "Convolution9" 699 | top: "Convolution9" 700 | } 701 | layer { 702 | name: "Convolution10" 703 | type: "Convolution" 704 | bottom: "Convolution9" 705 | top: "Convolution10" 706 | param { 707 | lr_mult: 0 708 | decay_mult: 0 709 | } 710 | param { 711 | lr_mult: 0 712 | decay_mult: 0 713 | } 714 | convolution_param { 715 | num_output: 32 716 | pad: 1 717 | kernel_size: 3 718 | stride: 1 719 | weight_filler { 720 | type: "msra" 721 | } 722 | bias_filler { 723 | type: "constant" 724 | value: 0 725 | } 726 | } 727 | } 728 | layer { 729 | name: "BatchNorm10" 730 | type: "BatchNorm" 731 | bottom: "Convolution10" 732 | top: "Convolution10" 733 | param { 734 | lr_mult: 0 735 | decay_mult: 0 736 | } 737 | param { 738 | lr_mult: 0 739 | decay_mult: 0 740 | } 741 | param { 742 | lr_mult: 0 743 | decay_mult: 0 744 | } 745 | batch_norm_param { 746 | use_global_stats: true 747 | } 748 | } 749 | layer { 750 | name: "Scale10" 751 | type: "Scale" 752 | bottom: "Convolution10" 753 | top: "Convolution10" 754 | param { 755 | lr_mult: 0 756 | decay_mult: 0 757 | } 758 | param { 759 | lr_mult: 0 760 | decay_mult: 0 761 | } 762 | scale_param { 763 | bias_term: true 764 | } 765 | } 766 | layer { 767 | name: "Eltwise4" 768 | type: "Eltwise" 769 | bottom: "Convolution8" 770 | bottom: "Convolution10" 771 | top: "Eltwise4" 772 | eltwise_param { 773 | operation: SUM 774 | } 775 | } 776 | layer { 777 | name: "ReLU9" 778 | type: "ReLU" 779 | bottom: "Eltwise4" 780 | top: "Eltwise4" 781 | } 782 | layer { 783 | name: "Convolution11" 784 | type: "Convolution" 785 | bottom: "Eltwise4" 786 | top: "Convolution11" 787 | param { 788 | lr_mult: 0 789 | decay_mult: 0 790 | } 791 | param { 792 | lr_mult: 0 793 | decay_mult: 0 
794 | } 795 | convolution_param { 796 | num_output: 32 797 | pad: 1 798 | kernel_size: 3 799 | stride: 1 800 | weight_filler { 801 | type: "msra" 802 | } 803 | bias_filler { 804 | type: "constant" 805 | value: 0 806 | } 807 | } 808 | } 809 | layer { 810 | name: "BatchNorm11" 811 | type: "BatchNorm" 812 | bottom: "Convolution11" 813 | top: "Convolution11" 814 | param { 815 | lr_mult: 0 816 | decay_mult: 0 817 | } 818 | param { 819 | lr_mult: 0 820 | decay_mult: 0 821 | } 822 | param { 823 | lr_mult: 0 824 | decay_mult: 0 825 | } 826 | batch_norm_param { 827 | use_global_stats: true 828 | } 829 | } 830 | layer { 831 | name: "Scale11" 832 | type: "Scale" 833 | bottom: "Convolution11" 834 | top: "Convolution11" 835 | param { 836 | lr_mult: 0 837 | decay_mult: 0 838 | } 839 | param { 840 | lr_mult: 0 841 | decay_mult: 0 842 | } 843 | scale_param { 844 | bias_term: true 845 | } 846 | } 847 | layer { 848 | name: "ReLU10" 849 | type: "ReLU" 850 | bottom: "Convolution11" 851 | top: "Convolution11" 852 | } 853 | layer { 854 | name: "Convolution12" 855 | type: "Convolution" 856 | bottom: "Convolution11" 857 | top: "Convolution12" 858 | param { 859 | lr_mult: 0 860 | decay_mult: 0 861 | } 862 | param { 863 | lr_mult: 0 864 | decay_mult: 0 865 | } 866 | convolution_param { 867 | num_output: 32 868 | pad: 1 869 | kernel_size: 3 870 | stride: 1 871 | weight_filler { 872 | type: "msra" 873 | } 874 | bias_filler { 875 | type: "constant" 876 | value: 0 877 | } 878 | } 879 | } 880 | layer { 881 | name: "BatchNorm12" 882 | type: "BatchNorm" 883 | bottom: "Convolution12" 884 | top: "Convolution12" 885 | param { 886 | lr_mult: 0 887 | decay_mult: 0 888 | } 889 | param { 890 | lr_mult: 0 891 | decay_mult: 0 892 | } 893 | param { 894 | lr_mult: 0 895 | decay_mult: 0 896 | } 897 | batch_norm_param { 898 | use_global_stats: true 899 | } 900 | } 901 | layer { 902 | name: "Scale12" 903 | type: "Scale" 904 | bottom: "Convolution12" 905 | top: "Convolution12" 906 | param { 907 | lr_mult: 0 908 
| decay_mult: 0 909 | } 910 | param { 911 | lr_mult: 0 912 | decay_mult: 0 913 | } 914 | scale_param { 915 | bias_term: true 916 | } 917 | } 918 | layer { 919 | name: "Eltwise5" 920 | type: "Eltwise" 921 | bottom: "Eltwise4" 922 | bottom: "Convolution12" 923 | top: "Eltwise5" 924 | eltwise_param { 925 | operation: SUM 926 | } 927 | } 928 | layer { 929 | name: "ReLU11" 930 | type: "ReLU" 931 | bottom: "Eltwise5" 932 | top: "Eltwise5" 933 | } 934 | layer { 935 | name: "Convolution13" 936 | type: "Convolution" 937 | bottom: "Eltwise5" 938 | top: "Convolution13" 939 | param { 940 | lr_mult: 0 941 | decay_mult: 0 942 | } 943 | param { 944 | lr_mult: 0 945 | decay_mult: 0 946 | } 947 | convolution_param { 948 | num_output: 32 949 | pad: 1 950 | kernel_size: 3 951 | stride: 1 952 | weight_filler { 953 | type: "msra" 954 | } 955 | bias_filler { 956 | type: "constant" 957 | value: 0 958 | } 959 | } 960 | } 961 | layer { 962 | name: "BatchNorm13" 963 | type: "BatchNorm" 964 | bottom: "Convolution13" 965 | top: "Convolution13" 966 | param { 967 | lr_mult: 0 968 | decay_mult: 0 969 | } 970 | param { 971 | lr_mult: 0 972 | decay_mult: 0 973 | } 974 | param { 975 | lr_mult: 0 976 | decay_mult: 0 977 | } 978 | batch_norm_param { 979 | use_global_stats: true 980 | } 981 | } 982 | layer { 983 | name: "Scale13" 984 | type: "Scale" 985 | bottom: "Convolution13" 986 | top: "Convolution13" 987 | param { 988 | lr_mult: 0 989 | decay_mult: 0 990 | } 991 | param { 992 | lr_mult: 0 993 | decay_mult: 0 994 | } 995 | scale_param { 996 | bias_term: true 997 | } 998 | } 999 | layer { 1000 | name: "ReLU12" 1001 | type: "ReLU" 1002 | bottom: "Convolution13" 1003 | top: "Convolution13" 1004 | } 1005 | layer { 1006 | name: "Convolution14" 1007 | type: "Convolution" 1008 | bottom: "Convolution13" 1009 | top: "Convolution14" 1010 | param { 1011 | lr_mult: 0 1012 | decay_mult: 0 1013 | } 1014 | param { 1015 | lr_mult: 0 1016 | decay_mult: 0 1017 | } 1018 | convolution_param { 1019 | num_output: 32 
1020 | pad: 1 1021 | kernel_size: 3 1022 | stride: 1 1023 | weight_filler { 1024 | type: "msra" 1025 | } 1026 | bias_filler { 1027 | type: "constant" 1028 | value: 0 1029 | } 1030 | } 1031 | } 1032 | layer { 1033 | name: "BatchNorm14" 1034 | type: "BatchNorm" 1035 | bottom: "Convolution14" 1036 | top: "Convolution14" 1037 | param { 1038 | lr_mult: 0 1039 | decay_mult: 0 1040 | } 1041 | param { 1042 | lr_mult: 0 1043 | decay_mult: 0 1044 | } 1045 | param { 1046 | lr_mult: 0 1047 | decay_mult: 0 1048 | } 1049 | batch_norm_param { 1050 | use_global_stats: true 1051 | } 1052 | } 1053 | layer { 1054 | name: "Scale14" 1055 | type: "Scale" 1056 | bottom: "Convolution14" 1057 | top: "Convolution14" 1058 | param { 1059 | lr_mult: 0 1060 | decay_mult: 0 1061 | } 1062 | param { 1063 | lr_mult: 0 1064 | decay_mult: 0 1065 | } 1066 | scale_param { 1067 | bias_term: true 1068 | } 1069 | } 1070 | layer { 1071 | name: "Eltwise6" 1072 | type: "Eltwise" 1073 | bottom: "Eltwise5" 1074 | bottom: "Convolution14" 1075 | top: "Eltwise6" 1076 | eltwise_param { 1077 | operation: SUM 1078 | } 1079 | } 1080 | layer { 1081 | name: "ReLU13" 1082 | type: "ReLU" 1083 | bottom: "Eltwise6" 1084 | top: "Eltwise6" 1085 | } 1086 | layer { 1087 | name: "Convolution15" 1088 | type: "Convolution" 1089 | bottom: "Eltwise6" 1090 | top: "Convolution15" 1091 | param { 1092 | lr_mult: 0 1093 | decay_mult: 0 1094 | } 1095 | param { 1096 | lr_mult: 0 1097 | decay_mult: 0 1098 | } 1099 | convolution_param { 1100 | num_output: 64 1101 | pad: 0 1102 | kernel_size: 1 1103 | stride: 2 1104 | weight_filler { 1105 | type: "msra" 1106 | } 1107 | bias_filler { 1108 | type: "constant" 1109 | value: 0 1110 | } 1111 | } 1112 | } 1113 | layer { 1114 | name: "BatchNorm15" 1115 | type: "BatchNorm" 1116 | bottom: "Convolution15" 1117 | top: "Convolution15" 1118 | param { 1119 | lr_mult: 0 1120 | decay_mult: 0 1121 | } 1122 | param { 1123 | lr_mult: 0 1124 | decay_mult: 0 1125 | } 1126 | param { 1127 | lr_mult: 0 1128 | 
decay_mult: 0 1129 | } 1130 | batch_norm_param { 1131 | use_global_stats: true 1132 | } 1133 | } 1134 | layer { 1135 | name: "Scale15" 1136 | type: "Scale" 1137 | bottom: "Convolution15" 1138 | top: "Convolution15" 1139 | param { 1140 | lr_mult: 0 1141 | decay_mult: 0 1142 | } 1143 | param { 1144 | lr_mult: 0 1145 | decay_mult: 0 1146 | } 1147 | scale_param { 1148 | bias_term: true 1149 | } 1150 | } 1151 | layer { 1152 | name: "Convolution16" 1153 | type: "Convolution" 1154 | bottom: "Eltwise6" 1155 | top: "Convolution16" 1156 | param { 1157 | lr_mult: 0 1158 | decay_mult: 0 1159 | } 1160 | param { 1161 | lr_mult: 0 1162 | decay_mult: 0 1163 | } 1164 | convolution_param { 1165 | num_output: 64 1166 | pad: 1 1167 | kernel_size: 3 1168 | stride: 2 1169 | weight_filler { 1170 | type: "msra" 1171 | } 1172 | bias_filler { 1173 | type: "constant" 1174 | value: 0 1175 | } 1176 | } 1177 | } 1178 | layer { 1179 | name: "BatchNorm16" 1180 | type: "BatchNorm" 1181 | bottom: "Convolution16" 1182 | top: "Convolution16" 1183 | param { 1184 | lr_mult: 0 1185 | decay_mult: 0 1186 | } 1187 | param { 1188 | lr_mult: 0 1189 | decay_mult: 0 1190 | } 1191 | param { 1192 | lr_mult: 0 1193 | decay_mult: 0 1194 | } 1195 | batch_norm_param { 1196 | use_global_stats: true 1197 | } 1198 | } 1199 | layer { 1200 | name: "Scale16" 1201 | type: "Scale" 1202 | bottom: "Convolution16" 1203 | top: "Convolution16" 1204 | param { 1205 | lr_mult: 0 1206 | decay_mult: 0 1207 | } 1208 | param { 1209 | lr_mult: 0 1210 | decay_mult: 0 1211 | } 1212 | scale_param { 1213 | bias_term: true 1214 | } 1215 | } 1216 | layer { 1217 | name: "ReLU14" 1218 | type: "ReLU" 1219 | bottom: "Convolution16" 1220 | top: "Convolution16" 1221 | } 1222 | layer { 1223 | name: "Convolution17" 1224 | type: "Convolution" 1225 | bottom: "Convolution16" 1226 | top: "Convolution17" 1227 | param { 1228 | lr_mult: 0 1229 | decay_mult: 0 1230 | } 1231 | param { 1232 | lr_mult: 0 1233 | decay_mult: 0 1234 | } 1235 | convolution_param { 
1236 | num_output: 64 1237 | pad: 1 1238 | kernel_size: 3 1239 | stride: 1 1240 | weight_filler { 1241 | type: "msra" 1242 | } 1243 | bias_filler { 1244 | type: "constant" 1245 | value: 0 1246 | } 1247 | } 1248 | } 1249 | layer { 1250 | name: "BatchNorm17" 1251 | type: "BatchNorm" 1252 | bottom: "Convolution17" 1253 | top: "Convolution17" 1254 | param { 1255 | lr_mult: 0 1256 | decay_mult: 0 1257 | } 1258 | param { 1259 | lr_mult: 0 1260 | decay_mult: 0 1261 | } 1262 | param { 1263 | lr_mult: 0 1264 | decay_mult: 0 1265 | } 1266 | batch_norm_param { 1267 | use_global_stats: true 1268 | } 1269 | } 1270 | layer { 1271 | name: "Scale17" 1272 | type: "Scale" 1273 | bottom: "Convolution17" 1274 | top: "Convolution17" 1275 | param { 1276 | lr_mult: 0 1277 | decay_mult: 0 1278 | } 1279 | param { 1280 | lr_mult: 0 1281 | decay_mult: 0 1282 | } 1283 | scale_param { 1284 | bias_term: true 1285 | } 1286 | } 1287 | layer { 1288 | name: "Eltwise7" 1289 | type: "Eltwise" 1290 | bottom: "Convolution15" 1291 | bottom: "Convolution17" 1292 | top: "Eltwise7" 1293 | eltwise_param { 1294 | operation: SUM 1295 | } 1296 | } 1297 | layer { 1298 | name: "ReLU15" 1299 | type: "ReLU" 1300 | bottom: "Eltwise7" 1301 | top: "Eltwise7" 1302 | } 1303 | layer { 1304 | name: "Convolution18" 1305 | type: "Convolution" 1306 | bottom: "Eltwise7" 1307 | top: "Convolution18" 1308 | param { 1309 | lr_mult: 0 1310 | decay_mult: 0 1311 | } 1312 | param { 1313 | lr_mult: 0 1314 | decay_mult: 0 1315 | } 1316 | convolution_param { 1317 | num_output: 64 1318 | pad: 1 1319 | kernel_size: 3 1320 | stride: 1 1321 | weight_filler { 1322 | type: "msra" 1323 | } 1324 | bias_filler { 1325 | type: "constant" 1326 | value: 0 1327 | } 1328 | } 1329 | } 1330 | layer { 1331 | name: "BatchNorm18" 1332 | type: "BatchNorm" 1333 | bottom: "Convolution18" 1334 | top: "Convolution18" 1335 | param { 1336 | lr_mult: 0 1337 | decay_mult: 0 1338 | } 1339 | param { 1340 | lr_mult: 0 1341 | decay_mult: 0 1342 | } 1343 | param { 1344 
| lr_mult: 0 1345 | decay_mult: 0 1346 | } 1347 | batch_norm_param { 1348 | use_global_stats: true 1349 | } 1350 | } 1351 | layer { 1352 | name: "Scale18" 1353 | type: "Scale" 1354 | bottom: "Convolution18" 1355 | top: "Convolution18" 1356 | param { 1357 | lr_mult: 0 1358 | decay_mult: 0 1359 | } 1360 | param { 1361 | lr_mult: 0 1362 | decay_mult: 0 1363 | } 1364 | scale_param { 1365 | bias_term: true 1366 | } 1367 | } 1368 | layer { 1369 | name: "ReLU16" 1370 | type: "ReLU" 1371 | bottom: "Convolution18" 1372 | top: "Convolution18" 1373 | } 1374 | layer { 1375 | name: "Convolution19" 1376 | type: "Convolution" 1377 | bottom: "Convolution18" 1378 | top: "Convolution19" 1379 | param { 1380 | lr_mult: 0 1381 | decay_mult: 0 1382 | } 1383 | param { 1384 | lr_mult: 0 1385 | decay_mult: 0 1386 | } 1387 | convolution_param { 1388 | num_output: 64 1389 | pad: 1 1390 | kernel_size: 3 1391 | stride: 1 1392 | weight_filler { 1393 | type: "msra" 1394 | } 1395 | bias_filler { 1396 | type: "constant" 1397 | value: 0 1398 | } 1399 | } 1400 | } 1401 | layer { 1402 | name: "BatchNorm19" 1403 | type: "BatchNorm" 1404 | bottom: "Convolution19" 1405 | top: "Convolution19" 1406 | param { 1407 | lr_mult: 0 1408 | decay_mult: 0 1409 | } 1410 | param { 1411 | lr_mult: 0 1412 | decay_mult: 0 1413 | } 1414 | param { 1415 | lr_mult: 0 1416 | decay_mult: 0 1417 | } 1418 | batch_norm_param { 1419 | use_global_stats: true 1420 | } 1421 | } 1422 | layer { 1423 | name: "Scale19" 1424 | type: "Scale" 1425 | bottom: "Convolution19" 1426 | top: "Convolution19" 1427 | param { 1428 | lr_mult: 0 1429 | decay_mult: 0 1430 | } 1431 | param { 1432 | lr_mult: 0 1433 | decay_mult: 0 1434 | } 1435 | scale_param { 1436 | bias_term: true 1437 | } 1438 | } 1439 | layer { 1440 | name: "Eltwise8" 1441 | type: "Eltwise" 1442 | bottom: "Eltwise7" 1443 | bottom: "Convolution19" 1444 | top: "Eltwise8" 1445 | eltwise_param { 1446 | operation: SUM 1447 | } 1448 | } 1449 | layer { 1450 | name: "ReLU17" 1451 | type: 
"ReLU" 1452 | bottom: "Eltwise8" 1453 | top: "Eltwise8" 1454 | } 1455 | layer { 1456 | name: "Convolution20" 1457 | type: "Convolution" 1458 | bottom: "Eltwise8" 1459 | top: "Convolution20" 1460 | param { 1461 | lr_mult: 0 1462 | decay_mult: 0 1463 | } 1464 | param { 1465 | lr_mult: 0 1466 | decay_mult: 0 1467 | } 1468 | convolution_param { 1469 | num_output: 64 1470 | pad: 1 1471 | kernel_size: 3 1472 | stride: 1 1473 | weight_filler { 1474 | type: "msra" 1475 | } 1476 | bias_filler { 1477 | type: "constant" 1478 | value: 0 1479 | } 1480 | } 1481 | } 1482 | layer { 1483 | name: "BatchNorm20" 1484 | type: "BatchNorm" 1485 | bottom: "Convolution20" 1486 | top: "Convolution20" 1487 | param { 1488 | lr_mult: 0 1489 | decay_mult: 0 1490 | } 1491 | param { 1492 | lr_mult: 0 1493 | decay_mult: 0 1494 | } 1495 | param { 1496 | lr_mult: 0 1497 | decay_mult: 0 1498 | } 1499 | batch_norm_param { 1500 | use_global_stats: true 1501 | } 1502 | } 1503 | layer { 1504 | name: "Scale20" 1505 | type: "Scale" 1506 | bottom: "Convolution20" 1507 | top: "Convolution20" 1508 | param { 1509 | lr_mult: 0 1510 | decay_mult: 0 1511 | } 1512 | param { 1513 | lr_mult: 0 1514 | decay_mult: 0 1515 | } 1516 | scale_param { 1517 | bias_term: true 1518 | } 1519 | } 1520 | layer { 1521 | name: "ReLU18" 1522 | type: "ReLU" 1523 | bottom: "Convolution20" 1524 | top: "Convolution20" 1525 | } 1526 | layer { 1527 | name: "Convolution21" 1528 | type: "Convolution" 1529 | bottom: "Convolution20" 1530 | top: "Convolution21" 1531 | param { 1532 | lr_mult: 0 1533 | decay_mult: 0 1534 | } 1535 | param { 1536 | lr_mult: 0 1537 | decay_mult: 0 1538 | } 1539 | convolution_param { 1540 | num_output: 64 1541 | pad: 1 1542 | kernel_size: 3 1543 | stride: 1 1544 | weight_filler { 1545 | type: "msra" 1546 | } 1547 | bias_filler { 1548 | type: "constant" 1549 | value: 0 1550 | } 1551 | } 1552 | } 1553 | layer { 1554 | name: "BatchNorm21" 1555 | type: "BatchNorm" 1556 | bottom: "Convolution21" 1557 | top: 
"Convolution21" 1558 | param { 1559 | lr_mult: 0 1560 | decay_mult: 0 1561 | } 1562 | param { 1563 | lr_mult: 0 1564 | decay_mult: 0 1565 | } 1566 | param { 1567 | lr_mult: 0 1568 | decay_mult: 0 1569 | } 1570 | batch_norm_param { 1571 | use_global_stats: true 1572 | } 1573 | } 1574 | layer { 1575 | name: "Scale21" 1576 | type: "Scale" 1577 | bottom: "Convolution21" 1578 | top: "Convolution21" 1579 | param { 1580 | lr_mult: 0 1581 | decay_mult: 0 1582 | } 1583 | param { 1584 | lr_mult: 0 1585 | decay_mult: 0 1586 | } 1587 | scale_param { 1588 | bias_term: true 1589 | } 1590 | } 1591 | layer { 1592 | name: "Eltwise9" 1593 | type: "Eltwise" 1594 | bottom: "Eltwise8" 1595 | bottom: "Convolution21" 1596 | top: "Eltwise9" 1597 | eltwise_param { 1598 | operation: SUM 1599 | } 1600 | } 1601 | layer { 1602 | name: "ReLU19" 1603 | type: "ReLU" 1604 | bottom: "Eltwise9" 1605 | top: "Eltwise9" 1606 | } 1607 | layer { 1608 | name: "Pooling1" 1609 | type: "Pooling" 1610 | bottom: "Eltwise9" 1611 | top: "Pooling1" 1612 | pooling_param { 1613 | pool: AVE 1614 | global_pooling: true 1615 | } 1616 | } 1617 | layer { 1618 | name: "InnerProduct1" 1619 | type: "InnerProduct" 1620 | bottom: "Pooling1" 1621 | top: "InnerProduct1" 1622 | param { 1623 | lr_mult: 0 1624 | decay_mult: 0 1625 | } 1626 | param { 1627 | lr_mult: 0 1628 | decay_mult: 0 1629 | } 1630 | inner_product_param { 1631 | num_output: 10 1632 | weight_filler { 1633 | type: "msra" 1634 | } 1635 | bias_filler { 1636 | type: "constant" 1637 | value: 0 1638 | } 1639 | } 1640 | } 1641 | layer { 1642 | name: "SoftmaxWithLoss1" 1643 | type: "SoftmaxWithLoss" 1644 | bottom: "InnerProduct1" 1645 | bottom: "Data2" 1646 | top: "SoftmaxWithLoss1" 1647 | loss_weight: 0 1648 | } 1649 | layer { 1650 | name: "Accuracy1" 1651 | type: "Accuracy" 1652 | bottom: "InnerProduct1" 1653 | bottom: "Data2" 1654 | top: "Accuracy1" 1655 | include { 1656 | phase: TEST 1657 | } 1658 | } 1659 | 1660 | layer { 1661 | name: "stu_Convolution1" 1662 | 
type: "Convolution" 1663 | bottom: "Data1" 1664 | top: "stu_Convolution1" 1665 | param { 1666 | lr_mult: 1 1667 | decay_mult: 1 1668 | } 1669 | param { 1670 | lr_mult: 2 1671 | decay_mult: 0 1672 | } 1673 | convolution_param { 1674 | num_output: 8 1675 | pad: 1 1676 | kernel_size: 3 1677 | stride: 1 1678 | weight_filler { 1679 | type: "msra" 1680 | } 1681 | bias_filler { 1682 | type: "constant" 1683 | value: 0 1684 | } 1685 | } 1686 | } 1687 | layer { 1688 | name: "stu_BatchNorm1" 1689 | type: "BatchNorm" 1690 | bottom: "stu_Convolution1" 1691 | top: "stu_Convolution1" 1692 | param { 1693 | lr_mult: 0 1694 | decay_mult: 0 1695 | } 1696 | param { 1697 | lr_mult: 0 1698 | decay_mult: 0 1699 | } 1700 | param { 1701 | lr_mult: 0 1702 | decay_mult: 0 1703 | } 1704 | } 1705 | layer { 1706 | name: "stu_Scale1" 1707 | type: "Scale" 1708 | bottom: "stu_Convolution1" 1709 | top: "stu_Convolution1" 1710 | scale_param { 1711 | bias_term: true 1712 | } 1713 | } 1714 | layer { 1715 | name: "stu_ReLU1" 1716 | type: "ReLU" 1717 | bottom: "stu_Convolution1" 1718 | top: "stu_Convolution1" 1719 | } 1720 | layer { 1721 | name: "stu_Convolution2" 1722 | type: "Convolution" 1723 | bottom: "stu_Convolution1" 1724 | top: "stu_Convolution2" 1725 | param { 1726 | lr_mult: 1 1727 | decay_mult: 1 1728 | } 1729 | param { 1730 | lr_mult: 2 1731 | decay_mult: 0 1732 | } 1733 | convolution_param { 1734 | num_output: 8 1735 | pad: 1 1736 | kernel_size: 3 1737 | stride: 1 1738 | weight_filler { 1739 | type: "msra" 1740 | } 1741 | bias_filler { 1742 | type: "constant" 1743 | value: 0 1744 | } 1745 | } 1746 | } 1747 | layer { 1748 | name: "stu_BatchNorm2" 1749 | type: "BatchNorm" 1750 | bottom: "stu_Convolution2" 1751 | top: "stu_Convolution2" 1752 | param { 1753 | lr_mult: 0 1754 | decay_mult: 0 1755 | } 1756 | param { 1757 | lr_mult: 0 1758 | decay_mult: 0 1759 | } 1760 | param { 1761 | lr_mult: 0 1762 | decay_mult: 0 1763 | } 1764 | } 1765 | layer { 1766 | name: "stu_Scale2" 1767 | type: "Scale" 
1768 | bottom: "stu_Convolution2" 1769 | top: "stu_Convolution2" 1770 | scale_param { 1771 | bias_term: true 1772 | } 1773 | } 1774 | layer { 1775 | name: "stu_ReLU2" 1776 | type: "ReLU" 1777 | bottom: "stu_Convolution2" 1778 | top: "stu_Convolution2" 1779 | } 1780 | layer { 1781 | name: "stu_Convolution3" 1782 | type: "Convolution" 1783 | bottom: "stu_Convolution2" 1784 | top: "stu_Convolution3" 1785 | param { 1786 | lr_mult: 1 1787 | decay_mult: 1 1788 | } 1789 | param { 1790 | lr_mult: 2 1791 | decay_mult: 0 1792 | } 1793 | convolution_param { 1794 | num_output: 8 1795 | pad: 1 1796 | kernel_size: 3 1797 | stride: 1 1798 | weight_filler { 1799 | type: "msra" 1800 | } 1801 | bias_filler { 1802 | type: "constant" 1803 | value: 0 1804 | } 1805 | } 1806 | } 1807 | layer { 1808 | name: "stu_BatchNorm3" 1809 | type: "BatchNorm" 1810 | bottom: "stu_Convolution3" 1811 | top: "stu_Convolution3" 1812 | param { 1813 | lr_mult: 0 1814 | decay_mult: 0 1815 | } 1816 | param { 1817 | lr_mult: 0 1818 | decay_mult: 0 1819 | } 1820 | param { 1821 | lr_mult: 0 1822 | decay_mult: 0 1823 | } 1824 | } 1825 | layer { 1826 | name: "stu_Scale3" 1827 | type: "Scale" 1828 | bottom: "stu_Convolution3" 1829 | top: "stu_Convolution3" 1830 | scale_param { 1831 | bias_term: true 1832 | } 1833 | } 1834 | layer { 1835 | name: "stu_Eltwise1" 1836 | type: "Eltwise" 1837 | bottom: "stu_Convolution1" 1838 | bottom: "stu_Convolution3" 1839 | top: "stu_Eltwise1" 1840 | eltwise_param { 1841 | operation: SUM 1842 | } 1843 | } 1844 | layer { 1845 | name: "stu_ReLU3" 1846 | type: "ReLU" 1847 | bottom: "stu_Eltwise1" 1848 | top: "stu_Eltwise1" 1849 | } 1850 | layer { 1851 | name: "stu_Convolution6" 1852 | type: "Convolution" 1853 | bottom: "stu_Eltwise1" 1854 | top: "stu_Convolution6" 1855 | param { 1856 | lr_mult: 1 1857 | decay_mult: 1 1858 | } 1859 | param { 1860 | lr_mult: 2 1861 | decay_mult: 0 1862 | } 1863 | convolution_param { 1864 | num_output: 8 1865 | pad: 1 1866 | kernel_size: 3 1867 | stride: 
1 1868 | weight_filler { 1869 | type: "msra" 1870 | } 1871 | bias_filler { 1872 | type: "constant" 1873 | value: 0 1874 | } 1875 | } 1876 | } 1877 | layer { 1878 | name: "stu_BatchNorm6" 1879 | type: "BatchNorm" 1880 | bottom: "stu_Convolution6" 1881 | top: "stu_Convolution6" 1882 | param { 1883 | lr_mult: 0 1884 | decay_mult: 0 1885 | } 1886 | param { 1887 | lr_mult: 0 1888 | decay_mult: 0 1889 | } 1890 | param { 1891 | lr_mult: 0 1892 | decay_mult: 0 1893 | } 1894 | } 1895 | layer { 1896 | name: "stu_Scale6" 1897 | type: "Scale" 1898 | bottom: "stu_Convolution6" 1899 | top: "stu_Convolution6" 1900 | scale_param { 1901 | bias_term: true 1902 | } 1903 | } 1904 | layer { 1905 | name: "stu_ReLU6" 1906 | type: "ReLU" 1907 | bottom: "stu_Convolution6" 1908 | top: "stu_Convolution6" 1909 | } 1910 | layer { 1911 | name: "stu_Convolution7" 1912 | type: "Convolution" 1913 | bottom: "stu_Convolution6" 1914 | top: "stu_Convolution7" 1915 | param { 1916 | lr_mult: 1 1917 | decay_mult: 1 1918 | } 1919 | param { 1920 | lr_mult: 2 1921 | decay_mult: 0 1922 | } 1923 | convolution_param { 1924 | num_output: 8 1925 | pad: 1 1926 | kernel_size: 3 1927 | stride: 1 1928 | weight_filler { 1929 | type: "msra" 1930 | } 1931 | bias_filler { 1932 | type: "constant" 1933 | value: 0 1934 | } 1935 | } 1936 | } 1937 | layer { 1938 | name: "stu_BatchNorm7" 1939 | type: "BatchNorm" 1940 | bottom: "stu_Convolution7" 1941 | top: "stu_Convolution7" 1942 | param { 1943 | lr_mult: 0 1944 | decay_mult: 0 1945 | } 1946 | param { 1947 | lr_mult: 0 1948 | decay_mult: 0 1949 | } 1950 | param { 1951 | lr_mult: 0 1952 | decay_mult: 0 1953 | } 1954 | } 1955 | layer { 1956 | name: "stu_Scale7" 1957 | type: "Scale" 1958 | bottom: "stu_Convolution7" 1959 | top: "stu_Convolution7" 1960 | scale_param { 1961 | bias_term: true 1962 | } 1963 | } 1964 | layer { 1965 | name: "stu_Eltwise3" 1966 | type: "Eltwise" 1967 | bottom: "stu_Eltwise1" 1968 | bottom: "stu_Convolution7" 1969 | top: "stu_Eltwise3" 1970 | 
eltwise_param { 1971 | operation: SUM 1972 | } 1973 | } 1974 | layer { 1975 | name: "stu_ReLU7" 1976 | type: "ReLU" 1977 | bottom: "stu_Eltwise3" 1978 | top: "stu_Eltwise3" 1979 | } 1980 | layer { 1981 | name: "stu_Convolution8" 1982 | type: "Convolution" 1983 | bottom: "stu_Eltwise3" 1984 | top: "stu_Convolution8" 1985 | param { 1986 | lr_mult: 1 1987 | decay_mult: 1 1988 | } 1989 | param { 1990 | lr_mult: 2 1991 | decay_mult: 0 1992 | } 1993 | convolution_param { 1994 | num_output: 16 1995 | pad: 0 1996 | kernel_size: 1 1997 | stride: 2 1998 | weight_filler { 1999 | type: "msra" 2000 | } 2001 | bias_filler { 2002 | type: "constant" 2003 | value: 0 2004 | } 2005 | } 2006 | } 2007 | layer { 2008 | name: "stu_BatchNorm8" 2009 | type: "BatchNorm" 2010 | bottom: "stu_Convolution8" 2011 | top: "stu_Convolution8" 2012 | param { 2013 | lr_mult: 0 2014 | decay_mult: 0 2015 | } 2016 | param { 2017 | lr_mult: 0 2018 | decay_mult: 0 2019 | } 2020 | param { 2021 | lr_mult: 0 2022 | decay_mult: 0 2023 | } 2024 | } 2025 | layer { 2026 | name: "stu_Scale8" 2027 | type: "Scale" 2028 | bottom: "stu_Convolution8" 2029 | top: "stu_Convolution8" 2030 | scale_param { 2031 | bias_term: true 2032 | } 2033 | } 2034 | layer { 2035 | name: "stu_Convolution9" 2036 | type: "Convolution" 2037 | bottom: "stu_Eltwise3" 2038 | top: "stu_Convolution9" 2039 | param { 2040 | lr_mult: 1 2041 | decay_mult: 1 2042 | } 2043 | param { 2044 | lr_mult: 2 2045 | decay_mult: 0 2046 | } 2047 | convolution_param { 2048 | num_output: 16 2049 | pad: 1 2050 | kernel_size: 3 2051 | stride: 2 2052 | weight_filler { 2053 | type: "msra" 2054 | } 2055 | bias_filler { 2056 | type: "constant" 2057 | value: 0 2058 | } 2059 | } 2060 | } 2061 | layer { 2062 | name: "stu_BatchNorm9" 2063 | type: "BatchNorm" 2064 | bottom: "stu_Convolution9" 2065 | top: "stu_Convolution9" 2066 | param { 2067 | lr_mult: 0 2068 | decay_mult: 0 2069 | } 2070 | param { 2071 | lr_mult: 0 2072 | decay_mult: 0 2073 | } 2074 | param { 2075 | 
lr_mult: 0 2076 | decay_mult: 0 2077 | } 2078 | } 2079 | layer { 2080 | name: "stu_Scale9" 2081 | type: "Scale" 2082 | bottom: "stu_Convolution9" 2083 | top: "stu_Convolution9" 2084 | scale_param { 2085 | bias_term: true 2086 | } 2087 | } 2088 | layer { 2089 | name: "stu_ReLU8" 2090 | type: "ReLU" 2091 | bottom: "stu_Convolution9" 2092 | top: "stu_Convolution9" 2093 | } 2094 | layer { 2095 | name: "stu_Convolution10" 2096 | type: "Convolution" 2097 | bottom: "stu_Convolution9" 2098 | top: "stu_Convolution10" 2099 | param { 2100 | lr_mult: 1 2101 | decay_mult: 1 2102 | } 2103 | param { 2104 | lr_mult: 2 2105 | decay_mult: 0 2106 | } 2107 | convolution_param { 2108 | num_output: 16 2109 | pad: 1 2110 | kernel_size: 3 2111 | stride: 1 2112 | weight_filler { 2113 | type: "msra" 2114 | } 2115 | bias_filler { 2116 | type: "constant" 2117 | value: 0 2118 | } 2119 | } 2120 | } 2121 | layer { 2122 | name: "stu_BatchNorm10" 2123 | type: "BatchNorm" 2124 | bottom: "stu_Convolution10" 2125 | top: "stu_Convolution10" 2126 | param { 2127 | lr_mult: 0 2128 | decay_mult: 0 2129 | } 2130 | param { 2131 | lr_mult: 0 2132 | decay_mult: 0 2133 | } 2134 | param { 2135 | lr_mult: 0 2136 | decay_mult: 0 2137 | } 2138 | } 2139 | layer { 2140 | name: "stu_Scale10" 2141 | type: "Scale" 2142 | bottom: "stu_Convolution10" 2143 | top: "stu_Convolution10" 2144 | scale_param { 2145 | bias_term: true 2146 | } 2147 | } 2148 | layer { 2149 | name: "stu_Eltwise4" 2150 | type: "Eltwise" 2151 | bottom: "stu_Convolution8" 2152 | bottom: "stu_Convolution10" 2153 | top: "stu_Eltwise4" 2154 | eltwise_param { 2155 | operation: SUM 2156 | } 2157 | } 2158 | layer { 2159 | name: "stu_ReLU9" 2160 | type: "ReLU" 2161 | bottom: "stu_Eltwise4" 2162 | top: "stu_Eltwise4" 2163 | } 2164 | layer { 2165 | name: "stu_Convolution11" 2166 | type: "Convolution" 2167 | bottom: "stu_Eltwise4" 2168 | top: "stu_Convolution11" 2169 | param { 2170 | lr_mult: 1 2171 | decay_mult: 1 2172 | } 2173 | param { 2174 | lr_mult: 2 2175 
| decay_mult: 0 2176 | } 2177 | convolution_param { 2178 | num_output: 16 2179 | pad: 1 2180 | kernel_size: 3 2181 | stride: 1 2182 | weight_filler { 2183 | type: "msra" 2184 | } 2185 | bias_filler { 2186 | type: "constant" 2187 | value: 0 2188 | } 2189 | } 2190 | } 2191 | layer { 2192 | name: "stu_BatchNorm11" 2193 | type: "BatchNorm" 2194 | bottom: "stu_Convolution11" 2195 | top: "stu_Convolution11" 2196 | param { 2197 | lr_mult: 0 2198 | decay_mult: 0 2199 | } 2200 | param { 2201 | lr_mult: 0 2202 | decay_mult: 0 2203 | } 2204 | param { 2205 | lr_mult: 0 2206 | decay_mult: 0 2207 | } 2208 | } 2209 | layer { 2210 | name: "stu_Scale11" 2211 | type: "Scale" 2212 | bottom: "stu_Convolution11" 2213 | top: "stu_Convolution11" 2214 | scale_param { 2215 | bias_term: true 2216 | } 2217 | } 2218 | layer { 2219 | name: "stu_ReLU10" 2220 | type: "ReLU" 2221 | bottom: "stu_Convolution11" 2222 | top: "stu_Convolution11" 2223 | } 2224 | layer { 2225 | name: "stu_Convolution12" 2226 | type: "Convolution" 2227 | bottom: "stu_Convolution11" 2228 | top: "stu_Convolution12" 2229 | param { 2230 | lr_mult: 1 2231 | decay_mult: 1 2232 | } 2233 | param { 2234 | lr_mult: 2 2235 | decay_mult: 0 2236 | } 2237 | convolution_param { 2238 | num_output: 16 2239 | pad: 1 2240 | kernel_size: 3 2241 | stride: 1 2242 | weight_filler { 2243 | type: "msra" 2244 | } 2245 | bias_filler { 2246 | type: "constant" 2247 | value: 0 2248 | } 2249 | } 2250 | } 2251 | layer { 2252 | name: "stu_BatchNorm12" 2253 | type: "BatchNorm" 2254 | bottom: "stu_Convolution12" 2255 | top: "stu_Convolution12" 2256 | param { 2257 | lr_mult: 0 2258 | decay_mult: 0 2259 | } 2260 | param { 2261 | lr_mult: 0 2262 | decay_mult: 0 2263 | } 2264 | param { 2265 | lr_mult: 0 2266 | decay_mult: 0 2267 | } 2268 | } 2269 | layer { 2270 | name: "stu_Scale12" 2271 | type: "Scale" 2272 | bottom: "stu_Convolution12" 2273 | top: "stu_Convolution12" 2274 | scale_param { 2275 | bias_term: true 2276 | } 2277 | } 2278 | layer { 2279 | name: 
"stu_Eltwise5" 2280 | type: "Eltwise" 2281 | bottom: "stu_Eltwise4" 2282 | bottom: "stu_Convolution12" 2283 | top: "stu_Eltwise5" 2284 | eltwise_param { 2285 | operation: SUM 2286 | } 2287 | } 2288 | layer { 2289 | name: "stu_ReLU11" 2290 | type: "ReLU" 2291 | bottom: "stu_Eltwise5" 2292 | top: "stu_Eltwise5" 2293 | } 2294 | layer { 2295 | name: "stu_Convolution15" 2296 | type: "Convolution" 2297 | bottom: "stu_Eltwise5" 2298 | top: "stu_Convolution15" 2299 | param { 2300 | lr_mult: 1 2301 | decay_mult: 1 2302 | } 2303 | param { 2304 | lr_mult: 2 2305 | decay_mult: 0 2306 | } 2307 | convolution_param { 2308 | num_output: 32 2309 | pad: 0 2310 | kernel_size: 1 2311 | stride: 2 2312 | weight_filler { 2313 | type: "msra" 2314 | } 2315 | bias_filler { 2316 | type: "constant" 2317 | value: 0 2318 | } 2319 | } 2320 | } 2321 | layer { 2322 | name: "stu_BatchNorm15" 2323 | type: "BatchNorm" 2324 | bottom: "stu_Convolution15" 2325 | top: "stu_Convolution15" 2326 | param { 2327 | lr_mult: 0 2328 | decay_mult: 0 2329 | } 2330 | param { 2331 | lr_mult: 0 2332 | decay_mult: 0 2333 | } 2334 | param { 2335 | lr_mult: 0 2336 | decay_mult: 0 2337 | } 2338 | } 2339 | layer { 2340 | name: "stu_Scale15" 2341 | type: "Scale" 2342 | bottom: "stu_Convolution15" 2343 | top: "stu_Convolution15" 2344 | scale_param { 2345 | bias_term: true 2346 | } 2347 | } 2348 | layer { 2349 | name: "stu_Convolution16" 2350 | type: "Convolution" 2351 | bottom: "stu_Eltwise5" 2352 | top: "stu_Convolution16" 2353 | param { 2354 | lr_mult: 1 2355 | decay_mult: 1 2356 | } 2357 | param { 2358 | lr_mult: 2 2359 | decay_mult: 0 2360 | } 2361 | convolution_param { 2362 | num_output: 32 2363 | pad: 1 2364 | kernel_size: 3 2365 | stride: 2 2366 | weight_filler { 2367 | type: "msra" 2368 | } 2369 | bias_filler { 2370 | type: "constant" 2371 | value: 0 2372 | } 2373 | } 2374 | } 2375 | layer { 2376 | name: "stu_BatchNorm16" 2377 | type: "BatchNorm" 2378 | bottom: "stu_Convolution16" 2379 | top: "stu_Convolution16" 
2380 | param { 2381 | lr_mult: 0 2382 | decay_mult: 0 2383 | } 2384 | param { 2385 | lr_mult: 0 2386 | decay_mult: 0 2387 | } 2388 | param { 2389 | lr_mult: 0 2390 | decay_mult: 0 2391 | } 2392 | } 2393 | layer { 2394 | name: "stu_Scale16" 2395 | type: "Scale" 2396 | bottom: "stu_Convolution16" 2397 | top: "stu_Convolution16" 2398 | scale_param { 2399 | bias_term: true 2400 | } 2401 | } 2402 | layer { 2403 | name: "stu_ReLU14" 2404 | type: "ReLU" 2405 | bottom: "stu_Convolution16" 2406 | top: "stu_Convolution16" 2407 | } 2408 | layer { 2409 | name: "stu_Convolution17" 2410 | type: "Convolution" 2411 | bottom: "stu_Convolution16" 2412 | top: "stu_Convolution17" 2413 | param { 2414 | lr_mult: 1 2415 | decay_mult: 1 2416 | } 2417 | param { 2418 | lr_mult: 2 2419 | decay_mult: 0 2420 | } 2421 | convolution_param { 2422 | num_output: 32 2423 | pad: 1 2424 | kernel_size: 3 2425 | stride: 1 2426 | weight_filler { 2427 | type: "msra" 2428 | } 2429 | bias_filler { 2430 | type: "constant" 2431 | value: 0 2432 | } 2433 | } 2434 | } 2435 | layer { 2436 | name: "stu_BatchNorm17" 2437 | type: "BatchNorm" 2438 | bottom: "stu_Convolution17" 2439 | top: "stu_Convolution17" 2440 | param { 2441 | lr_mult: 0 2442 | decay_mult: 0 2443 | } 2444 | param { 2445 | lr_mult: 0 2446 | decay_mult: 0 2447 | } 2448 | param { 2449 | lr_mult: 0 2450 | decay_mult: 0 2451 | } 2452 | } 2453 | layer { 2454 | name: "stu_Scale17" 2455 | type: "Scale" 2456 | bottom: "stu_Convolution17" 2457 | top: "stu_Convolution17" 2458 | scale_param { 2459 | bias_term: true 2460 | } 2461 | } 2462 | layer { 2463 | name: "stu_Eltwise7" 2464 | type: "Eltwise" 2465 | bottom: "stu_Convolution15" 2466 | bottom: "stu_Convolution17" 2467 | top: "stu_Eltwise7" 2468 | eltwise_param { 2469 | operation: SUM 2470 | } 2471 | } 2472 | layer { 2473 | name: "stu_ReLU15" 2474 | type: "ReLU" 2475 | bottom: "stu_Eltwise7" 2476 | top: "stu_Eltwise7" 2477 | } 2478 | 2479 | layer { 2480 | name: "stu_Convolution20" 2481 | type: "Convolution" 
2482 | bottom: "stu_Eltwise7" 2483 | top: "stu_Convolution20" 2484 | param { 2485 | lr_mult: 1 2486 | decay_mult: 1 2487 | } 2488 | param { 2489 | lr_mult: 2 2490 | decay_mult: 0 2491 | } 2492 | convolution_param { 2493 | num_output: 32 2494 | pad: 1 2495 | kernel_size: 3 2496 | stride: 1 2497 | weight_filler { 2498 | type: "msra" 2499 | } 2500 | bias_filler { 2501 | type: "constant" 2502 | value: 0 2503 | } 2504 | } 2505 | } 2506 | layer { 2507 | name: "stu_BatchNorm20" 2508 | type: "BatchNorm" 2509 | bottom: "stu_Convolution20" 2510 | top: "stu_Convolution20" 2511 | param { 2512 | lr_mult: 0 2513 | decay_mult: 0 2514 | } 2515 | param { 2516 | lr_mult: 0 2517 | decay_mult: 0 2518 | } 2519 | param { 2520 | lr_mult: 0 2521 | decay_mult: 0 2522 | } 2523 | } 2524 | layer { 2525 | name: "stu_Scale20" 2526 | type: "Scale" 2527 | bottom: "stu_Convolution20" 2528 | top: "stu_Convolution20" 2529 | scale_param { 2530 | bias_term: true 2531 | } 2532 | } 2533 | layer { 2534 | name: "stu_ReLU18" 2535 | type: "ReLU" 2536 | bottom: "stu_Convolution20" 2537 | top: "stu_Convolution20" 2538 | } 2539 | layer { 2540 | name: "stu_Convolution21" 2541 | type: "Convolution" 2542 | bottom: "stu_Convolution20" 2543 | top: "stu_Convolution21" 2544 | param { 2545 | lr_mult: 1 2546 | decay_mult: 1 2547 | } 2548 | param { 2549 | lr_mult: 2 2550 | decay_mult: 0 2551 | } 2552 | convolution_param { 2553 | num_output: 32 2554 | pad: 1 2555 | kernel_size: 3 2556 | stride: 1 2557 | weight_filler { 2558 | type: "msra" 2559 | } 2560 | bias_filler { 2561 | type: "constant" 2562 | value: 0 2563 | } 2564 | } 2565 | } 2566 | layer { 2567 | name: "stu_BatchNorm21" 2568 | type: "BatchNorm" 2569 | bottom: "stu_Convolution21" 2570 | top: "stu_Convolution21" 2571 | param { 2572 | lr_mult: 0 2573 | decay_mult: 0 2574 | } 2575 | param { 2576 | lr_mult: 0 2577 | decay_mult: 0 2578 | } 2579 | param { 2580 | lr_mult: 0 2581 | decay_mult: 0 2582 | } 2583 | } 2584 | layer { 2585 | name: "stu_Scale21" 2586 | type: 
"Scale" 2587 | bottom: "stu_Convolution21" 2588 | top: "stu_Convolution21" 2589 | scale_param { 2590 | bias_term: true 2591 | } 2592 | } 2593 | layer { 2594 | name: "stu_Eltwise9" 2595 | type: "Eltwise" 2596 | bottom: "stu_Eltwise7" 2597 | bottom: "stu_Convolution21" 2598 | top: "stu_Eltwise9" 2599 | eltwise_param { 2600 | operation: SUM 2601 | } 2602 | } 2603 | layer { 2604 | name: "stu_ReLU19" 2605 | type: "ReLU" 2606 | bottom: "stu_Eltwise9" 2607 | top: "stu_Eltwise9" 2608 | } 2609 | layer { 2610 | name: "stu_Pooling1" 2611 | type: "Pooling" 2612 | bottom: "stu_Eltwise9" 2613 | top: "stu_Pooling1" 2614 | pooling_param { 2615 | pool: AVE 2616 | global_pooling: true 2617 | } 2618 | } 2619 | layer { 2620 | name: "stu_InnerProduct1" 2621 | type: "InnerProduct" 2622 | bottom: "stu_Pooling1" 2623 | top: "stu_InnerProduct1" 2624 | param { 2625 | lr_mult: 1 2626 | decay_mult: 1 2627 | } 2628 | param { 2629 | lr_mult: 2 2630 | decay_mult: 1 2631 | } 2632 | inner_product_param { 2633 | num_output: 10 2634 | weight_filler { 2635 | type: "msra" 2636 | } 2637 | bias_filler { 2638 | type: "constant" 2639 | value: 0 2640 | } 2641 | } 2642 | } 2643 | layer { 2644 | name: "stu_SoftmaxWithLoss1" 2645 | type: "SoftmaxWithLoss" 2646 | bottom: "stu_InnerProduct1" 2647 | bottom: "Data2" 2648 | top: "stu_SoftmaxWithLoss1" 2649 | } 2650 | layer { 2651 | name: "stu_Accuracy1" 2652 | type: "Accuracy" 2653 | bottom: "stu_InnerProduct1" 2654 | bottom: "Data2" 2655 | top: "stu_Accuracy1" 2656 | include { 2657 | phase: TEST 2658 | } 2659 | } 2660 | 2661 | layer { 2662 | name: "pool_eltwise1" 2663 | type: "Pooling" 2664 | bottom: "Eltwise1" 2665 | top: "pool_eltwise1" 2666 | pooling_param { 2667 | pool: AVE 2668 | global_pooling: true 2669 | } 2670 | include { 2671 | phase: TRAIN 2672 | } 2673 | } 2674 | layer { 2675 | name: "pool_eltwise3" 2676 | type: "Pooling" 2677 | bottom: "Eltwise3" 2678 | top: "pool_eltwise3" 2679 | pooling_param { 2680 | pool: AVE 2681 | global_pooling: true 2682 | } 
2683 | include { 2684 | phase: TRAIN 2685 | } 2686 | } 2687 | layer { 2688 | name: "pool_eltwise4" 2689 | type: "Pooling" 2690 | bottom: "Eltwise4" 2691 | top: "pool_eltwise4" 2692 | pooling_param { 2693 | pool: AVE 2694 | global_pooling: true 2695 | } 2696 | include { 2697 | phase: TRAIN 2698 | } 2699 | } 2700 | layer { 2701 | name: "pool_eltwise6" 2702 | type: "Pooling" 2703 | bottom: "Eltwise6" 2704 | top: "pool_eltwise6" 2705 | pooling_param { 2706 | pool: AVE 2707 | global_pooling: true 2708 | } 2709 | include { 2710 | phase: TRAIN 2711 | } 2712 | } 2713 | layer { 2714 | name: "pool_eltwise7" 2715 | type: "Pooling" 2716 | bottom: "Eltwise7" 2717 | top: "pool_eltwise7" 2718 | pooling_param { 2719 | pool: AVE 2720 | global_pooling: true 2721 | } 2722 | include { 2723 | phase: TRAIN 2724 | } 2725 | } 2726 | layer { 2727 | name: "bem13" 2728 | type: "BatchEuclideanVector" 2729 | bottom: "pool_eltwise3" 2730 | bottom: "pool_eltwise1" 2731 | top: "bem13" 2732 | include { 2733 | phase: TRAIN 2734 | } 2735 | } 2736 | layer { 2737 | name: "bem46" 2738 | type: "BatchEuclideanVector" 2739 | bottom: "pool_eltwise6" 2740 | bottom: "pool_eltwise4" 2741 | top: "bem46" 2742 | include { 2743 | phase: TRAIN 2744 | } 2745 | } 2746 | layer { 2747 | name: "bem79" 2748 | type: "BatchEuclideanVector" 2749 | bottom: "Pooling1" 2750 | bottom: "pool_eltwise7" 2751 | top: "bem79" 2752 | include { 2753 | phase: TRAIN 2754 | } 2755 | } 2756 | layer { 2757 | name: "stu_pool_eltwise1" 2758 | type: "Pooling" 2759 | bottom: "stu_Eltwise1" 2760 | top: "stu_pool_eltwise1" 2761 | pooling_param { 2762 | pool: AVE 2763 | global_pooling: true 2764 | } 2765 | include { 2766 | phase: TRAIN 2767 | } 2768 | } 2769 | layer { 2770 | name: "stu_pool_eltwise3" 2771 | type: "Pooling" 2772 | bottom: "stu_Eltwise3" 2773 | top: "stu_pool_eltwise3" 2774 | pooling_param { 2775 | pool: AVE 2776 | global_pooling: true 2777 | } 2778 | include { 2779 | phase: TRAIN 2780 | } 2781 | } 2782 | layer { 2783 | name: 
"stu_pool_eltwise4" 2784 | type: "Pooling" 2785 | bottom: "stu_Eltwise4" 2786 | top: "stu_pool_eltwise4" 2787 | pooling_param { 2788 | pool: AVE 2789 | global_pooling: true 2790 | } 2791 | include { 2792 | phase: TRAIN 2793 | } 2794 | } 2795 | layer { 2796 | name: "stu_pool_eltwise5" 2797 | type: "Pooling" 2798 | bottom: "stu_Eltwise5" 2799 | top: "stu_pool_eltwise5" 2800 | pooling_param { 2801 | pool: AVE 2802 | global_pooling: true 2803 | } 2804 | include { 2805 | phase: TRAIN 2806 | } 2807 | } 2808 | layer { 2809 | name: "stu_pool_eltwise7" 2810 | type: "Pooling" 2811 | bottom: "stu_Eltwise7" 2812 | top: "stu_pool_eltwise7" 2813 | pooling_param { 2814 | pool: AVE 2815 | global_pooling: true 2816 | } 2817 | include { 2818 | phase: TRAIN 2819 | } 2820 | } 2821 | layer { 2822 | name: "stu_bem13" 2823 | type: "BatchEuclideanVector" 2824 | bottom: "stu_pool_eltwise3" 2825 | bottom: "stu_pool_eltwise1" 2826 | top: "stu_bem13" 2827 | include { 2828 | phase: TRAIN 2829 | } 2830 | } 2831 | layer { 2832 | name: "stu_bem46" 2833 | type: "BatchEuclideanVector" 2834 | bottom: "stu_pool_eltwise5" 2835 | bottom: "stu_pool_eltwise4" 2836 | top: "stu_bem46" 2837 | include { 2838 | phase: TRAIN 2839 | } 2840 | } 2841 | layer { 2842 | name: "stu_bem79" 2843 | type: "BatchEuclideanVector" 2844 | bottom: "stu_Pooling1" 2845 | bottom: "stu_pool_eltwise7" 2846 | top: "stu_bem79" 2847 | include { 2848 | phase: TRAIN 2849 | } 2850 | } 2851 | layer { 2852 | name: "bem13_loss" 2853 | type: "EuclideanLoss" 2854 | bottom: "bem13" 2855 | bottom: "stu_bem13" 2856 | top: "bem13_loss" 2857 | loss_weight: 0.005 2858 | include { 2859 | phase: TRAIN 2860 | } 2861 | } 2862 | layer { 2863 | name: "bem46_loss" 2864 | type: "EuclideanLoss" 2865 | bottom: "bem46" 2866 | bottom: "stu_bem46" 2867 | top: "bem46_loss" 2868 | loss_weight: 0.005 2869 | include { 2870 | phase: TRAIN 2871 | } 2872 | } 2873 | layer { 2874 | name: "bem79_loss" 2875 | type: "EuclideanLoss" 2876 | bottom: "bem79" 2877 | bottom: 
"stu_bem79" 2878 | top: "bem79_loss" 2879 | loss_weight: 0.005 2880 | include { 2881 | phase: TRAIN 2882 | } 2883 | } 2884 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | COPYRIGHT 3 | 4 | All contributions by Institute of Automation, Chinese Academy of Sciences: 5 | Copyright (c) 2018-2019 The Regents of Institute of Automation, Chinese Academy of Sciences (Regents) 6 | All rights reserved. 7 | 8 | All other contributions: 9 | Copyright (c) 2018-2019, the respective contributors 10 | All rights reserved. 11 | 12 | LICENSE 13 | 14 | Redistribution and use in source and binary forms, with or without 15 | modification, are permitted provided that the following conditions are met: 16 | 17 | 1. Redistributions of source code must retain the above copyright notice, this 18 | list of conditions and the following disclaimer. 19 | 2. Redistributions in binary form must reproduce the above copyright notice, 20 | this list of conditions and the following disclaimer in the documentation 21 | and/or other materials provided with the distribution. 22 | 23 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 24 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 27 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 | 34 | CONTRIBUTION AGREEMENT 35 | 36 | By contributing to the repository through pull-request, comment, 37 | or otherwise, the contributor releases their content to the 38 | license and copyright terms herein. 39 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # IRG for Knowledge Distillation 2 | [![License](https://img.shields.io/badge/license-BSD-blue.svg)](LICENSE) 3 | 4 | IRG is an open source implementation of the paper called "Knowledge Distillation via Instance Relationship Graph": 5 | > Yufan Liu, Jiajiong Cao, Bing Li, Chunfeng Yuan, Weiming Hu *et al*. Knowledge Distillation via Instance Relationship Graph. IEEE Conference on Computer Vision and Pattern Recognition (CVPR): Long Beach, USA. 2019.06.16-2019.06.20. 6 | 7 | ## Abstract 8 | *The key challenge of knowledge distillation is to extract 9 | general, moderate and sufficient knowledge from a teacher 10 | network to guide a student network. In this paper, a novel 11 | Instance Relationship Graph (IRG) is proposed for knowledge 12 | distillation. It models three kinds of knowledge, including 13 | instance features, instance relationships and feature 14 | space transformation, while the latter two kinds of 15 | knowledge are neglected by previous methods. Firstly, the 16 | IRG is constructed to model the distilled knowledge of 17 | one network layer, by considering instance features and 18 | instance relationships as vertexes and edges respectively. 19 | Secondly, an IRG transformation is proposed to model the 20 | feature space transformation across layers. It is more moderate 21 | than directly mimicking the features at intermediate 22 | layers. Finally, hint loss functions are designed to force a 23 | student’s IRGs to mimic the structures of a teacher’s IRGs. 
24 | The proposed method effectively captures the knowledge 25 | along the whole network via IRGs, and thus shows stable 26 | convergence and strong robustness to different network architectures. 27 | In addition, the proposed method shows superior 28 | performance over existing methods on datasets of various 29 | scales.* 30 | 31 | ## Citation 32 | If you find IRG useful in your research, please cite our paper: 33 | ```` 34 | @inproceedings{liu2019knowledge, 35 | title={Knowledge Distillation via Instance Relationship Graph}, 36 | author={Liu, Yufan and Cao, Jiajiong and Li, Bing and Yuan, Chunfeng and Hu, Weiming and Li, Yangxi and Duan, Yunqiang}, 37 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, 38 | pages={7096--7104}, 39 | year={2019} 40 | } 41 | ```` 42 | 43 | ## Framework 44 | ![Framework](./img/framework.png "Framework") 45 | 46 | ## Requirements 47 | 48 | - Caffe-windows 49 | 50 | ## Usage 51 | 1. Put \*.cpp/\*.hpp/\*.cu files into corresponding directories of caffe. 52 | 53 | 2. Compile caffe. 54 | ```make 55 | make all -j 8 56 | ``` 57 | 3. Use prototxt files (e.g., IRG.prototxt) to train. 58 | IRG.prototxt is the train prototxt of single IRG (Teacher: ResNet20; Student: ResNet20-x0.5). 59 | IRG_Transformation.prototxt is the train prototxt of single IRG Transformation (Teacher: ResNet20; Student: ResNet20-x0.5). 60 | ResNet-20.prototxt is the original train prototxt of ResNet20. 61 | 62 | ## Experiments 63 | ![Experimental results](./img/Exp_results.png "Experimental results") 64 | 65 | ## Contact 66 | If any question, please contact yufan.liu@ia.ac.cn, or use public issues section of this repository. 
67 | -------------------------------------------------------------------------------- /ResNet-20.prototxt: -------------------------------------------------------------------------------- 1 | name: "resnet_cifar10" 2 | layer { 3 | name: "Data1" 4 | type: "ImageData" 5 | top: "Data1" 6 | top: "Data2" 7 | include { 8 | phase: TRAIN 9 | } 10 | transform_param { 11 | mean_value: 127.5 12 | mean_value: 127.5 13 | mean_value: 127.5 14 | scale: 0.00390625 15 | crop_size: 32 16 | mirror:true 17 | } 18 | image_data_param { 19 | source: "yourpath/train.txt" 20 | new_height: 36 21 | new_width: 36 22 | batch_size: 128 23 | is_color: true 24 | shuffle: true 25 | } 26 | } 27 | layer { 28 | name: "Data1" 29 | type: "ImageData" 30 | top: "Data1" 31 | top: "Data2" 32 | include { 33 | phase: TEST 34 | } 35 | transform_param { 36 | mean_value: 127.5 37 | mean_value: 127.5 38 | mean_value: 127.5 39 | scale: 0.00390625 40 | } 41 | image_data_param { 42 | source: "yourpath/val.txt" 43 | new_height: 36 44 | new_width: 36 45 | batch_size: 100 46 | is_color: true 47 | shuffle: false 48 | } 49 | } 50 | layer { 51 | name: "Convolution1" 52 | type: "Convolution" 53 | bottom: "Data1" 54 | top: "Convolution1" 55 | param { 56 | lr_mult: 0 57 | decay_mult: 0 58 | } 59 | param { 60 | lr_mult: 0 61 | decay_mult: 0 62 | } 63 | convolution_param { 64 | num_output: 16 65 | pad: 1 66 | kernel_size: 3 67 | stride: 1 68 | weight_filler { 69 | type: "msra" 70 | } 71 | bias_filler { 72 | type: "constant" 73 | value: 0 74 | } 75 | } 76 | } 77 | layer { 78 | name: "BatchNorm1" 79 | type: "BatchNorm" 80 | bottom: "Convolution1" 81 | top: "Convolution1" 82 | param { 83 | lr_mult: 0 84 | decay_mult: 0 85 | } 86 | param { 87 | lr_mult: 0 88 | decay_mult: 0 89 | } 90 | param { 91 | lr_mult: 0 92 | decay_mult: 0 93 | } 94 | batch_norm_param { 95 | use_global_stats: true 96 | } 97 | } 98 | layer { 99 | name: "Scale1" 100 | type: "Scale" 101 | bottom: "Convolution1" 102 | top: "Convolution1" 103 | param { 104 | 
lr_mult: 0 105 | decay_mult: 0 106 | } 107 | param { 108 | lr_mult: 0 109 | decay_mult: 0 110 | } 111 | scale_param { 112 | bias_term: true 113 | } 114 | } 115 | layer { 116 | name: "ReLU1" 117 | type: "ReLU" 118 | bottom: "Convolution1" 119 | top: "Convolution1" 120 | } 121 | layer { 122 | name: "Convolution2" 123 | type: "Convolution" 124 | bottom: "Convolution1" 125 | top: "Convolution2" 126 | param { 127 | lr_mult: 0 128 | decay_mult: 0 129 | } 130 | param { 131 | lr_mult: 0 132 | decay_mult: 0 133 | } 134 | convolution_param { 135 | num_output: 16 136 | pad: 1 137 | kernel_size: 3 138 | stride: 1 139 | weight_filler { 140 | type: "msra" 141 | } 142 | bias_filler { 143 | type: "constant" 144 | value: 0 145 | } 146 | } 147 | } 148 | layer { 149 | name: "BatchNorm2" 150 | type: "BatchNorm" 151 | bottom: "Convolution2" 152 | top: "Convolution2" 153 | param { 154 | lr_mult: 0 155 | decay_mult: 0 156 | } 157 | param { 158 | lr_mult: 0 159 | decay_mult: 0 160 | } 161 | param { 162 | lr_mult: 0 163 | decay_mult: 0 164 | } 165 | batch_norm_param { 166 | use_global_stats: true 167 | } 168 | } 169 | layer { 170 | name: "Scale2" 171 | type: "Scale" 172 | bottom: "Convolution2" 173 | top: "Convolution2" 174 | param { 175 | lr_mult: 0 176 | decay_mult: 0 177 | } 178 | param { 179 | lr_mult: 0 180 | decay_mult: 0 181 | } 182 | scale_param { 183 | bias_term: true 184 | } 185 | } 186 | layer { 187 | name: "ReLU2" 188 | type: "ReLU" 189 | bottom: "Convolution2" 190 | top: "Convolution2" 191 | } 192 | layer { 193 | name: "Convolution3" 194 | type: "Convolution" 195 | bottom: "Convolution2" 196 | top: "Convolution3" 197 | param { 198 | lr_mult: 0 199 | decay_mult: 0 200 | } 201 | param { 202 | lr_mult: 0 203 | decay_mult: 0 204 | } 205 | convolution_param { 206 | num_output: 16 207 | pad: 1 208 | kernel_size: 3 209 | stride: 1 210 | weight_filler { 211 | type: "msra" 212 | } 213 | bias_filler { 214 | type: "constant" 215 | value: 0 216 | } 217 | } 218 | } 219 | layer { 220 | 
name: "BatchNorm3" 221 | type: "BatchNorm" 222 | bottom: "Convolution3" 223 | top: "Convolution3" 224 | param { 225 | lr_mult: 0 226 | decay_mult: 0 227 | } 228 | param { 229 | lr_mult: 0 230 | decay_mult: 0 231 | } 232 | param { 233 | lr_mult: 0 234 | decay_mult: 0 235 | } 236 | batch_norm_param { 237 | use_global_stats: true 238 | } 239 | } 240 | layer { 241 | name: "Scale3" 242 | type: "Scale" 243 | bottom: "Convolution3" 244 | top: "Convolution3" 245 | param { 246 | lr_mult: 0 247 | decay_mult: 0 248 | } 249 | param { 250 | lr_mult: 0 251 | decay_mult: 0 252 | } 253 | scale_param { 254 | bias_term: true 255 | } 256 | } 257 | layer { 258 | name: "Eltwise1" 259 | type: "Eltwise" 260 | bottom: "Convolution1" 261 | bottom: "Convolution3" 262 | top: "Eltwise1" 263 | eltwise_param { 264 | operation: SUM 265 | } 266 | } 267 | layer { 268 | name: "ReLU3" 269 | type: "ReLU" 270 | bottom: "Eltwise1" 271 | top: "Eltwise1" 272 | } 273 | layer { 274 | name: "Convolution4" 275 | type: "Convolution" 276 | bottom: "Eltwise1" 277 | top: "Convolution4" 278 | param { 279 | lr_mult: 0 280 | decay_mult: 0 281 | } 282 | param { 283 | lr_mult: 0 284 | decay_mult: 0 285 | } 286 | convolution_param { 287 | num_output: 16 288 | pad: 1 289 | kernel_size: 3 290 | stride: 1 291 | weight_filler { 292 | type: "msra" 293 | } 294 | bias_filler { 295 | type: "constant" 296 | value: 0 297 | } 298 | } 299 | } 300 | layer { 301 | name: "BatchNorm4" 302 | type: "BatchNorm" 303 | bottom: "Convolution4" 304 | top: "Convolution4" 305 | param { 306 | lr_mult: 0 307 | decay_mult: 0 308 | } 309 | param { 310 | lr_mult: 0 311 | decay_mult: 0 312 | } 313 | param { 314 | lr_mult: 0 315 | decay_mult: 0 316 | } 317 | batch_norm_param { 318 | use_global_stats: true 319 | } 320 | } 321 | layer { 322 | name: "Scale4" 323 | type: "Scale" 324 | bottom: "Convolution4" 325 | top: "Convolution4" 326 | param { 327 | lr_mult: 0 328 | decay_mult: 0 329 | } 330 | param { 331 | lr_mult: 0 332 | decay_mult: 0 333 | } 334 | 
scale_param { 335 | bias_term: true 336 | } 337 | } 338 | layer { 339 | name: "ReLU4" 340 | type: "ReLU" 341 | bottom: "Convolution4" 342 | top: "Convolution4" 343 | } 344 | layer { 345 | name: "Convolution5" 346 | type: "Convolution" 347 | bottom: "Convolution4" 348 | top: "Convolution5" 349 | param { 350 | lr_mult: 0 351 | decay_mult: 0 352 | } 353 | param { 354 | lr_mult: 0 355 | decay_mult: 0 356 | } 357 | convolution_param { 358 | num_output: 16 359 | pad: 1 360 | kernel_size: 3 361 | stride: 1 362 | weight_filler { 363 | type: "msra" 364 | } 365 | bias_filler { 366 | type: "constant" 367 | value: 0 368 | } 369 | } 370 | } 371 | layer { 372 | name: "BatchNorm5" 373 | type: "BatchNorm" 374 | bottom: "Convolution5" 375 | top: "Convolution5" 376 | param { 377 | lr_mult: 0 378 | decay_mult: 0 379 | } 380 | param { 381 | lr_mult: 0 382 | decay_mult: 0 383 | } 384 | param { 385 | lr_mult: 0 386 | decay_mult: 0 387 | } 388 | batch_norm_param { 389 | use_global_stats: true 390 | } 391 | } 392 | layer { 393 | name: "Scale5" 394 | type: "Scale" 395 | bottom: "Convolution5" 396 | top: "Convolution5" 397 | param { 398 | lr_mult: 0 399 | decay_mult: 0 400 | } 401 | param { 402 | lr_mult: 0 403 | decay_mult: 0 404 | } 405 | scale_param { 406 | bias_term: true 407 | } 408 | } 409 | layer { 410 | name: "Eltwise2" 411 | type: "Eltwise" 412 | bottom: "Eltwise1" 413 | bottom: "Convolution5" 414 | top: "Eltwise2" 415 | eltwise_param { 416 | operation: SUM 417 | } 418 | } 419 | layer { 420 | name: "ReLU5" 421 | type: "ReLU" 422 | bottom: "Eltwise2" 423 | top: "Eltwise2" 424 | } 425 | layer { 426 | name: "Convolution6" 427 | type: "Convolution" 428 | bottom: "Eltwise2" 429 | top: "Convolution6" 430 | param { 431 | lr_mult: 0 432 | decay_mult: 0 433 | } 434 | param { 435 | lr_mult: 0 436 | decay_mult: 0 437 | } 438 | convolution_param { 439 | num_output: 16 440 | pad: 1 441 | kernel_size: 3 442 | stride: 1 443 | weight_filler { 444 | type: "msra" 445 | } 446 | bias_filler { 447 | 
type: "constant" 448 | value: 0 449 | } 450 | } 451 | } 452 | layer { 453 | name: "BatchNorm6" 454 | type: "BatchNorm" 455 | bottom: "Convolution6" 456 | top: "Convolution6" 457 | param { 458 | lr_mult: 0 459 | decay_mult: 0 460 | } 461 | param { 462 | lr_mult: 0 463 | decay_mult: 0 464 | } 465 | param { 466 | lr_mult: 0 467 | decay_mult: 0 468 | } 469 | batch_norm_param { 470 | use_global_stats: true 471 | } 472 | } 473 | layer { 474 | name: "Scale6" 475 | type: "Scale" 476 | bottom: "Convolution6" 477 | top: "Convolution6" 478 | param { 479 | lr_mult: 0 480 | decay_mult: 0 481 | } 482 | param { 483 | lr_mult: 0 484 | decay_mult: 0 485 | } 486 | scale_param { 487 | bias_term: true 488 | } 489 | } 490 | layer { 491 | name: "ReLU6" 492 | type: "ReLU" 493 | bottom: "Convolution6" 494 | top: "Convolution6" 495 | } 496 | layer { 497 | name: "Convolution7" 498 | type: "Convolution" 499 | bottom: "Convolution6" 500 | top: "Convolution7" 501 | param { 502 | lr_mult: 0 503 | decay_mult: 0 504 | } 505 | param { 506 | lr_mult: 0 507 | decay_mult: 0 508 | } 509 | convolution_param { 510 | num_output: 16 511 | pad: 1 512 | kernel_size: 3 513 | stride: 1 514 | weight_filler { 515 | type: "msra" 516 | } 517 | bias_filler { 518 | type: "constant" 519 | value: 0 520 | } 521 | } 522 | } 523 | layer { 524 | name: "BatchNorm7" 525 | type: "BatchNorm" 526 | bottom: "Convolution7" 527 | top: "Convolution7" 528 | param { 529 | lr_mult: 0 530 | decay_mult: 0 531 | } 532 | param { 533 | lr_mult: 0 534 | decay_mult: 0 535 | } 536 | param { 537 | lr_mult: 0 538 | decay_mult: 0 539 | } 540 | batch_norm_param { 541 | use_global_stats: true 542 | } 543 | } 544 | layer { 545 | name: "Scale7" 546 | type: "Scale" 547 | bottom: "Convolution7" 548 | top: "Convolution7" 549 | param { 550 | lr_mult: 0 551 | decay_mult: 0 552 | } 553 | param { 554 | lr_mult: 0 555 | decay_mult: 0 556 | } 557 | scale_param { 558 | bias_term: true 559 | } 560 | } 561 | layer { 562 | name: "Eltwise3" 563 | type: 
"Eltwise" 564 | bottom: "Eltwise2" 565 | bottom: "Convolution7" 566 | top: "Eltwise3" 567 | eltwise_param { 568 | operation: SUM 569 | } 570 | } 571 | layer { 572 | name: "ReLU7" 573 | type: "ReLU" 574 | bottom: "Eltwise3" 575 | top: "Eltwise3" 576 | } 577 | layer { 578 | name: "Convolution8" 579 | type: "Convolution" 580 | bottom: "Eltwise3" 581 | top: "Convolution8" 582 | param { 583 | lr_mult: 0 584 | decay_mult: 0 585 | } 586 | param { 587 | lr_mult: 0 588 | decay_mult: 0 589 | } 590 | convolution_param { 591 | num_output: 32 592 | pad: 0 593 | kernel_size: 1 594 | stride: 2 595 | weight_filler { 596 | type: "msra" 597 | } 598 | bias_filler { 599 | type: "constant" 600 | value: 0 601 | } 602 | } 603 | } 604 | layer { 605 | name: "BatchNorm8" 606 | type: "BatchNorm" 607 | bottom: "Convolution8" 608 | top: "Convolution8" 609 | param { 610 | lr_mult: 0 611 | decay_mult: 0 612 | } 613 | param { 614 | lr_mult: 0 615 | decay_mult: 0 616 | } 617 | param { 618 | lr_mult: 0 619 | decay_mult: 0 620 | } 621 | batch_norm_param { 622 | use_global_stats: true 623 | } 624 | } 625 | layer { 626 | name: "Scale8" 627 | type: "Scale" 628 | bottom: "Convolution8" 629 | top: "Convolution8" 630 | param { 631 | lr_mult: 0 632 | decay_mult: 0 633 | } 634 | param { 635 | lr_mult: 0 636 | decay_mult: 0 637 | } 638 | scale_param { 639 | bias_term: true 640 | } 641 | } 642 | layer { 643 | name: "Convolution9" 644 | type: "Convolution" 645 | bottom: "Eltwise3" 646 | top: "Convolution9" 647 | param { 648 | lr_mult: 0 649 | decay_mult: 0 650 | } 651 | param { 652 | lr_mult: 0 653 | decay_mult: 0 654 | } 655 | convolution_param { 656 | num_output: 32 657 | pad: 1 658 | kernel_size: 3 659 | stride: 2 660 | weight_filler { 661 | type: "msra" 662 | } 663 | bias_filler { 664 | type: "constant" 665 | value: 0 666 | } 667 | } 668 | } 669 | layer { 670 | name: "BatchNorm9" 671 | type: "BatchNorm" 672 | bottom: "Convolution9" 673 | top: "Convolution9" 674 | param { 675 | lr_mult: 0 676 | decay_mult: 
0 677 | } 678 | param { 679 | lr_mult: 0 680 | decay_mult: 0 681 | } 682 | param { 683 | lr_mult: 0 684 | decay_mult: 0 685 | } 686 | batch_norm_param { 687 | use_global_stats: true 688 | } 689 | } 690 | layer { 691 | name: "Scale9" 692 | type: "Scale" 693 | bottom: "Convolution9" 694 | top: "Convolution9" 695 | param { 696 | lr_mult: 0 697 | decay_mult: 0 698 | } 699 | param { 700 | lr_mult: 0 701 | decay_mult: 0 702 | } 703 | scale_param { 704 | bias_term: true 705 | } 706 | } 707 | layer { 708 | name: "ReLU8" 709 | type: "ReLU" 710 | bottom: "Convolution9" 711 | top: "Convolution9" 712 | } 713 | layer { 714 | name: "Convolution10" 715 | type: "Convolution" 716 | bottom: "Convolution9" 717 | top: "Convolution10" 718 | param { 719 | lr_mult: 0 720 | decay_mult: 0 721 | } 722 | param { 723 | lr_mult: 0 724 | decay_mult: 0 725 | } 726 | convolution_param { 727 | num_output: 32 728 | pad: 1 729 | kernel_size: 3 730 | stride: 1 731 | weight_filler { 732 | type: "msra" 733 | } 734 | bias_filler { 735 | type: "constant" 736 | value: 0 737 | } 738 | } 739 | } 740 | layer { 741 | name: "BatchNorm10" 742 | type: "BatchNorm" 743 | bottom: "Convolution10" 744 | top: "Convolution10" 745 | param { 746 | lr_mult: 0 747 | decay_mult: 0 748 | } 749 | param { 750 | lr_mult: 0 751 | decay_mult: 0 752 | } 753 | param { 754 | lr_mult: 0 755 | decay_mult: 0 756 | } 757 | batch_norm_param { 758 | use_global_stats: true 759 | } 760 | } 761 | layer { 762 | name: "Scale10" 763 | type: "Scale" 764 | bottom: "Convolution10" 765 | top: "Convolution10" 766 | param { 767 | lr_mult: 0 768 | decay_mult: 0 769 | } 770 | param { 771 | lr_mult: 0 772 | decay_mult: 0 773 | } 774 | scale_param { 775 | bias_term: true 776 | } 777 | } 778 | layer { 779 | name: "Eltwise4" 780 | type: "Eltwise" 781 | bottom: "Convolution8" 782 | bottom: "Convolution10" 783 | top: "Eltwise4" 784 | eltwise_param { 785 | operation: SUM 786 | } 787 | } 788 | layer { 789 | name: "ReLU9" 790 | type: "ReLU" 791 | bottom: 
"Eltwise4" 792 | top: "Eltwise4" 793 | } 794 | layer { 795 | name: "Convolution11" 796 | type: "Convolution" 797 | bottom: "Eltwise4" 798 | top: "Convolution11" 799 | param { 800 | lr_mult: 0 801 | decay_mult: 0 802 | } 803 | param { 804 | lr_mult: 0 805 | decay_mult: 0 806 | } 807 | convolution_param { 808 | num_output: 32 809 | pad: 1 810 | kernel_size: 3 811 | stride: 1 812 | weight_filler { 813 | type: "msra" 814 | } 815 | bias_filler { 816 | type: "constant" 817 | value: 0 818 | } 819 | } 820 | } 821 | layer { 822 | name: "BatchNorm11" 823 | type: "BatchNorm" 824 | bottom: "Convolution11" 825 | top: "Convolution11" 826 | param { 827 | lr_mult: 0 828 | decay_mult: 0 829 | } 830 | param { 831 | lr_mult: 0 832 | decay_mult: 0 833 | } 834 | param { 835 | lr_mult: 0 836 | decay_mult: 0 837 | } 838 | batch_norm_param { 839 | use_global_stats: true 840 | } 841 | } 842 | layer { 843 | name: "Scale11" 844 | type: "Scale" 845 | bottom: "Convolution11" 846 | top: "Convolution11" 847 | param { 848 | lr_mult: 0 849 | decay_mult: 0 850 | } 851 | param { 852 | lr_mult: 0 853 | decay_mult: 0 854 | } 855 | scale_param { 856 | bias_term: true 857 | } 858 | } 859 | layer { 860 | name: "ReLU10" 861 | type: "ReLU" 862 | bottom: "Convolution11" 863 | top: "Convolution11" 864 | } 865 | layer { 866 | name: "Convolution12" 867 | type: "Convolution" 868 | bottom: "Convolution11" 869 | top: "Convolution12" 870 | param { 871 | lr_mult: 0 872 | decay_mult: 0 873 | } 874 | param { 875 | lr_mult: 0 876 | decay_mult: 0 877 | } 878 | convolution_param { 879 | num_output: 32 880 | pad: 1 881 | kernel_size: 3 882 | stride: 1 883 | weight_filler { 884 | type: "msra" 885 | } 886 | bias_filler { 887 | type: "constant" 888 | value: 0 889 | } 890 | } 891 | } 892 | layer { 893 | name: "BatchNorm12" 894 | type: "BatchNorm" 895 | bottom: "Convolution12" 896 | top: "Convolution12" 897 | param { 898 | lr_mult: 0 899 | decay_mult: 0 900 | } 901 | param { 902 | lr_mult: 0 903 | decay_mult: 0 904 | } 905 | 
param { 906 | lr_mult: 0 907 | decay_mult: 0 908 | } 909 | batch_norm_param { 910 | use_global_stats: true 911 | } 912 | } 913 | layer { 914 | name: "Scale12" 915 | type: "Scale" 916 | bottom: "Convolution12" 917 | top: "Convolution12" 918 | param { 919 | lr_mult: 0 920 | decay_mult: 0 921 | } 922 | param { 923 | lr_mult: 0 924 | decay_mult: 0 925 | } 926 | scale_param { 927 | bias_term: true 928 | } 929 | } 930 | layer { 931 | name: "Eltwise5" 932 | type: "Eltwise" 933 | bottom: "Eltwise4" 934 | bottom: "Convolution12" 935 | top: "Eltwise5" 936 | eltwise_param { 937 | operation: SUM 938 | } 939 | } 940 | layer { 941 | name: "ReLU11" 942 | type: "ReLU" 943 | bottom: "Eltwise5" 944 | top: "Eltwise5" 945 | } 946 | layer { 947 | name: "Convolution13" 948 | type: "Convolution" 949 | bottom: "Eltwise5" 950 | top: "Convolution13" 951 | param { 952 | lr_mult: 0 953 | decay_mult: 0 954 | } 955 | param { 956 | lr_mult: 0 957 | decay_mult: 0 958 | } 959 | convolution_param { 960 | num_output: 32 961 | pad: 1 962 | kernel_size: 3 963 | stride: 1 964 | weight_filler { 965 | type: "msra" 966 | } 967 | bias_filler { 968 | type: "constant" 969 | value: 0 970 | } 971 | } 972 | } 973 | layer { 974 | name: "BatchNorm13" 975 | type: "BatchNorm" 976 | bottom: "Convolution13" 977 | top: "Convolution13" 978 | param { 979 | lr_mult: 0 980 | decay_mult: 0 981 | } 982 | param { 983 | lr_mult: 0 984 | decay_mult: 0 985 | } 986 | param { 987 | lr_mult: 0 988 | decay_mult: 0 989 | } 990 | batch_norm_param { 991 | use_global_stats: true 992 | } 993 | } 994 | layer { 995 | name: "Scale13" 996 | type: "Scale" 997 | bottom: "Convolution13" 998 | top: "Convolution13" 999 | param { 1000 | lr_mult: 0 1001 | decay_mult: 0 1002 | } 1003 | param { 1004 | lr_mult: 0 1005 | decay_mult: 0 1006 | } 1007 | scale_param { 1008 | bias_term: true 1009 | } 1010 | } 1011 | layer { 1012 | name: "ReLU12" 1013 | type: "ReLU" 1014 | bottom: "Convolution13" 1015 | top: "Convolution13" 1016 | } 1017 | layer { 1018 | 
name: "Convolution14" 1019 | type: "Convolution" 1020 | bottom: "Convolution13" 1021 | top: "Convolution14" 1022 | param { 1023 | lr_mult: 0 1024 | decay_mult: 0 1025 | } 1026 | param { 1027 | lr_mult: 0 1028 | decay_mult: 0 1029 | } 1030 | convolution_param { 1031 | num_output: 32 1032 | pad: 1 1033 | kernel_size: 3 1034 | stride: 1 1035 | weight_filler { 1036 | type: "msra" 1037 | } 1038 | bias_filler { 1039 | type: "constant" 1040 | value: 0 1041 | } 1042 | } 1043 | } 1044 | layer { 1045 | name: "BatchNorm14" 1046 | type: "BatchNorm" 1047 | bottom: "Convolution14" 1048 | top: "Convolution14" 1049 | param { 1050 | lr_mult: 0 1051 | decay_mult: 0 1052 | } 1053 | param { 1054 | lr_mult: 0 1055 | decay_mult: 0 1056 | } 1057 | param { 1058 | lr_mult: 0 1059 | decay_mult: 0 1060 | } 1061 | batch_norm_param { 1062 | use_global_stats: true 1063 | } 1064 | } 1065 | layer { 1066 | name: "Scale14" 1067 | type: "Scale" 1068 | bottom: "Convolution14" 1069 | top: "Convolution14" 1070 | param { 1071 | lr_mult: 0 1072 | decay_mult: 0 1073 | } 1074 | param { 1075 | lr_mult: 0 1076 | decay_mult: 0 1077 | } 1078 | scale_param { 1079 | bias_term: true 1080 | } 1081 | } 1082 | layer { 1083 | name: "Eltwise6" 1084 | type: "Eltwise" 1085 | bottom: "Eltwise5" 1086 | bottom: "Convolution14" 1087 | top: "Eltwise6" 1088 | eltwise_param { 1089 | operation: SUM 1090 | } 1091 | } 1092 | layer { 1093 | name: "ReLU13" 1094 | type: "ReLU" 1095 | bottom: "Eltwise6" 1096 | top: "Eltwise6" 1097 | } 1098 | layer { 1099 | name: "Convolution15" 1100 | type: "Convolution" 1101 | bottom: "Eltwise6" 1102 | top: "Convolution15" 1103 | param { 1104 | lr_mult: 0 1105 | decay_mult: 0 1106 | } 1107 | param { 1108 | lr_mult: 0 1109 | decay_mult: 0 1110 | } 1111 | convolution_param { 1112 | num_output: 64 1113 | pad: 0 1114 | kernel_size: 1 1115 | stride: 2 1116 | weight_filler { 1117 | type: "msra" 1118 | } 1119 | bias_filler { 1120 | type: "constant" 1121 | value: 0 1122 | } 1123 | } 1124 | } 1125 | layer { 
1126 | name: "BatchNorm15" 1127 | type: "BatchNorm" 1128 | bottom: "Convolution15" 1129 | top: "Convolution15" 1130 | param { 1131 | lr_mult: 0 1132 | decay_mult: 0 1133 | } 1134 | param { 1135 | lr_mult: 0 1136 | decay_mult: 0 1137 | } 1138 | param { 1139 | lr_mult: 0 1140 | decay_mult: 0 1141 | } 1142 | batch_norm_param { 1143 | use_global_stats: true 1144 | } 1145 | } 1146 | layer { 1147 | name: "Scale15" 1148 | type: "Scale" 1149 | bottom: "Convolution15" 1150 | top: "Convolution15" 1151 | param { 1152 | lr_mult: 0 1153 | decay_mult: 0 1154 | } 1155 | param { 1156 | lr_mult: 0 1157 | decay_mult: 0 1158 | } 1159 | scale_param { 1160 | bias_term: true 1161 | } 1162 | } 1163 | layer { 1164 | name: "Convolution16" 1165 | type: "Convolution" 1166 | bottom: "Eltwise6" 1167 | top: "Convolution16" 1168 | param { 1169 | lr_mult: 0 1170 | decay_mult: 0 1171 | } 1172 | param { 1173 | lr_mult: 0 1174 | decay_mult: 0 1175 | } 1176 | convolution_param { 1177 | num_output: 64 1178 | pad: 1 1179 | kernel_size: 3 1180 | stride: 2 1181 | weight_filler { 1182 | type: "msra" 1183 | } 1184 | bias_filler { 1185 | type: "constant" 1186 | value: 0 1187 | } 1188 | } 1189 | } 1190 | layer { 1191 | name: "BatchNorm16" 1192 | type: "BatchNorm" 1193 | bottom: "Convolution16" 1194 | top: "Convolution16" 1195 | param { 1196 | lr_mult: 0 1197 | decay_mult: 0 1198 | } 1199 | param { 1200 | lr_mult: 0 1201 | decay_mult: 0 1202 | } 1203 | param { 1204 | lr_mult: 0 1205 | decay_mult: 0 1206 | } 1207 | batch_norm_param { 1208 | use_global_stats: true 1209 | } 1210 | } 1211 | layer { 1212 | name: "Scale16" 1213 | type: "Scale" 1214 | bottom: "Convolution16" 1215 | top: "Convolution16" 1216 | param { 1217 | lr_mult: 0 1218 | decay_mult: 0 1219 | } 1220 | param { 1221 | lr_mult: 0 1222 | decay_mult: 0 1223 | } 1224 | scale_param { 1225 | bias_term: true 1226 | } 1227 | } 1228 | layer { 1229 | name: "ReLU14" 1230 | type: "ReLU" 1231 | bottom: "Convolution16" 1232 | top: "Convolution16" 1233 | } 1234 | 
layer { 1235 | name: "Convolution17" 1236 | type: "Convolution" 1237 | bottom: "Convolution16" 1238 | top: "Convolution17" 1239 | param { 1240 | lr_mult: 0 1241 | decay_mult: 0 1242 | } 1243 | param { 1244 | lr_mult: 0 1245 | decay_mult: 0 1246 | } 1247 | convolution_param { 1248 | num_output: 64 1249 | pad: 1 1250 | kernel_size: 3 1251 | stride: 1 1252 | weight_filler { 1253 | type: "msra" 1254 | } 1255 | bias_filler { 1256 | type: "constant" 1257 | value: 0 1258 | } 1259 | } 1260 | } 1261 | layer { 1262 | name: "BatchNorm17" 1263 | type: "BatchNorm" 1264 | bottom: "Convolution17" 1265 | top: "Convolution17" 1266 | param { 1267 | lr_mult: 0 1268 | decay_mult: 0 1269 | } 1270 | param { 1271 | lr_mult: 0 1272 | decay_mult: 0 1273 | } 1274 | param { 1275 | lr_mult: 0 1276 | decay_mult: 0 1277 | } 1278 | batch_norm_param { 1279 | use_global_stats: true 1280 | } 1281 | } 1282 | layer { 1283 | name: "Scale17" 1284 | type: "Scale" 1285 | bottom: "Convolution17" 1286 | top: "Convolution17" 1287 | param { 1288 | lr_mult: 0 1289 | decay_mult: 0 1290 | } 1291 | param { 1292 | lr_mult: 0 1293 | decay_mult: 0 1294 | } 1295 | scale_param { 1296 | bias_term: true 1297 | } 1298 | } 1299 | layer { 1300 | name: "Eltwise7" 1301 | type: "Eltwise" 1302 | bottom: "Convolution15" 1303 | bottom: "Convolution17" 1304 | top: "Eltwise7" 1305 | eltwise_param { 1306 | operation: SUM 1307 | } 1308 | } 1309 | layer { 1310 | name: "ReLU15" 1311 | type: "ReLU" 1312 | bottom: "Eltwise7" 1313 | top: "Eltwise7" 1314 | } 1315 | layer { 1316 | name: "Convolution18" 1317 | type: "Convolution" 1318 | bottom: "Eltwise7" 1319 | top: "Convolution18" 1320 | param { 1321 | lr_mult: 0 1322 | decay_mult: 0 1323 | } 1324 | param { 1325 | lr_mult: 0 1326 | decay_mult: 0 1327 | } 1328 | convolution_param { 1329 | num_output: 64 1330 | pad: 1 1331 | kernel_size: 3 1332 | stride: 1 1333 | weight_filler { 1334 | type: "msra" 1335 | } 1336 | bias_filler { 1337 | type: "constant" 1338 | value: 0 1339 | } 1340 | } 1341 
| } 1342 | layer { 1343 | name: "BatchNorm18" 1344 | type: "BatchNorm" 1345 | bottom: "Convolution18" 1346 | top: "Convolution18" 1347 | param { 1348 | lr_mult: 0 1349 | decay_mult: 0 1350 | } 1351 | param { 1352 | lr_mult: 0 1353 | decay_mult: 0 1354 | } 1355 | param { 1356 | lr_mult: 0 1357 | decay_mult: 0 1358 | } 1359 | batch_norm_param { 1360 | use_global_stats: true 1361 | } 1362 | } 1363 | layer { 1364 | name: "Scale18" 1365 | type: "Scale" 1366 | bottom: "Convolution18" 1367 | top: "Convolution18" 1368 | param { 1369 | lr_mult: 0 1370 | decay_mult: 0 1371 | } 1372 | param { 1373 | lr_mult: 0 1374 | decay_mult: 0 1375 | } 1376 | scale_param { 1377 | bias_term: true 1378 | } 1379 | } 1380 | layer { 1381 | name: "ReLU16" 1382 | type: "ReLU" 1383 | bottom: "Convolution18" 1384 | top: "Convolution18" 1385 | } 1386 | layer { 1387 | name: "Convolution19" 1388 | type: "Convolution" 1389 | bottom: "Convolution18" 1390 | top: "Convolution19" 1391 | param { 1392 | lr_mult: 0 1393 | decay_mult: 0 1394 | } 1395 | param { 1396 | lr_mult: 0 1397 | decay_mult: 0 1398 | } 1399 | convolution_param { 1400 | num_output: 64 1401 | pad: 1 1402 | kernel_size: 3 1403 | stride: 1 1404 | weight_filler { 1405 | type: "msra" 1406 | } 1407 | bias_filler { 1408 | type: "constant" 1409 | value: 0 1410 | } 1411 | } 1412 | } 1413 | layer { 1414 | name: "BatchNorm19" 1415 | type: "BatchNorm" 1416 | bottom: "Convolution19" 1417 | top: "Convolution19" 1418 | param { 1419 | lr_mult: 0 1420 | decay_mult: 0 1421 | } 1422 | param { 1423 | lr_mult: 0 1424 | decay_mult: 0 1425 | } 1426 | param { 1427 | lr_mult: 0 1428 | decay_mult: 0 1429 | } 1430 | batch_norm_param { 1431 | use_global_stats: true 1432 | } 1433 | } 1434 | layer { 1435 | name: "Scale19" 1436 | type: "Scale" 1437 | bottom: "Convolution19" 1438 | top: "Convolution19" 1439 | param { 1440 | lr_mult: 0 1441 | decay_mult: 0 1442 | } 1443 | param { 1444 | lr_mult: 0 1445 | decay_mult: 0 1446 | } 1447 | scale_param { 1448 | bias_term: true 
1449 | } 1450 | } 1451 | layer { 1452 | name: "Eltwise8" 1453 | type: "Eltwise" 1454 | bottom: "Eltwise7" 1455 | bottom: "Convolution19" 1456 | top: "Eltwise8" 1457 | eltwise_param { 1458 | operation: SUM 1459 | } 1460 | } 1461 | layer { 1462 | name: "ReLU17" 1463 | type: "ReLU" 1464 | bottom: "Eltwise8" 1465 | top: "Eltwise8" 1466 | } 1467 | layer { 1468 | name: "Convolution20" 1469 | type: "Convolution" 1470 | bottom: "Eltwise8" 1471 | top: "Convolution20" 1472 | param { 1473 | lr_mult: 0 1474 | decay_mult: 0 1475 | } 1476 | param { 1477 | lr_mult: 0 1478 | decay_mult: 0 1479 | } 1480 | convolution_param { 1481 | num_output: 64 1482 | pad: 1 1483 | kernel_size: 3 1484 | stride: 1 1485 | weight_filler { 1486 | type: "msra" 1487 | } 1488 | bias_filler { 1489 | type: "constant" 1490 | value: 0 1491 | } 1492 | } 1493 | } 1494 | layer { 1495 | name: "BatchNorm20" 1496 | type: "BatchNorm" 1497 | bottom: "Convolution20" 1498 | top: "Convolution20" 1499 | param { 1500 | lr_mult: 0 1501 | decay_mult: 0 1502 | } 1503 | param { 1504 | lr_mult: 0 1505 | decay_mult: 0 1506 | } 1507 | param { 1508 | lr_mult: 0 1509 | decay_mult: 0 1510 | } 1511 | batch_norm_param { 1512 | use_global_stats: true 1513 | } 1514 | } 1515 | layer { 1516 | name: "Scale20" 1517 | type: "Scale" 1518 | bottom: "Convolution20" 1519 | top: "Convolution20" 1520 | param { 1521 | lr_mult: 0 1522 | decay_mult: 0 1523 | } 1524 | param { 1525 | lr_mult: 0 1526 | decay_mult: 0 1527 | } 1528 | scale_param { 1529 | bias_term: true 1530 | } 1531 | } 1532 | layer { 1533 | name: "ReLU18" 1534 | type: "ReLU" 1535 | bottom: "Convolution20" 1536 | top: "Convolution20" 1537 | } 1538 | layer { 1539 | name: "Convolution21" 1540 | type: "Convolution" 1541 | bottom: "Convolution20" 1542 | top: "Convolution21" 1543 | param { 1544 | lr_mult: 0 1545 | decay_mult: 0 1546 | } 1547 | param { 1548 | lr_mult: 0 1549 | decay_mult: 0 1550 | } 1551 | convolution_param { 1552 | num_output: 64 1553 | pad: 1 1554 | kernel_size: 3 1555 | 
stride: 1 1556 | weight_filler { 1557 | type: "msra" 1558 | } 1559 | bias_filler { 1560 | type: "constant" 1561 | value: 0 1562 | } 1563 | } 1564 | } 1565 | layer { 1566 | name: "BatchNorm21" 1567 | type: "BatchNorm" 1568 | bottom: "Convolution21" 1569 | top: "Convolution21" 1570 | param { 1571 | lr_mult: 0 1572 | decay_mult: 0 1573 | } 1574 | param { 1575 | lr_mult: 0 1576 | decay_mult: 0 1577 | } 1578 | param { 1579 | lr_mult: 0 1580 | decay_mult: 0 1581 | } 1582 | batch_norm_param { 1583 | use_global_stats: true 1584 | } 1585 | } 1586 | layer { 1587 | name: "Scale21" 1588 | type: "Scale" 1589 | bottom: "Convolution21" 1590 | top: "Convolution21" 1591 | param { 1592 | lr_mult: 0 1593 | decay_mult: 0 1594 | } 1595 | param { 1596 | lr_mult: 0 1597 | decay_mult: 0 1598 | } 1599 | scale_param { 1600 | bias_term: true 1601 | } 1602 | } 1603 | layer { 1604 | name: "Eltwise9" 1605 | type: "Eltwise" 1606 | bottom: "Eltwise8" 1607 | bottom: "Convolution21" 1608 | top: "Eltwise9" 1609 | eltwise_param { 1610 | operation: SUM 1611 | } 1612 | } 1613 | layer { 1614 | name: "ReLU19" 1615 | type: "ReLU" 1616 | bottom: "Eltwise9" 1617 | top: "Eltwise9" 1618 | } 1619 | layer { 1620 | name: "Pooling1" 1621 | type: "Pooling" 1622 | bottom: "Eltwise9" 1623 | top: "Pooling1" 1624 | pooling_param { 1625 | pool: AVE 1626 | global_pooling: true 1627 | } 1628 | } 1629 | layer { 1630 | name: "InnerProduct1" 1631 | type: "InnerProduct" 1632 | bottom: "Pooling1" 1633 | top: "InnerProduct1" 1634 | param { 1635 | lr_mult: 0 1636 | decay_mult: 0 1637 | } 1638 | param { 1639 | lr_mult: 0 1640 | decay_mult: 0 1641 | } 1642 | inner_product_param { 1643 | num_output: 10 1644 | weight_filler { 1645 | type: "msra" 1646 | } 1647 | bias_filler { 1648 | type: "constant" 1649 | value: 0 1650 | } 1651 | } 1652 | } 1653 | layer { 1654 | name: "SoftmaxWithLoss1" 1655 | type: "SoftmaxWithLoss" 1656 | bottom: "InnerProduct1" 1657 | bottom: "Data2" 1658 | top: "SoftmaxWithLoss1" 1659 | } 1660 | layer { 1661 | 
name: "Accuracy1" 1662 | type: "Accuracy" 1663 | bottom: "InnerProduct1" 1664 | bottom: "Data2" 1665 | top: "Accuracy1" 1666 | include { 1667 | phase: TEST 1668 | } 1669 | } 1670 | -------------------------------------------------------------------------------- /batch_euclidean_map_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layer.hpp" 5 | #include "caffe/util/io.hpp" 6 | #include "caffe/util/math_functions.hpp" 7 | #include "caffe/layers/batch_euclidean_map_layer.hpp" 8 | 9 | namespace caffe { 10 | 11 | template 12 | void BatchEuclideanMapLayer::LayerSetUp(const vector*>& bottom, 13 | const vector*>& top){ 14 | num_ = bottom[0]->num(); 15 | channels_ = bottom[0]->channels(); 16 | CHECK_EQ(bottom[0]->height(), 1); 17 | CHECK_EQ(bottom[0]->width(), 1); 18 | } 19 | 20 | template 21 | void BatchEuclideanMapLayer::Reshape(const vector*>& bottom, 22 | const vector*>& top){ 23 | diff_feat.Reshape(1, bottom[0]->channels(), 1, 1); 24 | top[0]->Reshape(bottom[0]->num(), bottom[0]->num(), 1, 1); 25 | } 26 | 27 | template 28 | void BatchEuclideanMapLayer::Forward_cpu(const vector*>& bottom, 29 | const vector*>& top){ 30 | const Dtype* bottom_data = bottom[0]->cpu_data(); 31 | Dtype* top_data = top[0]->mutable_cpu_data(); 32 | Dtype* diff_feat_data = diff_feat.mutable_cpu_data(); 33 | max_d = Dtype(0.0); 34 | 35 | for (int n = 0; n < num_; ++n){ 36 | for (int nn = 0; nn < num_; ++nn){ 37 | //diff_feat = x_n - x_nn 38 | caffe_sub(channels_, bottom_data+n*channels_, bottom_data+nn*channels_, diff_feat_data); 39 | //sim = diff_feat * diff_feat 40 | Dtype distance = caffe_cpu_dot(channels_, diff_feat_data, diff_feat_data); 41 | if (distance > max_d){ 42 | max_d = distance; 43 | } 44 | //top[n, nn] = sim 45 | caffe_set(1, distance, top_data+n*num_+nn); 46 | } 47 | } 48 | caffe_scal(num_*num_, Dtype(1.0) / max_d, top_data); 49 | } 50 | 51 | template 52 | void 
BatchEuclideanMapLayer::Backward_cpu(const vector*>& top, 53 | const vector& propagate_down, const vector*>& bottom){ 54 | if (!propagate_down[0]){return;} 55 | const Dtype* top_diff = top[0]->cpu_diff(); 56 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 57 | caffe_set(num_*channels_, Dtype(0.0), bottom_diff); 58 | Dtype* diff_feat_data = diff_feat.mutable_cpu_data(); 59 | const Dtype* bottom_data = bottom[0]->cpu_data(); 60 | Dtype scale = Dtype(0.0); 61 | for (int n = 0; n < num_; ++n){ 62 | for (int nn = 0; nn < num_; ++nn){ 63 | caffe_sub(channels_, bottom_data+n*channels_, bottom_data+nn*channels_, diff_feat_data); 64 | caffe_copy(1, top_diff+n*num_+nn, &scale); 65 | //const Dtype* diff_feat_data2 = diff_feat.cpu_data(); 66 | caffe_axpy(channels_, scale*Dtype(2.0) / max_d, diff_feat_data, bottom_diff+n*channels_); 67 | } 68 | } 69 | } 70 | 71 | 72 | #ifdef CPU_ONLY 73 | STUB_GPU(BatchEuclideanMapLayer); 74 | #endif 75 | INSTANTIATE_CLASS(BatchEuclideanMapLayer); 76 | REGISTER_LAYER_CLASS(BatchEuclideanMap); 77 | } // namespace caffe 78 | -------------------------------------------------------------------------------- /batch_euclidean_map_layer.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layers/batch_euclidean_map_layer.hpp" 5 | 6 | namespace caffe { 7 | template 8 | void BatchEuclideanMapLayer::Forward_gpu(const vector*>& bottom, const vector*>& top){ 9 | const Dtype* bottom_data = bottom[0]->gpu_data(); 10 | Dtype* top_data = top[0]->mutable_gpu_data(); 11 | Dtype* diff_feat_data = diff_feat.mutable_gpu_data(); 12 | 13 | max_d = Dtype(0.0); 14 | for (int n = 0; n < num_; ++n){ 15 | for (int nn = 0; nn < num_; ++nn){ 16 | //diff_feat = x_n - x_nn 17 | caffe_gpu_sub(channels_, bottom_data+n*channels_, bottom_data+nn*channels_, diff_feat_data); 18 | //sim = diff_feat * diff_feat 19 | Dtype distance = Dtype(0.0); 20 | caffe_gpu_dot(channels_, diff_feat_data, 
diff_feat_data, &distance); 21 | if (distance > max_d){ 22 | max_d = distance; 23 | } 24 | //top[n, nn] = sim 25 | caffe_gpu_set(1, distance, top_data+n*num_+nn); 26 | } 27 | } 28 | caffe_gpu_scal(num_*num_, Dtype(1.0) / max_d, top_data); 29 | } 30 | 31 | template 32 | void BatchEuclideanMapLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom){ 33 | if (!propagate_down[0]){return;} 34 | const Dtype* top_diff = top[0]->gpu_diff(); 35 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 36 | caffe_gpu_set(num_*channels_, Dtype(0.0), bottom_diff); 37 | Dtype* diff_feat_data = diff_feat.mutable_gpu_data(); 38 | const Dtype* bottom_data = bottom[0]->gpu_data(); 39 | Dtype scale = Dtype(0.0); 40 | for (int n = 0; n < num_; ++n){ 41 | for (int nn = 0; nn < num_; ++nn){ 42 | caffe_gpu_sub(channels_, bottom_data+n*channels_, bottom_data+nn*channels_, diff_feat_data); 43 | caffe_copy(1, top_diff+n*num_+nn, &scale); 44 | //const Dtype* diff_feat_data2 = diff_feat.gpu_data(); 45 | caffe_gpu_axpy(channels_, scale*Dtype(2.0) / max_d, diff_feat_data, bottom_diff+n*channels_); 46 | } 47 | } 48 | } 49 | INSTANTIATE_LAYER_GPU_FUNCS(BatchEuclideanMapLayer); 50 | } 51 | -------------------------------------------------------------------------------- /batch_euclidean_map_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_BATCH_EUCLIDEAN_MAP_LAYER_HPP_ 2 | #define CAFFE_BATCH_EUCLIDEAN_MAP_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | template 12 | class BatchEuclideanMapLayer : public Layer { 13 | public: 14 | explicit BatchEuclideanMapLayer(const LayerParameter& param) : Layer(param) {} 15 | virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); 16 | virtual void Reshape(const vector*>& bottom, const vector*>& top); 17 | virtual inline const char* type() 
const { return "BatchEuclideanMap"; } 18 | virtual inline int ExactNumBottomBlobs() const { return 1; } 19 | virtual inline int MinTopBlobs() const { return 1; } 20 | virtual inline int MaxTopBlobs() const { return 1; } 21 | protected: 22 | virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); 23 | virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); 24 | virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); 25 | virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); 26 | int channels_; 27 | int num_; 28 | Blob diff_feat; 29 | Dtype max_d; 30 | 31 | }; 32 | } // namespace caffe 33 | 34 | #endif //CAFFE_BatchEuclideanMap_LAYER_HPP_ 35 | -------------------------------------------------------------------------------- /batch_euclidean_vector_layer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layer.hpp" 5 | #include "caffe/util/io.hpp" 6 | #include "caffe/util/math_functions.hpp" 7 | #include "caffe/layers/batch_euclidean_vector_layer.hpp" 8 | 9 | namespace caffe { 10 | 11 | template 12 | void BatchEuclideanVectorLayer::LayerSetUp(const vector*>& bottom, 13 | const vector*>& top){ 14 | num_ = bottom[0]->num(); 15 | channels_ = bottom[0]->channels(); 16 | CHECK_EQ(bottom[0]->height(), 1); 17 | CHECK_EQ(bottom[0]->width(), 1); 18 | CHECK_EQ(bottom[1]->height(), 1); 19 | CHECK_EQ(bottom[1]->width(), 1); 20 | CHECK_EQ(channels_, bottom[1]->channels()); 21 | } 22 | 23 | template 24 | void BatchEuclideanVectorLayer::Reshape(const vector*>& bottom, 25 | const vector*>& top){ 26 | diff_feat.Reshape(1, channels_, 1, 1); 27 | top[0]->Reshape(1, 1, 1, num_); 28 | } 29 | 30 | template 31 | void BatchEuclideanVectorLayer::Forward_cpu(const vector*>& bottom, 32 | const vector*>& top){ 33 | const Dtype* bottom_data1 = bottom[0]->cpu_data(); 34 | const Dtype* 
bottom_data2 = bottom[1]->cpu_data(); 35 | Dtype* top_data = top[0]->mutable_cpu_data(); 36 | Dtype* diff_feat_data = diff_feat.mutable_cpu_data(); 37 | max_d = Dtype(0.0); 38 | for(int n = 0; n < num_; ++n){ 39 | caffe_sub(channels_, bottom_data1+n*channels_, bottom_data2+n*channels_, diff_feat_data); 40 | Dtype distance = caffe_cpu_dot(channels_, diff_feat_data, diff_feat_data); 41 | if (distance > max_d){ 42 | max_d = distance; 43 | } 44 | caffe_set(1, distance, top_data+n); 45 | } 46 | caffe_scal(num_, Dtype(1.0) / max_d, top_data); 47 | } 48 | 49 | template 50 | void BatchEuclideanVectorLayer::Backward_cpu(const vector*>& top, 51 | const vector& propagate_down, const vector*>& bottom){ 52 | if (!propagate_down[0]){return;} 53 | const Dtype* top_diff = top[0]->cpu_diff(); 54 | Dtype* bottom_diff1 = bottom[0]->mutable_cpu_diff(); 55 | Dtype* bottom_diff2 = bottom[1]->mutable_cpu_diff(); 56 | caffe_set(num_*channels_, Dtype(0.0), bottom_diff1); 57 | caffe_set(num_*channels_, Dtype(0.0), bottom_diff2); 58 | Dtype* diff_feat_data = diff_feat.mutable_cpu_data(); 59 | const Dtype* bottom_data1 = bottom[0]->cpu_data(); 60 | const Dtype* bottom_data2 = bottom[1]->cpu_data(); 61 | Dtype scale = Dtype(0.0); 62 | for (int n = 0; n < num_; ++n){ 63 | caffe_sub(channels_, bottom_data1+n*channels_, bottom_data2+n*channels_, diff_feat_data); 64 | caffe_copy(1, top_diff+n, &scale); 65 | //const Dtype* diff_feat_data2 = diff_feat.cpu_data(); 66 | caffe_axpy(channels_, scale*Dtype(2.0) / max_d, diff_feat_data, bottom_diff1+n*channels_); 67 | caffe_axpy(channels_, scale*Dtype(-2.0) / max_d, diff_feat_data, bottom_diff2+n*channels_); 68 | } 69 | } 70 | 71 | 72 | #ifdef CPU_ONLY 73 | STUB_GPU(BatchEuclideanVectorLayer); 74 | #endif 75 | INSTANTIATE_CLASS(BatchEuclideanVectorLayer); 76 | REGISTER_LAYER_CLASS(BatchEuclideanVector); 77 | } // namespace caffe 78 | -------------------------------------------------------------------------------- /batch_euclidean_vector_layer.cu: 
-------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "caffe/layers/batch_euclidean_vector_layer.hpp" 5 | 6 | namespace caffe { 7 | template 8 | void BatchEuclideanVectorLayer::Forward_gpu(const vector*>& bottom, const vector*>& top){ 9 | const Dtype* bottom_data1 = bottom[0]->gpu_data(); 10 | const Dtype* bottom_data2 = bottom[1]->gpu_data(); 11 | Dtype* top_data = top[0]->mutable_gpu_data(); 12 | Dtype* diff_feat_data = diff_feat.mutable_gpu_data(); 13 | 14 | max_d = Dtype(0.0); 15 | for (int n = 0; n < num_; ++n){ 16 | //diff_feat = x_n - x_nn 17 | caffe_gpu_sub(channels_, bottom_data1+n*channels_, bottom_data2+n*channels_, diff_feat_data); 18 | //sim = diff_feat * diff_feat 19 | Dtype distance = Dtype(0.0); 20 | caffe_gpu_dot(channels_, diff_feat_data, diff_feat_data, &distance); 21 | if (distance > max_d){ 22 | max_d = distance; 23 | } 24 | //top[n, nn] = sim 25 | caffe_gpu_set(1, distance, top_data+n); 26 | } 27 | caffe_gpu_scal(num_, Dtype(1.0) / max_d, top_data); 28 | } 29 | 30 | template 31 | void BatchEuclideanVectorLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom){ 32 | if (!propagate_down[0]){return;} 33 | const Dtype* top_diff = top[0]->gpu_diff(); 34 | Dtype* bottom_diff1 = bottom[0]->mutable_gpu_diff(); 35 | Dtype* bottom_diff2 = bottom[1]->mutable_gpu_diff(); 36 | caffe_gpu_set(num_*channels_, Dtype(0.0), bottom_diff1); 37 | caffe_gpu_set(num_*channels_, Dtype(0.0), bottom_diff2); 38 | Dtype* diff_feat_data = diff_feat.mutable_gpu_data(); 39 | const Dtype* bottom_data1 = bottom[0]->gpu_data(); 40 | const Dtype* bottom_data2 = bottom[1]->gpu_data(); 41 | Dtype scale = Dtype(0.0); 42 | for (int n = 0; n < num_; ++n){ 43 | caffe_gpu_sub(channels_, bottom_data1+n*channels_, bottom_data2+n*channels_, diff_feat_data); 44 | caffe_copy(1, top_diff+n, &scale); 45 | //const Dtype* diff_feat_data2 = diff_feat.gpu_data(); 46 | 
caffe_gpu_axpy(channels_, scale*Dtype(2.0) / max_d, diff_feat_data, bottom_diff1+n*channels_); 47 | caffe_gpu_axpy(channels_, scale*Dtype(-2.0) / max_d, diff_feat_data, bottom_diff2+n*channels_); 48 | } 49 | } 50 | INSTANTIATE_LAYER_GPU_FUNCS(BatchEuclideanVectorLayer); 51 | } 52 | -------------------------------------------------------------------------------- /batch_euclidean_vector_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_BATCH_EUCLIDEAN_VECTOR_LAYER_HPP_ 2 | #define CAFFE_BATCH_EUCLIDEAN_VECTOR_LAYER_HPP_ 3 | 4 | #include 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | template 12 | class BatchEuclideanVectorLayer : public Layer { 13 | public: 14 | explicit BatchEuclideanVectorLayer(const LayerParameter& param) : Layer(param) {} 15 | virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); 16 | virtual void Reshape(const vector*>& bottom, const vector*>& top); 17 | virtual inline const char* type() const { return "BatchEuclideanVector"; } 18 | virtual inline int ExactNumBottomBlobs() const { return 2; } 19 | virtual inline int MinTopBlobs() const { return 1; } 20 | virtual inline int MaxTopBlobs() const { return 1; } 21 | protected: 22 | virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); 23 | virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); 24 | virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); 25 | virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); 26 | int channels_; 27 | int num_; 28 | Blob diff_feat; 29 | Dtype max_d; 30 | }; 31 | } // namespace caffe 32 | 33 | #endif //CAFFE_BatchEuclideanVector_LAYER_HPP_ 34 | -------------------------------------------------------------------------------- /img/Exp_results.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yufanLIU/IRG/0a6837c923cf3a814a4c15022c11a658905297a5/img/Exp_results.png -------------------------------------------------------------------------------- /img/framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yufanLIU/IRG/0a6837c923cf3a814a4c15022c11a658905297a5/img/framework.png --------------------------------------------------------------------------------