├── README.md ├── VOCdevkit └── VOC2007 │ ├── ImageSets │ └── Segmentation │ │ ├── train.txt │ │ └── val.txt │ ├── JPEGImages │ ├── 0.png │ └── 20000.png │ └── SegmentationClass │ ├── 0.png │ └── 20000.png ├── __pycache__ └── segformer.cpython-38.pyc ├── datasets ├── JPEGImages │ └── 1.jpg ├── SegmentationClass │ └── 1.png └── before │ ├── 1.jpg │ └── 1.json ├── get_miou.py ├── json_to_dataset.py ├── model_data └── README.md ├── nets ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── __init__.cpython-38.pyc │ ├── backbone.cpython-37.pyc │ ├── backbone.cpython-38.pyc │ ├── conv_.cpython-37.pyc │ ├── conv_.cpython-38.pyc │ ├── segformer.cpython-38.pyc │ └── segformer_training.cpython-38.pyc ├── backbone.py ├── conv_.py ├── mf_head.py ├── segformer.py └── segformer_training.py ├── predict.py ├── requirements.txt ├── segformer.py ├── summary.py ├── train.py ├── utils ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── callbacks.cpython-38.pyc │ ├── dataloader.cpython-38.pyc │ ├── utils.cpython-38.pyc │ ├── utils_fit.cpython-38.pyc │ └── utils_metrics.cpython-38.pyc ├── callbacks.py ├── dataloader.py ├── utils.py ├── utils_fit.py └── utils_metrics.py └── voc_annotation.py /README.md: -------------------------------------------------------------------------------- 1 | # Article link:https://www.mdpi.com/2072-4292/15/19/4697/htm 2 | Citation: Zhang, T.; Qin, C.; Li, W.; Mao, X.; Zhao, L.; Hou, B.; Jiao, L. Water Body Extraction of the Weihe River Basin Based on MF-SegFormer Applied to Landsat8 OLI Data. Remote Sens. 2023, 15, 4697. 
3 | 4 | -------------------------------------------------------------------------------- /VOCdevkit/VOC2007/ImageSets/Segmentation/train.txt: -------------------------------------------------------------------------------- 1 | 0 2 | -------------------------------------------------------------------------------- /VOCdevkit/VOC2007/ImageSets/Segmentation/val.txt: -------------------------------------------------------------------------------- 1 | 20000 2 | -------------------------------------------------------------------------------- /VOCdevkit/VOC2007/JPEGImages/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/VOCdevkit/VOC2007/JPEGImages/0.png -------------------------------------------------------------------------------- /VOCdevkit/VOC2007/JPEGImages/20000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/VOCdevkit/VOC2007/JPEGImages/20000.png -------------------------------------------------------------------------------- /VOCdevkit/VOC2007/SegmentationClass/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/VOCdevkit/VOC2007/SegmentationClass/0.png -------------------------------------------------------------------------------- /VOCdevkit/VOC2007/SegmentationClass/20000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/VOCdevkit/VOC2007/SegmentationClass/20000.png -------------------------------------------------------------------------------- /__pycache__/segformer.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/__pycache__/segformer.cpython-38.pyc -------------------------------------------------------------------------------- /datasets/JPEGImages/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/datasets/JPEGImages/1.jpg -------------------------------------------------------------------------------- /datasets/SegmentationClass/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/datasets/SegmentationClass/1.png -------------------------------------------------------------------------------- /datasets/before/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/datasets/before/1.jpg -------------------------------------------------------------------------------- /datasets/before/1.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "3.16.7", 3 | "flags": {}, 4 | "shapes": [ 5 | { 6 | "label": "cat", 7 | "line_color": null, 8 | "fill_color": null, 9 | "points": [ 10 | [ 11 | 202.77358490566036, 12 | 626.0943396226414 13 | ], 14 | [ 15 | 178.24528301886792, 16 | 552.5094339622641 17 | ], 18 | [ 19 | 195.22641509433961, 20 | 444.9622641509434 21 | ], 22 | [ 23 | 177.30188679245282, 24 | 340.2452830188679 25 | ], 26 | [ 27 | 173.52830188679243, 28 | 201.56603773584905 29 | ], 30 | [ 31 | 211.2641509433962, 32 | 158.16981132075472 33 | ], 34 | [ 35 | 226.35849056603772, 36 | 87.41509433962264 37 | ], 38 | [ 39 | 208.43396226415092, 40 | 
6.283018867924525 41 | ], 42 | [ 43 | 277.3018867924528, 44 | 57.226415094339615 45 | ], 46 | [ 47 | 416.92452830188677, 48 | 80.81132075471697 49 | ], 50 | [ 51 | 497.1132075471698, 52 | 64.77358490566037 53 | ], 54 | [ 55 | 578.2452830188679, 56 | 6.283018867924525 57 | ], 58 | [ 59 | 599.0, 60 | 35.52830188679245 61 | ], 62 | [ 63 | 589.566037735849, 64 | 96.84905660377359 65 | ], 66 | [ 67 | 592.3962264150944, 68 | 133.64150943396226 69 | ], 70 | [ 71 | 679.188679245283, 72 | 174.2075471698113 73 | ], 74 | [ 75 | 723.5283018867924, 76 | 165.71698113207546 77 | ], 78 | [ 79 | 726.3584905660377, 80 | 222.32075471698113 81 | ], 82 | [ 83 | 759.377358490566, 84 | 262.88679245283015 85 | ], 86 | [ 87 | 782.9622641509434, 88 | 350.62264150943395 89 | ], 90 | [ 91 | 766.9245283018868, 92 | 428.92452830188677 93 | ], 94 | [ 95 | 712.2075471698113, 96 | 465.71698113207543 97 | ], 98 | [ 99 | 695.2264150943396, 100 | 538.3584905660377 101 | ], 102 | [ 103 | 657.4905660377358, 104 | 601.566037735849 105 | ], 106 | [ 107 | 606, 108 | 633 109 | ], 110 | [ 111 | 213, 112 | 633 113 | ] 114 | ], 115 | "shape_type": "polygon", 116 | "flags": {} 117 | } 118 | ], 119 | "lineColor": [ 120 | 0, 121 | 255, 122 | 0, 123 | 128 124 | ], 125 | "fillColor": [ 126 | 255, 127 | 0, 128 | 0, 129 | 128 130 | ], 131 | "imagePath": "1.jpg", 132 | "imageData": 
"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAJ6A7YDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwCY06M0jcUJ96vlZLU96JOpp1NFLUmgUlLRRYBKDS0lADaKdTTQAxqYakNNIoAjfoayL/7prXfpWRqHQ1SA43U/vn61jsK2tT++ayQuTXqUPhOSpuQ7DTWT2rQWDIpHgrRVNSOQyWj5pjirskXJqCSPitoyM3Esaef3grr9PbgVxdmdsgrrdNbgVw4tHVh5aWOjQZjqCVOtTQv8tMnbg15yR1XK0d19nk5rdtilxECMGuTuOTVzSr5oJAjHivVwdVr3WceJpXXMjTv9NJBKCuR1GOeCRtynFejRus0OazryxhvcqVFd8qKlqjhVZrRnm3zSnmniGukvPDzW5JUcVnm1YdqyleOhtSs3coJb1ZjgHpUwjxU8UYxXFO7PYo1VEj8n5aiZa0JFwtU5BiuZXPVoVVJFKVapysV6Grs1UpK6qO5zYySsys07evFMMjY605hTgo/GvRVkj5ed+YYm6lbNTrHT/K4rOUjqpwdiiysahMZBrRaPFV5I6OY1dMgWQrV+C5461lyZFNSYqaUoKSM1PlZ00U+RU4esKC4960opsgVxyhynVGfMi5upQ1RBqdnmsTUsLJ709XxVQHFPVqloZcDU4NVZWp4alYRPuoLVDmgmgCbdTd+KiJpjSUJATl6YZKgaWomlppElozcVE89V95Y8VPFavMa0jC4rjAxc8VtadZncCRS2WldyK6O0sfL7V0Qp2MZzvsTWUGAOK0wuAKjii2gVKxq2zEjeoHqVjUbVmxortTDUrCoiOagsBSg0lFQMeDTwaYqk1ajg3VrGDkZyko7iRgtVtVCrSxxKi46mnBa3jTsYSq3KE65zmuT1+by1YA9q7G9kWGInrxXnWu3PmStzQ3ysI6nOTuNx4qHd6U58ZzTK2VRmZLupjSHHWmEmjBpBzNDTTgfzpoFPC0ydRwJp4oVeKeI6keoscW8+9OksWYdKnsx+9ANbqQKwFXFkSRy8GkzzzhVGB616j4N8IWsFubmeJZJexbtWTZ2iLzgZrt9DvESDyScV1Uqlmc1SF0XZtKhlhKsikfSuS1GySzm44Brup54khLSyqox3NcLr2owTyERHIHeqqu61FTTT0Mi4uFVCKyZZ8mm3d2M1nefubFedUjzHfSlYuCfnrTvOqOOHzBmphb8iuX2TOtTQzc0hwtW4NNl
mNWrOz6cV0NpagY4ranh11M54i2xSsNJWLBIret7NfSnpF2q/axYNbciRzObY+KxXHSmyWuO1a0MYxRLDTRPMzGWHaeasp0qdrb5qcIgBTC7Id1SIe1OMftSBRmgBXOBVC4kq3KeKzrg1z1pWR34SHMylK2T71CakbrTTXj1JXZ9DTjZEJFRPUzkCqsj04q5E5JaDXNMPTNRs3NRtP711Ql0OOSFkPNQE4pJJagDZNXpcuK0uy9bqGk5rXiXisKGXaa1YrtfLqK9N2ujz8RUuy6JNtNkkqhJdgng1Cbg+tc0IWepjHUtTXGGorJnuMNRXoJKxrY23pF6096Z3rzpBElFSCogeKXNZmpJSUmaaTQA40ZplFAD80wmkpDQAppppf50mKAIpDxWVf/cNasnQ1j6gflNWhWOR1L/WmqEa5bFX7/8A1hqrbx5kr0YaRMJLUuxQZHSle34PFaUEPy9KdLB8vSuZ1GmaKJzMsPzVBLBwa1pof3lPSxEn3jge3NdMahjKKRz0MREtdNp+QBTk0+BTxCzH/aOP0FaduFiHy28K/gW/mTWlRKS1MIzcXoWYG4p0nIpVuHH8EP8A36X/AAqZZi33ooz/AMBx/KsI0o3NXUZmSQ81WOYpM1uyeR/FAP8AgLEfzzVSeC0cfLMyN6MOPzFdEaNtiPavqWrS7YQZBp8N/tm+aoLO1aOMkYdPVTmoZyscuK74XSOOdrnTq0N3EBxWVe6cFJKrmoLS724Ga2onE0eDg1o4qRMZOLOHuovLnxToga3NUsBu3qKx1UqcYrjqUrHdSrXEaqkvWrTmqzcmuKVNXPTp13FFKWqMi1qSpVdofWqi+UyqV3Mzcc8001ckh21RlOK3jLmOCpGzuTxtzVheazY5cGrkcvNXKJ00JJomeOoXizU4bNKMVJ0GZNBWfLHtNbkwGKzLla0g9TlrwVirHNitK2n6c1kkYNWbcnIp1IpowpTaZ0EcuQKmDVnwkkVZDYrglHU9CMrlnNOBqsJKcJKz5TS5aBqQN+VVBMKXz6nlGXN3SmmSqvniozP701FiLhkqFpKrG496haYk4HNUoNkORZaXNLFG0zYA61Pa6VPMFlmxGjdC3GfwrpLHTLaLbtV5D6t8oraNOxm59jMstKLYJFdHaaUAB8taVrbKoG1FX6D/ABrSSLA5rZJIycmylDZBR0q4kIUU/IFNaSk2QKTTGNIZKYWqLjsBqNqVpKjZqkYGoTStJT4YjJU2vsO9hgUngVPHb5G406TZbLuYj8awNS1wj5IWxXRCiupzzrPobcs8FuPmYZqk+tQK3DiuRuNRc53Sbiaz3vH7CuhWRzttnptrfRTjO8VbF2o4BBry6y1WaGUDdXV6feNKQSabYkaOqzExNn0rzzU+ZGrtNSdpFNcdqC8muNv3jrUfdMUrzTdlSt1pK2uZWIttLt/OnkgUzcKLj5RCPpSgUtPWPvScrFeyYq4qWmBfSlUZasucpQJYn2yA1uWtwCOayktxwTVpFKYxVwqDnSSR0ENwqjrU51Uwj5TzXPrOwHWmtMTXVE4matxqksxyzk/jVKW4yuc1UaTAqpPcYGM1TVyE7Fa9uDuODUFrKWl5qGTMklS20WJBUtaG1P3mdHanKitKCDcelZ9hGcDNbkAwoFZwpNvU2nJRLNrCARWskqxjFZycUpl9K6HGxyXubcEu+ti0izXM2UvzV0dpPgVlJD5kbCJtFOK5qoLnjrTvtAqLBzIkZRTDHUiyg0UrFERWoynerB6VDKQBQNFOfpWRcNzV+6lwKyJXya4cVPQ9rAxW4maY5oLVBJJXmJXZ67kkhsjVA3WkeSmM4xXbTSscU22yKaqb8VYlkGKpSSZNXy2COohJY1KkRxTYEBNaKRDFZupysyxFSysikyleagkuWXIBrQuFwOKypl5Nd11KF0eXTvKdmCXhzVxZ90ec1k4OeKuQKfLrl5OaR6EsPyq5HcS/PRTbiI76K6lDQ5zsX61Ef1qZ6hY
V5MhRHCnU1RT6zsbCGjNKaSgYlLRiloJuJSU6koGIaQ0tBoGQS8CsTUD8prZn6Vg6ieDVx3EcveHMppbKPMlNuOZjV3Toi8oAGTXfqonP9o3baH5akmg+XpV60gSOP5zk+gqzIwEeERV98ZNc/s7vU1ctDljYSSyHCE/QVqW2kT4/493/AO/ZqyWmc481j+NW7eCvSoYZWuefVqu9iFdJcDJhb/vmmvYbf4cfhWuqFR6UySWVf4ifrzVVadka4ePMzEMGD0pVXFaDTIxw8K/VeKX7NFKuYn59G4rnpQdzpqpJGXMaz5Eya1bm3dD8wIql5RLV2qB5kpajrFPKbO4/hVmeaHzP38Ik9+hFPt7XAyetTywREfNitYohlFo0aPdandjqvQj/ABqSzv8AyuHpksCfwNgjvUEjRPhJ22v2lHf61oQbwKXMXHOay7nTf3hNFm8ttJtc5B6EdDWi0oKZNEo8yHTlyswpLFqpSWbCushiWUHvVee0GDxXn1Y8p6VKTkck8BqF0xW9PbY7VnzQe1efOqr2O2nhZPUxZuhqjNHmtma3PpVJoGHbFa0qljaWETWpkeWQ1TL0q40HXimGCuh17mccI47DBLgUCf3qJ0IqFvvVUdSakXEsPNmqsg3U5eak21srI5mnIoNFzUsMeDUzriolfa1Vucko8rNGHgVI0mBVVZOOKRpeDXLKGp0RqaDzPg0onqg8nNSREtVchpGpctmY1G1wanjtiw6U59PYjgUlFFtyKLXhpouy3AqyNGnmbaiMxJ4AHWuh03w5FYbXnVZ7j/nn1VPr6n2rZQic7nO5k2OlXF1D9olYQW+cbm6t/ujvW5bWQiCrbQ7fWVhlj/hW1b6Y0rb5cs3qa1oNPCjpSlboGvUyLTTiTlssfU1u21sEHSp1gVRQ0gUVjsUi0mFFK0wFUDce9RPce9TzFcpdeeoTcVQec+tRiU1LkVyml54prT1Q82mPMR3qbisXHuKjM/NZzTEnir9laPMQWHFOKchNpK7LFvGZiD2qa71CGwhx3qG/vV0+DCda426vZ76Uk7h9a6ox5TknNyLep61LPkBiAfSskTK3XOfep0sFY5aX8KlWyWPpWhkUGi5yuacbZ9vMWR61pfZztOF/EVEsk8R2jp6EVSRJjGIwy7q6nRZgQuPSsS9jST5iNrVc0EsJNo5xQC3Oh1BlER9a5DUGHNdFfybVO6uXvjnNcT+I74x90x5JcNSq2aq3DHzKntjmulxtEinG8rCyA4NVt5zWiy5FUZk5pU5JnQ6FtRyy81aWQbazScU9JHNaSppofOo6M0PM4pqTgSdapOzYqo0jBs1nCkjlrT7HVxTqQMVaEg21yMV6yd6uxX7NjJ4prDWdzJ121Y2zIM0hk5rPS7BHWl+0jPWuhI5ZFx24qv5Jmb2oSXzDWtY2471tCNzCc7Iz1sfapYrTZIM1uC3AHSqlwm01U4ovDykWrXAGK2LeLIBrCtpMEVswTjbU6WNJ3LhHapI4QX7VWD1Zt5l3Dms76mbvY0IbfGCBV6MlRUMLAip6qxyubJRKacs5FVy2KTzRSshxlI0Y7irkc4Nc8bgKetTQ3w9aykjrpttG60tU7ibrUAucr1qtPNmuecrI6IrUr3UtUDk1PIcmkArx6s3JnpUZuC0IGBxVaQGtErUTxCoib/WGZLqRVWSQjNbE0HFZc9uxNXGepUa1yi8pY4FM8pupq/DZ+oq19kGOldNKalKxnUxXKtDOgyDWkrqFqGSIJVdpsdTXXKlG5xc8qg+4f8qzpDuNSzTqe9U2l5rnl7uiPVweH6tDwKuwgeWOKzFfLDFaEbYAFXR1OrEq0SZofM5oq3AmU4ors5TzTZcVEetTNUJ614kjCIi8U/saaKcDWZogooopMsKKKKQAaSlNJTsAUjUtIadgKtx0Nc9qPQ10Uykise7ty2eK3o0nJ6GcpJI5Xyi8x69a3dLtyBwu0HrzyaSGxzJ0rWtbZkr0fYu2pzwmpMu
xIFUU50yKkSLipCtZSSidcafMVo4OatxrigDFTLgCtMPi1sZYjB8quBfiqsz1ZbmqM3BruupnHTk4Mh6tmh5ABxUTy7artPk1UaKQVsQ+pa+0uoxwy/3W5FKI4JuYjsk/ut0P0NVd+RSqpI5rZU9DznUuxJ7ie3k2shX60/eJYct1qZCGXZcLvTt6j6UyazMcG9Tuh9fT61lLRnRFNrUpmReeaqzKjDLCrCFfMxT5hGV4FHQhkFneLB+5c7oSfxH0rYUAxAbtyn7retc1cQ45FXdLvxGfInJMR/Q+tVGRLNm2kaB8E8Gr0uJI8g1TeMjjgkcj3pkVx+82tSqQUkbUKzgxLiHNUZLbJxit7ylkWm/Yu9eRVwTbPbp46PKc69j7VC2mBh0rpfsgz0p4tB6VP1VxH9dvscZNpbL0FUpLQjtXdy2YYHisq7sB1xWUoSjsddHFJ6M5B7bI6VWNgSeAa6drA54FPSw9qqnKVzWvKnY5M6c69qabdx2rsjYe1RyaciDLKCcdK64t9TynNXOHmQjtVNxzXUXth1wKwZrYiQ1tCaMKkOYrxuRxmpDyOKb5TCpF4FEjOMSExkmrVqnNR4qeDg1lKWhrBam/ZQCTrW9aaV55AVc1jaQGnmSJBkk12ME8UUXkwkH+/J/e/wDrVEe7NpS6ISKwisxi35kIw0o/kv8AjUsNgq9qsxFcVKZlArTmMbCxwqgFKzqtVZboCqE18PXis5TKUTQluAO9UJbn3qhNfZ70ixXE0ZlI2RDndKdo/D1/Cs7t7FpJbk7XPPWmGf3qoZLdPv3BbH/PNf6mnR31kv8Aywdz/tP/AIVFu5foiyH3VKtQLfofu28Y/WpPt+P+WUf5VVkS79h7nFV2Yk1aFzFIAHtlOfQkVfs9Pgk/eMCg9zmrjTuYSqcu5TstPaU7nBAqze6kmnw7ImGaffXpiXyrYqfbODXKXknmSEzNhvQ10KPKcspuTC6vnuZSXl/Cq7/MOtVZJdrDac+9Sw5Y5ZqpXJ2JIoGZshgKvwxbcb+fpRDZCUblPNThfLOGXHvWqRDZL5C5DRE/Q1Ex5OR+BFWF4+YU8hZRkcGrJOa1N8nGwCneHZcXbAVb1e2O3cMYrP0MeTekg9ahlI2tTOM7jz6VzV1yDWxqszPLmsaTkVwy+I9SkvdMO7XmoYZth61du0zmsqTKmu2n7yOWcvZyujTFyMdaryzgk1S8w0hkNNUkivrjZeGGFSxgCqCTdqmFwFpSi+gudS1JXHNVpRSPdZNRNNmiMGjGU0NNSRuaizk1at4CxGa3TMJMcsj4p6M+7mr0dluHSn/YSrdKI7mUpFmwXNdHajaAaw7ZfKxWvDL8oFdC0MFFyZphhtqjcdanjYtTmg3VEtTqilAzVJDYFaduxwKjFn81XYoMCo9myXUTBpDiiCUiTrUxi+Wqh+WSk4iUlY6OzmyorREny1gWc3FaBm/d0HLOOpNNPjvVU3Rz1qpPOag80kVnKR1UqehbkueOtFvPk9azpXOKns8mspO51QjY6GKX93UUs1RpkLVW4mwDXLX2NqUeZj2m5pyy1m+YWPFTpuxXjtNs7pU+VGgsualXkVno5Bq3HJxVyg4o5nJXHyKCKqtCuKfJJioxNXOpWRaYmxVFQvKFFPllGOKz5nPY0o1XF3QcnMNnnBBrJuZRk4NW35BzWdOvWuxYpz3NcPTUHcrNMajaakkzmoNrE1qnc9qlViW7YlpK2oIHbB20zRtJLAPL37V1UVkiqMDpXdh46XZ5+NxSvaJlxIVXpRWjLEFaiuq6PO9qyZqhep3qB68OSCLGDrTxUe7mnhqzsbJj8UYpu6nDJpqm2HMOAoxmnBTSrExPAq/ZMOYZilwKsLaO3Y082bDtR7MpIqYFSLDmpfIOelTJFxW1KknuS0ynJbhu1V3slPatYRU0Q5bpXo0YpHLVjIxjYBTnFTR2+O1bP2QEcipY7Vaud3sZwXLuZaQEjpSNblea3EtB6U2a1+X
pWfsObc2hinBnPtHg1IkeatyWrZ6U1Ldx2rP6oovQK+MclYqPHt5rMvJMCt6W3bb92sa8t29K6acWjkjJGO5aUYFENm5rRt7KtWCyAHSu2CMK8lLYxo7UqOacVEda88G0dKxblju21c5e6Y06fvE0cYkNWSDApK/l61Hp67h81XJoDsOOlcTTvc9GUoqNjKa2iuAzwLhh96P/AAqnJA3YVpLvhkyvBFW2VbmPKhVl7j1rRI42zl3jcHkVVlt1Lbvun2rbuY8HpxWbcKKLDLWmzPIBbSvyPuMf5VanOxgV7daxBOYyvqDxWw0y3VuJRwTw3sau4i/bXOa0opVZa5qJnhbDfnV+1uCzkZ6VLLWhsmPPShU9arRXGDyauxzKetTZD5mI0QIqlPEMYxxWkCKieHzD0qXSTNI12jJSx3HI5q5HYKe1XIrYg+1XY4MDpQqMUOWJlLqZTacoHT9Kz7ixwfu5rq2jGMVWktwxxjmpcEKNZnB31uxyAgH0Fc/cafhjlea9TfTEkH3Mms2Xw35rFttR7Fm/1lWPLJbNs/d4qnNFg8V6rP4bXySoX5q5m/8AClyso2IcGnKDFGsmzixwasW8bSzLEilmc4AFbV34buIZFUI2T1rVs/Dl9YW/nxRH7VN8qnH+qXufqayUG9zWVVJaFNpRpUf2GBlZyf30q9/9kH0q/Z3hHFXIPC0rBcoeOtW/+EdlhxsU89ql0ZN3COIjsNS+460yTUPekbR71R9wjNOHh+6lXldoHVm4AqfZSK9rDuU5bxmOBnJ6AVHKvkxB7qbys9Il5f8ALtWqbIwTFbOJsAYMzD5j9PSqMmkSySZYMTml7Kw/bIqfbsR4tYRGf+eh+Zz+Pb8KqSPM7HezMx6ljmuhg0RlXJBpBorsSxWn7NsXtoo5QFyWp8UbseldK2hkjGMVbg0iCJV3Uvq7H9aSMK1gckcGtW20x5ZskfnWgPs0BwoBNSNqUUK5O0VcaSRjLEN7EsdjBaLvlKn2rL1LVv8AllCPyqlqWshySWwPSsGW5luflQ7c9+9a2sc7be5NeyDBLXHz+maz1mnbhm8xP7rVYj0+FW3OxJ96tJBbRn7+PwoSC5Wgs3blAPoa0be2PcVJDEDyjsfwq6kRIGea2iiGxIYNvTip/vfJKvHrShT0xUgizg5q7E3IxG0PBG5D3qK4/dDKnirm2VBx8wqpcsGX0PpSY0Yd9c5U5qrow8y8arF+vXjimaEGFxL04FYmour/ACSGsRpM9au6tKfPbc3PpWOZea5Wrs7YytFCzEGsy4HNXWfNU5ua6aOhhX94q8UnGKGNMLYroucvKGcGkMlITUZNA27Dy1AOaaoJNW4YOKbZCux1vFk1s20PIFVbaCta0iywqCnoadnaAgcVZkseOlWbGDgVrC3G3pXRCJ5lapqcpLbGM5pY5MVs3luOcCsSZdpNOex04Z3NSzbOK2Iowwrn7CSuhtjwKUNS8RId5IpwAAqRqiatTlTEc8VmznBq+xrPuRUSNYMtWctaZkJWsG2kxJtrorWHzFFYMrluzOkVmbpSiJsVuixB7UNY8dKwe52ppI55oyzAVq2Vt0qwum/NnFX7W229RQkPm0Gi3+WqF1bZBrf2gCqlwo5rlm9Rwm46mNBZjNWmtgFqVSqmlmuEVa5nTs7mzxEpIy5vkNOhmBHJrOv74bjg1Sjvsd66ZwTgRGEpPQ3J5RjrVI3AB61Qm1AY61ly6gSeK8mVO7aR6dDB1JdDoHuBjrUPmAmsP7YT3qaO89TWDoNHRLCSijTkcYqhKMnika6HrSxsCacYuJkotbkSQZ6ipreyDTA44Bq3EoOKvwQAc01WaYRi5OyNCxjwoq5JcJCvzGs8S+UtZ15cE55roWO6I645fzfEXLrWoUfFFcldzZk60VusRUsX/Z9M9FeKoWhJrV8jjpUZgpSpSueZGmrGaLbJqQWvFaUdtmrAsyR0rSGHJk1Ex0tuelWFt/atSLTWY1dTTSo6V1Q
o2MHJGLDYl26Vq2+ljH3anS3ZD92tK2HqK6YUEctSvZ6FRdNVR0pGsAf4a2VXcelTJACelN4dExxbRzT6V/s0xtLI7V2AtFx0oNmpHSs3h+xvHGnDyWLjtUQtmXtXayaep7VVm08YPFR7JxOlYuEtzljHgc05AK0rqxPOKoG3ZTU3kmbxjSmieMjHNLJtxVflRUTTEHmtPaWM3hUSlQTUkcCntUKS+1TrOq0lVJlg00SGzUjpWfd6UrD7taaXa055VYV0xldHl1aTg7HLmx8k9KnRRitWWJW7VVa09K2jI5pJlGeLcKyJrDdJnFdRHYnvT304YzgVrK1iIXuc7a2e3irzwkDGeKstbeU3FKVAHIrFWOh3sYF0FQ8iqryeWQU61q3xTByK564nCtxSuSW5MXcTSgfOP9YvqPUVjXMW08Hg8irMV+IZQw7VPOkVxHvix5Uh4x/C1AznJVJPTNX9JVhNjOYn4YfyqJl2ybe/TFXreMIA2MH1poRce3ZW246VGv7pm7GrjszQpKOexqmSDIR60WGPExI4NWY7h2AqkuF5PSpo2Ixt5HrSsBrxTZxzV1ZVXqee1YX26KEAjlv5VWl1BpC3zfrSugtc6QXwz1FNbUyzbVNc5HcsBnvipopWWIStwT0FK9wsdIlwWPWrCt3z1rAhuMEc81eWV5cbeM96ANVG+b71WFIYdayx8pwTwKet9DFVXJsX2jUc0x2t+jAZrJm1lcOB2rOk1PJDc0rjsdK1nZS4kZBxzUU0qLgqoBrHm1Exxxxc7mG5vp2qq9+SNx55qWyje+1qG6DirNuVmwT3rmftJccVqW1x5UHmy/dBx16+1IDoFtosBpeF/nUcvkEbSBgdAKxX1nzpD6DoB2qu2pKG70NlJGw8FsoJ2is6c2652gVRl1MsSv61Rnu1253fWsyrmo14oXtVOa+29DWb9p8w/L0qndz9eOaaiFy5car1w1VDqbSjhqzHmwuDxmoS4QcHihom5oT6iY1IDc1jyX8srEb6q3Fz1qiZiz8HioKNiLypTlz09astf29um2JBn1xWRl3Xao4qzbae8p+8i/7xpWZV0TeYZ2zVyCPmn2+jy5/16H/dNacWmTRY5z+NaKLIbQyBWz3FaEcR/hXmmJDLGeUzUn7wHKlkNaozZKuP4lxUgj3jiljdiP3qiT8KnTYCAAV9qqxNyjJK0APWsq4nSXJArprixM0W5NpPoa565t1RiDEUP04qJRNYtGLdBpFIFJo0ZzNn5TU1yBG2M8VNp+2KCR2/OskaN6GBq1sVLOzVzkkoDda0dfv2lnIU/L6VgeZk5oUB+1LwfNNdc1HDmQ4FaMdmXXpTegRfMZLx1AVramsWA6VnSQ7T0pKRq4aFQ0Bc1L5RJqxFbkmtOYy5G2NtbQyEcVsR2eAOKs6bYj5citkWQCdKxlIrk5THjg2itKxt/mBp5tua0bO3xjitqRyVpJGlZxAAVfPC1Xgj2inTSYFdHQ8xq8ijeuADWBcMC1X7+frWPvzJXPOt0PZwmG0uX7MYNb9s3y1z9qcEVrQS4FaUKhWJwxrA01qrrNkU/fxXVdHmOFhzYqpcAYNPd8fSoZJAaiTSNKcW2Q2y/v8ANdbp2CBXKRkBs1v6fcDArlkzrUbHTxoMVKIVIqlBPkCrqSishgbdaRlEYqYy1TuZeDTuCTbsQT3GzvWZPfdeaZfTkZrGlkZjiuCtiIxkenh8BOauXGvTnrVa4vCV61W571FKwC1l9aUmdkcuUdyjczNIxqmSfWrExzVcqaVSrc9DCYRRZG5NRVKymk21gmevGmkQnrRuIqbbTCKq4OCY0Sc1etn5qhjmr1nCWb2qZq6OWthU0bdqN1aiAAVTtIMAVbc7Vrzqjtoc9HDKLuQzycVi3s+M1dupwCa56+n3Zwa1w1FyY8RiVTKlxPl+tFZ0hbeaK9ZUtDynjdT6KWzZh0p6aaxPINdTHYKB0qUWiL2r1HTR4n1uRz9vpRHUVbFkiCt
ORokHWse+v1TODVxpmE8Q+pMqop7VZRUYdq4+fWWEuMmtPT9T3EZNJxsR7Vs3Wtt3QVJFZkdqkt7tGUc1a+0J6iqRBHHAF7VOqgVGZ17VG1xjvTV2S5KO5a3igyCs43NNe64q1TZzvFRNB5VAqrLOtZ8142Kz5b1s0nA1p1uY1HKvVeS1UjIqit971ZivQw5NZOJ206zWxn3UBXtWY45wa6KYCUcVlT2h3ZxXNOGp6dHFX3KyrxTHap9pUVWl5NJwTR1Rq6kLTEHinx3jZ5NM8vdTHhxSppoyxCjJGnHOJB1qwhFYSSsh61ajvSOtayqKO550cO5vQ3oytLLtx1rIW+4605b3J5qViot2udDy9pXLMidcCs+cevFTTX4VetYd7fnJwa6Yy0OGpTaZHex5U81zs0X7w96lur2bJGTWet2xbG7mgxsPeFSOlPsZFt5ij/6l+GHp71GGctzSHJbn86aGXb63VJlLjrwWx39fxpNvljk1dgIvtPKNy0YwPp2rGWcEtB91x0qmSjQjkzbzIrdtwFUjLhwW61Da3BF7GH45wwqtczNDcTQv0ViBU3HYvGUTfLnGanNwIIsKenBrnxdMDlSetWkkaUEngNzS5hlm4nGcgjmoI5f33XrxVYj5ueBU6xBYN/vnmp3LLqTkEtkY9KnaUykLnA96pxwvLIhY4B5GaexVQo681ViTQtnxIQe1XzqHlrhTwKx95+btn0pZgyw8AnPAp2Ea8d8zrlm4qlcXbSHjjn86qTu1tF06FQefanLvmmhQ5xgcUgJJyfs4ycZPJptg3mXcSH5kzlifQcmo9QlYYCn5RkVJp2ItPublztJwkf40AOvLkPcPKep5wP5U2Of92CRj61UyCS+c+lNkmc/KBQBr6exnbJJCDlmHYVLdXZmmXB2oBhV9BWff3psIhaLhXIDTY9ew/CswXZc53YB60Aaj3OJcI2fxqVZiehH1rL+QA4btTbdbiVv3Ks0a9T2H40rDNKeRpDhM+5FVPm3Ac/WrCypHw9yi+y8mnyXenoOS7H8qVh3GApFHlutU5pzK2AOPWnyXdrJx5U/PYNQFtSM75FX0IzTRNzNvGeIfKARWfkNktWpewNJ/qXDr7ViXEJjPLUpFIHjznnioI4kUktgfWrUMRxuY8e9V52DZ284qShAwEnPT2q5Ddqo6c+9Zkb8/MeKuxXKAY60Aa8N/LwEU/gK07e/ufRj9axLfUYoxjOPoK0YtSgI/1xX61SEzoYb4OoE0AFWFltpOjAfjXPi4mIBil3D60oml/iXH4VVyOU6QRA8qw/A1dhhJAyc1yqyk/dfBq5BqE9serfhTuLlOoSD2zWdqMEsOZYoiy9xUtjqwkHzofqK0JpElgJHQ1e4tmcDfsk658kI+elQXCiLTcY+Y1s3yWzXG0HoeQetc/r18u3yoe3esnobR10OE1GL98eaobDmtiaHcTVfyPm6UoyVglTaJdNt90gyK7Cz08NH0rG0W0LS9K7uxttsYyKiWo4aHP3Wmfuz8tcxqFiVY8V6ddW4MZrk9VtBk8Vi9NTppvmdji0gO7pWlbW2WHFOSD96a1rOAFhU+1ud0KFlcu6daYxxWr9lyOlPsYOBxWxHb5HSqRxVNzAayOelWoICB0rY+x57VKtpjtW8JWPOrU+YzljIFVboHFb4tc9qimsNwPFaSqaGVOhZ3ZwV7ktjHNVFgfPCmu0l0NS2dtKmij+7XHKEmz26NaEI2OVhicYO2rayMo5rpRpAA+7UEuk8421dO8RVK0ZGZCSatc4q0un7BwKV4CF6V0qbOCUUzOeoD1q08bZNQmHPam3dEwaixinmtCyzmqKxHNb2nW+QKzNpWZp2wOBV9M0W8HA4q2kHHSoM7kYziq0+cVoNHtFUZyBWVWVkaUXqYt0vNZzqBmta5rIuTivAq+9O59Rhqq5bFSU1nXE1WZpsZrJuJhk81pCOp0XuP3ZNKSMVUWXBpTNWrO6hHQkdhUfmVC8uTUZc
01E6SYyU0tUWTT41JNVYkngiaWQADJJrq7LTfKjAxz3qjoNj5kwYjpXZRWwC9KtQ9255uKxFpcqMsQ+WKoXk+0da1L5hGCBXLajcYzzxXluPNOxpGfu3KV5c9eax5ZdzGpJ5CzHmqx616dGPKjysW1IY0e45NFTBsCiunmPMPrOW9SMdax7vW0TODzXH3fiJmyFJrEudSllbk8V0vEK5ywwb6naTa15rYDcVTlYznrXNWtz8/WtyGcbRXoYeSkjzMXScGQXEPNMjvBARk4q1Nyuaw75TnNaVIaXMKM+h0UWvhQBu/WtG31jzf4q4SKIsRW9ZDywM1EYq1zWpKx2UN2ZB1qbzM96worxY1HPNSrqK561asedUcpGxmmsaox3obvVhZga0OazRBOTis2WTmtSbBU1j3KkEmspno4WSeg3d3qNbho2qMS44pRG0h4rHc71Fx1Nq0uRIKtSQ+YuRWZYwFTW7FEdtZSjcpSszDuItuazH610d5Bwa5+dNsma5ZPlPRoVL6CxAZpJ4+Klh9BTpcbfmI+grSM1a50tXMt15pm1quEpu6Z+tSZGPuL+VefiOaZ0UbR6GeMipVk4qyRn+BfyqJkA/gH4V5zfK9zvjNSQx1DCq0lrFJ14q0FT1K0yWJuSPmHqK9DD4iRy4jDwauZF1Y2+Dk5rnLqwIlLRHmuku2+U1g3F3t6ivUhLmPBrw5WVstGPm61WkucN6UXFyGAwetUZRu5PStUcxq2OqNZ3iN1Rjhh7U/XbMw3glh4yNwx3rGaUbRjt3NdG8h1Lw4J/8AltbHB+lPdCZiyHzxHOuQc4b1zU2owk6ju/hniV/ocVAGG1lHcbhj1rTH+kWlo552qUP507DMqK3JOe1SqAxEQ4wasLA0MYYcZOcH0pig7twXPbNSAwQF5xCuCxOOKv3dod0MA4BbGfYUzR7dxqhb1yRmuhFsZrxm2/JCoP1OKqMRNmPcwmKVNpzgfpVW0VrmdIlGQM5ramtywKZyVHLU7QtKEV7vbLA8inYVygbN413t1rVurdYFhDdhn9BV7VtPkjsomRc/vKoapL5sCP8A7I4/CgEYuqL57ADjLZ9qkEwhuNueVTH40y4k3soAycqMVBJI/wDpDkD0WpKIr5mkjh4zxmrF5G66ZBEONzFv6VHcT+YsQxgrHmpNUn8uODviJeD70CKkG6FiCcirulzLEbm+mUFIOEX+856fl1rMWXfnC5b+6PWrWqziGKPT4AP3HMh9WPU/h0oAzLpp7mQvKe/JzRFbyswhgDOx6k8AD3q7DZCVWubhtkKDj1Y+gqlqWqmRfs9uuyEDkL3Pue9LRDNFJrS3kCMy3UoGMD7g/wDiqpXd/O8u3c20dFHQfhWdDKFjJ/M0n2lnZgep6VIFkXYiO48sael2srZzxWbN/rVCH5u9RyEw453c8UAbpuRkqDgCiNj5RJBIJrDW5xIeRVg6gNuckgdh3qbgXpDKWBQlaTyGuztmK7v+eg71ntfNLFgfL7U6CeYn5d3Tqaq4yS7HkZU8VQXJDELhfWtL7K12A1w3A6Uy6X9yIkwsY9O9ZlIw5JGLEgYA70xZX6bqfPHKR93CCoBE1VYZZRz3arKy8feNU48Z+arSbSaVhlyKV1GQ7fnV2HULhT99vxrPQEHgVZhiZupFMehsRaiZB86/iKuW96FPDce/FZKwbRyali3BuxFO7FZHU293ESMMFb36Grb3wVduSp/SuTbd0BxVu3lZmWJiTk9auMiXE0bhPMUzY3H1rlr2BnkY7cV3sVli1ArJvdN5PFacqZnzNbHn89uQelRR25LDiurl0zJ6VGNMwelc0o2eh1U+aSGaVAIsHFdTbyALWDFH5Jq8kxxSadjN6M0bmcba5rUXDE1bup2INYlxKSa5akmkduEjzSKwT5q0bIfvKqxrmr1mv7wVxxn7x71SlamdFYL0rchTgVk6evSt2FeBXox2PnKz1HCIU4RCpQBSE4qjnHRxDNWBbhhUUTC
riMMVQmVHtF9Kj+ygGrzMKiJqkTqVzbqRUb2o9Kt5ozQx3M5rUelV5bTIrXODUZjBpknOvZZJ4qE2J9K6XyBmgWgNUpD5Tml087q2rG0244rQWzHpVqGALUtlIWGLAFWAuBTlwBTZG4qbisU7mTFY9zcVcvZetYsxrzsXXS0OuhRe5HcT1kXUuQatXDHBrMlbiuCUb6nq4RO5n3UuB1rMkbcau3OSapEc1pTVkevSs3YaOlBJpTTTWh6cVZDaOtKKUCmIFGauW8OSKjhiyRW5Y2RbBxUSYpSUVdm94fttsJb1roHG2Oq2nW/lQAVJePsiNdDdqZ85Vqc9Y53U5/mauR1GfLGtvUpyd1cneS7pcVw0Ic0rnZWnywImfJpyKKYozUy4H1ruaseXOVxdmegoqxAobNFRqYWR3jIajeI4zWqbbioXt8CvS+qNGX1uLM+Fip962LWfKjmstoynSliuNhxXTh1yaHHipKotDpPMDR1mXmKbHdZX71Vp5smu2Tujy4U2mXLKMMK0GIjjrHtLjYKsTXgMeM1g5WQ5RbYkt+4bAohvJieaz0zJIa1be34q6UW9WKdoluK9Zepq7FqWOrVmvBgVUk3KeK0lFow5VI6uK9DjrSThXXiubgvGU8mr6X/y4Jqb9xKDi7ocbcmatixstwHFZ1vMsjCujsGQR1zSVmenGo3HUsRWSp2q0sYApolHrS+ZUkkFxDuBrDurPnJro/vVBPbhhWVSnzI3oVOVnIyboQQBharPNmtu8thg8VhXERjb2ryK/PSdz6TDShUiCHmrcYyKpRsKtLLxW9CrGcdSa1BrYn2io5BxxTPPpQd1Y1KHNI5eacCpKwGaqyXJi5DY9CKuXUQKnHBrm7+68gsko+hrWFBLYiWKlswvdTiyRcLkH+JeGH+Nc/dRPIDLA4ng6kr1H1Hao7q6E+fQVSjmlgl3wSlX9u/1r0aS01PMqy5ncay+auBSx58soeTWrbLbakF2hYbvPK9Ff6ehqjf2rW05IJXP8JFbWMTNkcHcnP0rZ8N3Tfa2sZx+6uVKfjjisz7N5xMoxnvzSxvNb3COmNyYYU46CZNeRvYXu1g37t8EV0mg24ubSdeqo2R+X/1qq63Al8INRi+7MAWx2Peuh8Nae0DTIcbXXOfXH/66qKJbMnUIFxblVG0kjrUWmWbkSiRR0OOPSty4sA1syMvzL80bHsa0tG0xp44OASQQaIxE2QadoSwAOy5LDj8a2oLBIldnTAXk119rpMRht9yj92tZur2LICsSkhgc1T0JucSliZ57lwM7sAD610el6L+4TjsTU2kabm4kOMYK4rsIrZI4wFXpRsFziNYtWj05gpI2f4VwEkhWREfJBIBr1/W7ES2cg6EivKriHymkQr8wyah7lIylBmujhePM2qKztQkaKCZ/4Q5Ue5rp1tvIt4Zwv71iTisHXbTzlt7JG27AHlb1Y0WLMOeYllQFtxCg4o1y7c6yyr/ql2xgH2qW+CxanBbxdiu/j9KyJWe41OVmUsGc0WEmdLpUkEb3N6yhltACAehbtUdrCsqvfajuEByRjgyt6fT1q3DZJ/Y9tFPmFXP2m5x/d6Ko9/8AGqc8k+oRlseRaocIWHQeiilJagiG5vJryUhlVYlXAAGFVaozRL5IK85OM1sFtN+x8iYF+NzAc/hWRqLS27Lko0QAMbAcVFiiBehDD5R0qNiM7R2GSagluJZIcqVwTgnNPVl8sBW5osAomIOzbn0xVSUmKUs59lUVeBWAjepJbse9QSBJJ+B8+c89qYFZ7dwu5yOecelRwyMcKF+UdzVqX97GQrdOKhZRBDk5MuPwqQuTLhRnIJ9BVyOUA5bk/wB2siDcw35+pqxHN8uQflB6+tBVzVSeac/M2FHbtVsQI455X3rHW9JICripW1VQQvOfQVGozQns1lG/yuB3NZdzCqxku2B2FXY7i4uVzysXbNMmtjN90hj2zTsFzDVSTwDj3p+5o3wBmtWDTHk
bB5+lTNpRU52n60xop27Ej5uK0Yh9KpGHDEdhVy0PPPSguxoxtheRQ7lW+4uKVipA2/lUJYySfNwKlkli1V5WyUGK1tOsjJeIccA1SsMMdqV1mk2ZQ7iK0hG7Jk7I0PLAjA7VRniBzWpN0qmy5NbWMDLeyB7VXeyx2rcEWabJb8VlONzuw9RJWZy81t83SpYLPPatGa3+bpUkEYFTZ2FVs3oZlxYDb0rnL628tjxXd3ATyzXK6mqljXJXjodGClaaMeFa0bRf3oqpEvNaNqB5grxnK0j6qSvSN6xXpWzH0FZVl2rTz8vvXsUXdHyeJVpEjS4FQNcAHrUM0pqo8hrbZHPFXZppc89asLecda54zOvemfbnB61w1MVyvU9angedaHS/ax60faR61zgv2p632aI42LIqZe4nQfaR60C4GetYRvfenC7461usSjzpUWmbqzD1qVZBWEl371Mt5/tVSrxYKi2bYK04MorHF7701r/3q/bIr6vM3fOWlE9c9/aPPWpUvc96SrRYfV5djdE/Skkl+Ws6KfPepzLkVMqqSKp0JNlO6yeaypmA61pXT8Vh3cuK8LFT5p6H0eHwqVLUq3cwA61jzzcVJdTZJ5rOkYk1UE1uXCnGIOc1Dt61Ii5PNJN8orW44TSkVX60yldufemitkj1I1FYUVPEhJpka5rRtLbzGAxSfYu6Suy3ptiZpBxxXZWNgqKvy81R022WILxXQwlQBRUovQ+exuOvLlRYWIRxVi6tLthbmtqWYeXxXLa5N+6NXJPkPPw0uaocpqE2c1z0vzS1p3zk1mNSw0eVHbi6nQUDApDljgU1m4xWrpemtLiVx16V1qNzhbuNtLdwlFdNDYBV4FFa+wKudr5Ipr2+RUoNP7V77SPkXWkZs1sMHiqMllzW4+OageMdaynTTLhWaMbyDHUUi1pTBRVN+tc7TR6lOUZor/Mo4qBpmJxV8rkYqMWmWzUum2YTkosmso84rchXC1RtINorQHSuyEbRscNSV2I1V5I91TkUAVTM0yi0BFQurLWmw5qKSIEVHLcuMncqW9yyMM10FnqPyjmufaMA1LGSK5pLU7oJtHWJqWe9XYLrea4+OVgeta1lOQRmo5Szq4m4pXbiqUNwCgqfdmkQQXMW4VhXlt14rpCvFU7mDcOlc1eipo7sLiHTkcbKhiahZSK1ru1zkYrHeIo2DXz1eE6MtNj6ehVjViSqcmrMf6VRRsGray/JxXXhcQnuZ4ihdXQ642+Wc1x+uLndt5roruYhT6Vyd/O5mI5xXpwtJ6Hh4iPKjAKk5bGD7VGYhyynn0rVFnNKMpE2PpUE2k3T8pCwrqSPOuVYgTIF2kN2JrpIlTUrVLa4ZQ/8MrdR7GsZbK6tFBmRsDuak+YlSASOnynmrRLC5s/s0nkNEVccZxSxWJQ5lXdj06Vo/aFvoBb3J2yR8RSf0NT2ULs3kjKzAce9CQh2n2m60a3YHYhyOP4a7LQbMF/kIHUfXipfDtgsu07QrdfVWFdRpuhR2BKpkqx3D/ZPp9K1tbchsz4/Dn2iCJj8vqD3rT07QVs2U+natuOIKuMVLtqOYLEax4XFRTwCRcEA1aFFTcZQgshFJkeuaukcU6g0XAzr5cx4xmvPdT0tv7U837oJ6H3r091BBzWJqtkrq74+ZVwv600I811fERRFY7s7V/qa5PUL5ft03y52Hkj9BXc6paES5wdxGATXLz2dvBfGUJuSMZwR1b3plI5eWN42Nw5ClsEA9c03R7UveQxYJ3PknFGpzNc3QJIwOn9TWtoUCgNNuZVA5b0Hc0kMvakRKZPOH+i2552/8tn9B7DpXPvcNOWacbQ3CLngD0ArTvJRKS23bCBtjXPQf56ms+ea2hbcxK8fwjn6CoY0VLqJ3kVd6rgYAx0pqwefA1iSG6lCf71Qy3EjBtoJX/ZH8zUdvcPBIhb5QT070IGZ7xGH5WwCD+VWhsCj1xzmruqwAXAnAG2QBgc1mzwMSNg
yB3J4pDJYlM8nyk7hwM1E1sRdH5sbOreppkRaF+WyD949/pUs14nl+VgHPagBUQeWT0BGaqTSGeTbt2gfmaszv5uDGoQA84pI/wB6+FB24ySKBmeMiTYOhPrTtv7slicD9afEDJK4QfKTwKfeKY4QqDLZw1AFZrslNsfA6VPaosJLuxY+9VogrSYwcL+pqQbrmT7uAOgFSM3YZ0kxu4UVetjB2bvXJPMynaOgPOKt212Qck4oQjt7cwRR/KBn+dUNUvVWPqP90Vlrqflw7tygDuetZ7z/AGxi7OxH0oZcSU3DTN0x7VYjuNi1RBQfdYn6ioXnwetSzW5ri9JHB5q7C/nqCTXPwS7jx0rXs/TuazV7gzpdOdIWGOWNd1aSD7ODiuGsbfaqnHPvXY2G77OM11UzmmWZDk0wJmnEU9BWpncaI6f5WakUVKi81VguZ8tpu7VVe0K9K6FYQ1JJZ5HSk4j5jj7zcqmuWvifMNejXmmFgeK5m+0dixO2uOvRbWh3YWsoy1OWiHNX7biQVM+mPH2NRrE0cnIrwKtKUZao+qp4iM6VkzdtGrQDcVh28uK0EnGK9GhJWPBxVN8xYkANVzFS+cKfvBFb8yMKdF3KU6YFZshrXuCDWXKPm4rir01I9ehUcEMSrCDNV+anXNeXy8rCrib6DiOKbnFO34FQtKM1Unpoc0Yc7JC5FQNK2fvGhpc0w1gpS7nr4fCxS1HfaZf71L9ol9aixzS1ftJdzr9hT7EonbvVqG8wapAVYjjpe3lHqJ4aEuhrQ3lW/tI29ay448VKWwKh4yb0FHBwjsSzz5Gawb64q1d3IA61z13PuJ5p0U5O7NZqyshkkuTVZ2AFNaXFVpZ816MIOTPPry5EWlmqGWXIqm0pHSlBY13UcInqzyJYp30AtzT0OaSOEntV+3sHduFrvWFgkCzGaFtky1dFYQBME9agtNNMeCwrZtrf2rkqYeN/dO1ZnzQsy5bnAqwbjBxTEiwOKjkXmhwdjyalWMmXGuPl61zuszboyK0ZZdo61gajLu4zWdTVWLoyUXcwpoix9qozQlRW0FU1BND5h45ojTsglUc5GZY2xnuBnoK7iwt1jjUAdqxNOtfLbpXTQrtjFdNK1tDR+6tSYRgCikz70VsTzG6pp4PFVEkqUPx1r1up8f1Jj0qCXIFSIc0yccUyr2Mm4nw5B4qv5wyMUy/iO7INUllI61ySdmejQNNZhmrsJBNYgm5q3Bc4NQpG1SkpROgjwBUgPT0rLivB61bjuAa6YyujzZRaZbApDTRLxSFquxnZsd3qORqQyYFRNLk0m7GiiyF8lqkSlC55p4WueS1OynNpaioOauRzFKrAVIqk0rD9pdmpa3hJHNb1tJuFcxawtuFdFZggVjIvSxoAZFIyZFKDxSGSpuCM+5tww6Vh31nweK6dyMVkXuOa4cTQUkengcQ4yOY2FWx3p+SBgdasyRjzCcgD1rLvbzylxAcHue5rgw2CfNqetiMdGEdNyK6nSJtszf8AAR1rBubxhM2yJQPXGTT7iQykmqjwfKSWNezCmobHz9fEOpuOF7LMu0zMR6A4qBmYHhj+NNSIo2S2R3GKtDygCO3vWxzkQvpolIZ2K+hORT4ru3k4lTGejRjBpjBJgUGfwpFsJY49yRs34UXJLSWyXbboJ92OSCMHFa1naztOkwXLRkcHuPSs20Xziu4COYceYo/mK77w/ZhpYzNAodeDt5VhWkUS2dLoMEEtmGRAp9RXQRrgYqpaWkVpnygVU847CrwOaUmKI4UtIDS54qBh3opCaAaAFzSGgmkoAQmqNzgxtu59qutVO4wIm6niqQM4TXIGuJzMkgEUfG3+8fT+QridbsLmY+XEcHqxY4HPoOprudW3KWwQsEQ3yKOpPYV5/wCIrzytP3y5X7RJgRxHB/GkNGBPBBGCjOrN0AXoPX61ppFdQacqRQvsbljjr6D6VRhtvLl3AKwjXdIWOSzdl9h7VZfUDYSqrbp
rk8NzwPUGnshlSdpTIYiMknhRzWfPuJBbaGyeGYcfhWjqksVkSImI8wBkx1we1Z0OI1abYskp6GQfKtQURIq7TvPzkfKp6t/9am+Qclv4hwMUrvF5pJYsx6gc0QmIdHLHP3R/WnuAJtmh2uGYqeM96ZM2eyqg9+tWnxIQEwBjnmoFgVBuZ9wz1NISKnkq65wqj34pBZeZJkbeB1HarTWqy5YNwe2KeyrBGATg4pDK5slA3Z4Xk561HJL5MDKgwW4zUvyFhu27c9FpZo4sAn5ueKBla1h8kKSB6470kpRWIJPPIAFXBG0q9AoHf1qrNC8k33Tj09aAKyQEhhECC/r2FSvttLTyVGSRy3c1aWJlfGNp7gCleBJWBd8IDyAOtSBiBGwXwdo70wFmbjpWxcbDEYok4FVEg53OMAUAQL8xwwJ9AKuwW7TLtQfKOtQ+Zul4XA/WrqbjDtztBqrDuV5RFENitufuBWdcNzjFaEse1cRLj1Y96zWBz/eNKxVy1aMeK6LTVAlDNxXNW5fIOMVtWUzZ+dqLBzHaW8oODXWacd1vxXB2kp8sc8V2uitmDFawM5F8g1ItPK0wjmrMiRfu1KlQrU6VSAuwVeSIEVmwk5rUgbirJIprYMOlZlxYBj0rdc8VXZQaTQ1Kxy0+lA5+Wsm50nr8td40APaq0tgG7VzzoKW51UsVKGzOA/s9lJ4oaFlHSuwm032rOnsBzxXLLDJbHXHGcz1OUd2Vu9SLOcVpzWHJ4qD7FXHKjOL0PSp16TRSd2IqARMzdK1PstSpaVSpN7mNTEJbGSYCB0phyPpW21vxVGaECuerhDnVXmKJHFU5etXXIxiqsi5rNYbQ9ChpqQrJTg9II6awxXPPDdT1aWIWxIDmniqyNlsVp21qz8muKa5dzq9rG1xsceauRpipls9opkn7usGnII14ilgoqpcXAANRzXHWqTkvVRp9zT2sStd3Gc81lSvmrl0CKyZZdpr1sLR5tjir4lRElPBqBVLGnKd5q/bQZIr2IUVTjdnz2Kxbm7IgiszJ2q3HYH+7Wtb2y4FXo7YVwPFydXlRldKNzMtNM5GVrobTT1SP7tOt4QK0kIx7V6M7yicCqXkVlth6VZihApGlHalWXArKk7bnU6cmtCZsAVnzzAE0txeAL1rFur33p1cRFaBTwVWWpJdXY55rn7y63N1p91dZrGlmZpK5oy5mdDouCsy8LjJ4rRtE87HFY1rC8hBrqdJttq5PrWnxOwRahqX7KxUDcRV1oSBxViCL5alZRiqlWjSiEaU60jP2kUVO8eDRXF9eO/6gzOj1HJ61ZGoD1rJbTJ4ZMc4pZLKfZkZr6hVLnxUqaizoIb5T3FWvPVx1rjV+0wfeq3DqRHDHFbRmmZuJtXUasD3rHaHDEVM2pKV+9UUc6ySVz1bM6qV0hn2duoqNt69Aa27eJSKLi1QgkCpVJNGv1hp2OfW5cNWjb3xyOap3VvtORVaOTaaxk/Zs6oUVVVzp4rs461YW4rn4rkirAvOa1jiETLBO+hqy3FRRTbpKoPPnvUlsT5g4odVSJ+ruJ0MMfmLTjCRUljHuQVotb5FRcyktDMSLNXIrfNTx23NXI4QopSkKERbWDFaSYUVVTgU8y4FZtmpYMlRtNiqrz1WkuMd6i9jWnScixLcYHWsu6uc5FEtxu71l3UhIODQlzG8mqSK95ckjaDWJcSn6n0qzdM4Gay5923cTj3rRJI5JScnchkkbPC/XNPFyqjOM1Qefyzyxqsb9AdvLMe3SgRrFhK3yKWPYCqt1qVrbnGBM464OF/8Ar1lXuq+SvkW/UjErZ6+wrm7vUpSxAalcSOgu/ERLH59g7CMYxTI/EW4hfOz+JFcYxlnlwNzMegFX7fQruQ5fEf1NS5JbspRvsdquttAwaKYsQfm5yDXq3gfxFb6rCsYws0fBjJ5PuK8EksZ4JCUulYE52mtrw9rraVqMUrrhgexwcVVOrHuTODPqKOUYyMY9M1K
JK4/RNcS+t43V92RnHeuihn9a0ZBpg0VXWcEU7zKkY/PNOFQ7qeDxQA8mmFqa0mBVYz8n0pXAstIMVXnmXymye1VZrnrzVCa5znNHMFjmfEhaCz8m3LefcnBbGdq9zXm12yXGsSiWUJZ2ZwGYZy3cmvRNeujFDJN8u4DC7vXsK8xvFt4F8mSVzKWLStt4LHkmjmKSEu9UENk0qBSUKmGP+7nPzH1Y1iPf7uFAUnliTUVy6gSjJ/eEEE+1Zs0scZ3M5/xqbjsdA16kuhxsi7pIJihY9cHkH+dUGvmU/OSc/n+VUra9eXTr+EcYVXX8DWWqXc5+VXbv8qk02M349RRD8kIyepJyf1q0rrOMLwD1Oa5Y+fAMPuT2IIqWC5mh6ZxSTFY6k2P7s7b2Pnscg1DHZtnbNKuD0xkgVnQar52Azfd6cVppdidctOVI/vUxWE8tLYEhmbnpUUk0EkZyshY9BmrZUlcrKrA1SYssmzye/wB6mhiwFd2GYDHXnOKkkdN2clj24qERpKcKeB7YNTxqACWZTjsTQBZgH7sEuMkcUY2ys+ApA6tVdGYtwp3euc1bEDmEE5YA+lICCadBC3JAP8S9T7VEYj5Ks6+XEPU5NaN0itGiwgHA6+9SQWuYgJcNj7xYcVSQrmPHtLBUUkerf4U+W2VcnBLnoO1arwjOEi2j1HU0j2YGHILHsp7UWC5gSxSxEFxub+ECrdvBcNEPNXA7Ada0zBsYSyhdx6YqxC8THBK596dguVYNK+0RjzvlQdqiv9HURYgT8TW/EuV+R1watR2bsQcCnYR5nJBcWzYI5qS0vWikAmT867y98PtfE/OqsOmax5vDnkNtuiB6EGp5QuW7ORDGjLKDntXdaKT5Arg7XTGgwy/Mn1rutGb9wAOlVFDbNsGncVDupTJWpmOzzViHmqo61ctx0oQFyKOrsYwKgixipwasgVjSCnhc0/ZVCI1PNWY4wwqAJg1YjO0VAyOa3GKy7m3FbEknFZ85zmpaKTMGeAVReHmtuZaoSJWMoo2jUaM8oBTQRU80fFUicGo5bGnPcnOCKoXMXBq9GciobhflrOS0NKT1Ocucq1Vt244q9ex8ms3kNXBOok7H0FGm3G5bUDFV3TceKVST0qeOM9+tO6khN8g+ysh5mTXQ2tsAtZtt8taAvBGOteRjafVHP7eTepZmUKtYl7J1xU91qYwcNWBdXvmE1y4WOupo6kraAzZel42VS8/mh7j5eOtdtWnd6FQqytqRXzACuenbdJxWrdM0mazvs5LdK9PApRWpyYmUmwthzWzb4GKoQwEEcVowxHiuyTbOLkT1NGB60ockVQtIDxurXiXArnp4O8uZkzqL4UPTipDLUZHFRscGtK9VU1Y9HA5eqr5mSNLVea6IXimSPxVOQk15csRJ7H0tPBUoLVENzdsay5pmJ61dljLVWe1Y1neT3HP2UVYoSZaiCxMrg4rSh095D92tez00qRuWpnVcEcUqVKoV7HTPlHFb1tabRU0ECxjpVkDHQVnDFSREsFBiqNopSeKQmq8stClKqymoYeNxs021qKpzMS1FdSoKxwPMNTp7mxQfw1T+yoeMVu3o+WsF5/Lmwa+jhPufKVKLkrohm0xGH3aybrSRzha6aOdWHanNCsgzXQjzW5Qep59PYyxNxnFNhDiQda7S4sA3asiXT9rZArOUDSOJdrD7OciMA1NPcLjrVZF2iql0+O9Pmsioy5nqLPIGFZsiHPFC3XzYOKuQL52K5px52evQqciM7zWU9KlSfB61rtp6yR8is+bTXST5eVrkqU5x2PRpYmnPQt2SGds10lnp4wOKqaPY4UZFdZbW4RRxXRT21OLE1Pe0G2lv5YHFaHljbTFwBTjJgVdzkEwFpRJUDy1A8+KALxlAqCS496pPcgd6qy3Oe9RJ2OuhQci3Lc+hqu05PeqTT1LAd4rm9pzysj0/ZqlC7HseM1n3bHnFXZ2wtYt3IxNdsVZHjVZ8zuZ1zM2
dpJrMuZ1C/f5qxdzbXOaw7udmBAXmggiuZuTtbn1zWWbxoyzuQSOARSXEkoz8vFZskrs33Tj6UJATPdDnYqiqqwPcShQOSeMVNHavJICR17YrpNKsUhPmsvKjisqkuRXZpCNyTSfD8NsA0u3zsZyah1S1uXJWBiv0qtq2r+ZN9nVtrJyCO9PtNUe4VVf74/WlTgpe8xSk1ojJ1LQtXsLGK/lJaBzjKtnH1rNhvZQw807h696797OW/tfJlzsPY1xmraQ+nTkD5l6g1vUppapGUZPqeneAdYaWHyN+WTkfSvUbPUTIoBbFfPHge9a21iIZwCdpr2u3lxipuVa52cE/HJzVxZa52zl3VsRt2700S0Xlkp+6qwfA96aZDmi4h8rkZ5qhLNg1JNLxWfI/JqGUkEs3PWqFxPwalkbPFU5TzzQOxzessZSdx+Uc/jXmmo3P2i69Aefzr0nxN+506Rg3zYNeU3kwVg2RwtAzNv7sqT03HoPQVFY6dLffvpW2Qg8sep+lMtLT+0L4nPyKct/hVvWb8wL9mh447dqylN35Y7lpaXZdhvdM0yQxwwqzkEZPzH9apt4ouMny4CFFU9J0s3EgnnJVR+Zrt5Ly3fQ102KxgH96UJ8zVvDDKS95mMqjWxy8evxXPyXtsGHuKln0m1u4/O09vLY/wg5BoudKwCyofyqjpt8bO7CspVc4IPas6uHcNYsuFTm3KW0xzGC4TY4/iHepdsqt3IHQ10msaYt/ZefFxKg3AjvWdaQGeyWaNeh2uvoadOfOvM0cbMqwSNkbZZkPfnNWHWUKT9p/Si508xrvB5HpVOOcq+19zCrRnKNizvZesufxq7bxZXe8xb2PWqcdh5kZdX3DuvcVahiTaIldtx7kVQi+svlDbDEDnuasvJMI8Nj6CoI43QqCw2/X+tW4dvyt5fTpnvSsA4TGFRhPouOtLK19IAqGMDOTxUvlNJJ8rnI7AcD8aaI1zl2Ofc1SAlFlN5Ct5nzE8n/61RvzjaenXvTkNu7AmdWOcbd/FW0A2Z2Zx6HNUSVTClwDiJmOMbpDx+AqpHp2yRpA3sea2Cpc4R1BHYmnG2QsPNO/vxmgCG0L8KEXb6Ct2AMsXK81SjtwBuxjHbFWkuVK4Tkj0oAsGEyHcMBv0rP1ZIfsuZ03sOmKvIZc5Zwo+matBEIyefqKBHI6bdwk7Hz1/Kuz04ALhTxWZdW6wL5sUQb1wKn0m9Ekm0oVHvTQG5zTamI4FREYpiZKlWonxiqanFPV+apCNWKWramsqFqvo3AqyTQjPFPZhVVXwKY89BFizvFPDVQWXmrMZyKEMWRjUDLmp2FRt0pFFGZeKpSitKUZqjMvBqWUjMm71TdRmr80ZyarPHWTRaZCnFNm5Wjoaa54rKSOik9TGvRyeKyvKLSe1bN0u4mqyRYNeNXpuUz6ShXUKQkNuAoqwIwopRhRUcswUV1xp2ieTXxTchzS7RxVKa7b1pkk+apyy5Ncc6LkxRq6BLOzHrVYnNKxqJpKTw3LsbRr3EakHNNLihZK0jRuiZ130HGIGm+SM9KcstTIcmuijSdzlq1W0Nig5rQhg9qZF+lXozgV6KpWOO8mSxoIxVhZABVfOaQmnKahE68HgqlapfoWWnqInNRilzXz+IqOcj7rC4ZUoWQjDNQuuBTjLg1FJMKVOCZOIm0RMKfDH5jAYqrLOS2BV2ybaRkV1U6V2eFiKkmdDYWCkD5a2E0xNv3aqabcJgZrdilRhwa3qYdSVrHnxrzpyuYk1m0R4FQkEV0xiWQVm3liMEjivIxGDcdYnsYfGKejMhuhrPuJNpq9N8hwayrgGVtq1lhZWnZlYyHNC6ERDNyBmir9pBsixjNFe0qLsfPcrOwuwDH71yl+uJDj1revbjatc9LOssuM16EY3Zwxk4kUM0qkA9K1be445qmsQNDBkPFbRvE5a/LUNniQVWmt8j2qC3uDxnrV8OGFbJpnnTpuJiT23XisO/iYA8V2UkI
YGsm8sg2eKiUR0panCtG/m1r6dvj+9V4aWPMzjirK2W0dK5XKzPXgromimXbzU8EayyetZdyroODUmn3+JcMea0jNSJlFx1O00+zGBgVqGIqMVnaXcqwHNboUSR5qZKwKVzPbgVA8lWriLB4rPmO2pGNlmArPuLwLnmo7u7296wbq5ZnODVqIJ6mjJe5frTftPvWOHY1KJDiuXEwbWh7eEqQWhpK+5q1bfAjFYVm26UA10KhVhrHCUmndjzGp7qSKV42axrqXbux1rRuiWJxWVdNhTkZr0DxTBvJWyTgk+1Y9zI0ilfmDVs3Ecshyv61RkSVc+aB+VIDn5IZwSpGR7mi3tppZQDgLW2YYJBjyyT71LDaKGXCED2q4oljVtAssShRyOtXLhTFaMwGOMmny2+JlKgjirktuZ9JJXvH/AErkxabSNqbseWTz+ddO/qauW9yVIJ/h6Gs6VSkrKRgg09CfWtI6JWI3Z00viW5S3xEy+ZnuO1Z02pXN6c3B3A+3ArOBJHSrNrFNMdijNVzSZNkjS0ZPKuzKOAOa9ssG823jf1ArxySE2Ols54dsAYr2Tw/A8ml2w/6Zrkn6VEPeuzTZHQWKliMVsopqtYWmFzitNIDnpxW1jNsaKcy8VZWCn+UMU7EmNOprOkyDW9cw5HArJnhOTxUOJUWUH6VTk6c9RV2aE4OKozKcelBRw/jS8ItTCvXI6eleYXwIDM2eK9H8YxnD+20j16151rLeXBtz1NQi0i1otqIdOacjluawpZFa7eaUZyeBXaafAp0NAB1jriL2JomPoCRWFB3m2FTZF+1vgJQGO6P09K6Jdd060jXJPJxwM1wizEe1K8hYdfwrujUaMHG518/iq2abyhE5XONxrn9SuBcXZmiACYxVDhscck9alls3VflIKmiU3LcIxSO78NzNd6aoPzADBqDQLZV1HUrIj5TyBVrwnZvDpSl+hGaTScf29qEw6IuK48O/3rR0N+5cluIoo7Moow2MGuRu4nRuRkGunnm8+DA6nqKxDGXkbdtPpjpXVYzkyjFM8T4BGV/hPFWvte1WVmAB7Lyc1MbfzTl+wxhRTIYIhIURCJMZyT0oZBct7me3iWVyqofXrV1b9D15z0IFY9yBEwiBEkpHLMc1EksyT4M6f7uScUijpYpyQXGcDsDUyu0rZVDn06D86zLa4nkU9Mr3XvVz7U0zLEZTG44OKaAuSwQFhK1l+96cGliX93jdtOemelRRw3IkyGG31JzVyeN4ADuTPXkmqsSSZyAGfaB/EBR5rmTajhlPQnk1W3TXMW1HT09h+VT2+nypHlpyW/2RimIvxweZF84/EcURCAMAJfmHtUSQeQPnaRie7HinuYkXcFcL3JNAF5vKB+9v46VIkpC421StwZjuSbb7Y5q9HE8fLMpPv1pIAjmlOdoA9jU9pMGkw8QB9QKasTiUEZ2981aW3A+ZRVBc0AcrxTSaUcRA1GTQIM0oao6OSapCLsUvNXo5eKx1bFWY5qpMVjV83iomkqr5vvRvzRcRbSTmrsUlZcZ5q9EaaBlsnNRvT1IFMeQUAV3qu4qwxBqJ8UgKUqdaqTLWg4qrLHkVLLRkyDBqu54rQlhqnJFWbiaU5amfMKrZANX5YuKzbjK5rysRFxdz3cLJTjyiSTYqlK+ajklbNQNKa5JYppWOyOATdwdqgY0rE1GQay+sNnTHLojWJqM1YCn8KaU9qX1iRqsDBdCm3BqMMc1PNGaZFBuNaxrMwqYWCYJuzVyEHHSp4bPj7tX4rLjpXVQxSi9TycVRj9kqxnbVlHJ6VKbQY6UzZtPSvWWMpuJzYbBynIkDHFSKKYi+tTqteNi8TzuyPscHhVSiAGKjkOKkY4FUbmcAV56O8gnnxVF7jjrSTS5JqOGB55AK66MG2edi5pIs2qmVtx6VqR/KBT7TTiqAAVdNiyrkivVp00j5uvUb2CK8x8orWs71wQO1Y8FkxkzW3bW+0e9bSscsE5bnQ2t
yrRjJp1ywK1kB/KHBxQL7zPlzXNKPMauDhqinfxkk7aqQwc5rVcBhVZgENc0cHGM+Y76FWVSPKKg2DFFVJrnDYBorf2sTp+om3dxl0rn7m0aNty9a6BJtw5qOaBZBxXVdHycXYwIrsxnDVcWUSDINQ3dn7VSj82JsdquM+5M6SlqjZQZqxHnFUYZzgZq/A2a3icVdNKxZTpUUkQY1MORxSohJFXJnFTi3Iqi1z2qCWDbW9HBxUNxbZB4rimrnr0tEcrdQ5FZL27CXctdTcW3WqLW4zXNrF3OjdEukXLoQDXa2V0Gj5NcXbxeW1bVvPtArb2l0YcljoZCJBmsm9Xg+tKt5x1qGe4DDGaUp8quVGOpzl8rkmsaQ4bBrrHhWQGsHU7IgkisIY6MnynR9XW5SjIJqxtFU4sqcGrZIx15r0Yx0OOcpQloWrKLMoINbp4iHNYVgf3wxk1uSnMQrPlsazrSmtSlMowWyKxbkqwPrWrdSKkfXFYryqWOB+NMzMt9yS57VDIPOc8/gKuXS5GPmye9Yr/aYJCEXNMC4LTzPl2fl1rUs9NCRg5JPoRWNb3O1gJSyn1robGeGTmKb5u4JqhMsqqQTRybAyg4II4os4gZLm0OAYmIx6qeRVjaJYipK/hXO6pPd6XeRXsK5KDa4x95airHmjYIuzOU8YeHpbDUGuUU+RJznHQ1z0MEjnAFe6abcaV4t07ylZS7DDRHqK5y9+GU1tP5tpO6LngFciuenNbSNJXWxw9npYx8wOa3bLSvKXCLweprpLPwpdL/ryDjrgYFZviLVoNKj+xWLJNdsNuF5C+5qp1U/cp7smMW9ZbGLer9u1aDTkYbIyDI3bNfQWg6WLfTbdfvYQDPrxXjHgDR3u9egV1LsW3ysRkV9G20CxxKo7CumNNU4JEyldjbe22rVoRgU4cUGpuITFBFOFFFwIHQEVSnt1PPetE1C4HU00BhS2/JGKyLuHGQRgiusmiBU9xWHqcC+S3U4HWk0VFnlni9d0gTt3avKtTjM11Mvp0r0fxPc+deMiZwODmuF1CBo7sTAH3rJbnS4+6bHha4W504wlhkcYrP1nT/Iu3Rl/dy9D6Gs2xvG0XUw6nMEhz9K9BX7NrNluGGDDp6VzO9KpzdCWuaJ5RcWbQS7e3Y+tPFjKY92Pwrur3w4VXKfNjpnrWHLbXsUmxLGdvQ4rujKEtUznba0Myz0xmBaUdO1XLDSrm+1NIUjKwZ5JrX0zw/rOoNg2/2eM9S3Wu5tdKtdA07fO6rtGfmPzE1jXqxitB04uTM25aHStMLNhVVa5bSpmW1lfB826bP0FVdf1mXxBfeQi/uEb+HvW5p1kbWEF9xdgOg4FLCQ5U5Pdm1SS0iiWK1wDsePf/tDpVSeEDPmrgk5JUcGtO8l+zQ4UbmI4DE4rBmvmLFG2CT/AGRwK2bMRZJkhAGc/wCz1P5VC/kvGr/cBP3Qev1xTF3SMSXWHHQ4yTURhWSXzXLO3QFiP60rlDHtVnyxUgD+EHrRLaEgIhXjnGCpFW3kgswMIGOM5Dbf0rOe/uzPlbdWQ9mAx+FCAvJ/o+wrtDegNaVrM13JhrYqcemaxUuJ1mBYLH+HFadtPP8ANsEbN9KaA1V+1QHA5X3JGKk+2So21k3Z6EVSDO8QzvL+gOaemZ12yiMlehYbj+tMk1InJzhF3fQ1Mssh/iQnso61k5ngjlZZlRQMAKMVTz5sDMoYy553HBNDKNq8vv3XkKF8z61ThuL5TtdGC9mGcGks7eAqvm+Yp/2elbcCwAFNu5T1JoJIbPUCDtAQH1OOa145xdryAHHcCsK5+z25KwQ8fXinabdgNgAL9DQmFjroAyxcjNTQtg9ODWLBdNFL94nP5VqxTrIen5VYjRYjyqqs1TdVxVZvvUCHCnAUwU7dQIQnFORqaxpu7FMZb3ZqRDVFZealWaqEaKnFTpLis5ZamWSnckvGfAqBrg561XeSoGk5pXHYvibP1pxkz9aoLLU
okouOxIzc0xjmms9RNJQAyXFUnqy7cVUk61BSIJQKzLqPNaLmqktc1aHMjsw1VxZiSw81A0YrSnWs+Q4NfOYmm4yPrMJV5kRbBSbBS7xRvrn1PRQoQUuwUgbmnZpajKk0fNS2cALZNPKEnOKt20J9K1jJ2PKxdXlLUMa1cSNcVFFDjrVpRS5mjyVepKxXljxVUx81fmFVttaRlKx9FgsOoxuxirilJwKVjiqs02BTuekNuJsA1k3E/NPup+vNZrMXbA5rWlHmZnWqqKHg7mrY05RkVVtbEkA4rYgs2j6V69ClY+axeJbehs2gXaKvmESCsu3LoRkVqwkkc10M4ovmFS2C1IzLGKe0mBWbdTkA4rO5eiHT3Haq8Uh8zNUjKWarcBzTVrj5XI0hJ8tUbq45p0kuFrNmJZ65cXW5VaJ6mBwyi7sHl3GiolVj2orxueR7Wh0MM3rVsS8Vhw3QPeppLvavWvS5q1Oep+dWjJGk5WTg1XazBOQKzY9RBlC5rVhnBXrXoxldC5JFZ7cr2qWHeGxVvKtSrEN2RWsZGUrPcuWkO/61qR2OQDiq1kVGM1sROCKtyuY8iRW8jaBxUTxZHStTaDUTwCoLOfurfg8Vz943kn2rsrqD5TxXIaxEcHAqXG5SkV4bsE9auLccda5iGdllKnqK045SQKxlFo2Vma/2n5etVZ70qetU3nIFUri49656r0OzDUOZm5FqC7eTUF1Orr1rnXvCvQ0iXpPVq5sFSjKsdWJwsqceZF1sbjSNntUayqe9TRMS3Ar6eVNKJ4sldl7SlYzDNbdyD5eAcVT01SGBNXLuVRXG7EMyblQI+SWPvVBmRR0/IVY1BuOhPsKzlklx9z8DQhEFw5kPXHpVY/MuDuDepFT3G4rzAMn3qusjRx/cb8s0wKxEWcFS3vjFWrZ4UOQVDdsjNOWSORccZPap4bBZCF3op6/NVpEmjBcqQPmXJ61Ze3ivYik8W5cY71mSae0GJTNlR2U1bh/1Yb7Xz6E4NVYRhXPhS9sbj7Votz5co5wGxWnYeMvF+nAW93ZLdqOMkjP510FnbTzR5VXlB7xjNalt4du7lR/o5AJ7is5UYy3Q1Oxwuoa34t17MNlB9ihPXBGTVrwx8NGecXN23mEHLMa9W0zwfHF812QfSNen410cdlBBF5UUSqvoBRCNOn8KBylLc5Dwz4eSw1B5vLVMjCr/ALNdsoxTIolToKlyKJSuJIUUYoBozU2GBOKZmnHmm0wEzUbjPWpKYTmqAjK8GsTVrYyqQB9c5rdaQLWdcyCYhFiJ3DtRYS0PJfEfh55599kMp054ya5uTQbqX908CqQOdx717mNPUx7GQKT17kVTfQo5A3yDf70KCL9o7HgOp+F0cEbSGHLY/Q1nWv8AaWhvuiJktxzxyRX0JqHg6C9tTtG2dBkEd/avOtW0FrOclhhCM9MHPpSlTUtGKNRox9N8Z6fLhL+Ir7gZrqLfVfC8sXnC9TA6gg5rjrnRLe5LloeVOcAYOKrL4asjGeJPw6j8K5nhuzNHNPdHW6p4+0KwhZdMRZ5scHBPNcPdT694unLykrCfwFakWlWlkNyWiv8Ahkip/tJVcD5c9mUDFEaEYu71DndrLQh03QYNNhLOm5scsR0qxc3cUKg7m5HYAf1rPfV5t3lCL6kGsy5luS3zkNnqvpW7ZBcu7jz4TkyM3XjgY+lY6sQ3C8j8KsmRViG8AZHQGoA7JkDgHpwP50ih6rLNId0LFQOgGAKZJC0uDuj8sHHTBqtFcPJJglcD1NTo3mSbluAo9MgGgCULEMAwSOB3ABx/n61OtxY7mVYmDY6N2qohVJAxmCseOaeqSmU52sh6cZFACCGF9w3KHHTJx/WpoIHhnAeXanfBNQ3sAEYyqq3Yg8U2ySaCZWbcOcAlcqfxoA1WEwYyQzFkPQkE1IJLktuUiQ9D1yP61GZ+Nmzy2zUzHzivmqV9GBwaYFpEleJjMNwJxgMM0jRQwr8sLKx
9SKjuFl8oLuST+Lay5NVoFmGfmDD+7mkBo2N1cKSqgH/d5I+oq8n2qbJwx/ACsiGP96Sq+X6gVcYkxDnp/EP/AK1O5JTvZ3WQqZceoIOfzqe3g3EPECCR1zmpY7Tz13b1bHc1Fm5tZ8oY2H0oKNi1Ux4MrGtq1l7npWTaym6iAfarelatqu0dRxVIk2Yjuj4zUEmQ3NT2TCTjpTbyIKaoRBmgGodxzT1NAibtUTGnZpj9KAGhualU1WzzUqmmItIaso/FUlepFkqgLDtUDNSNJTCcmkwHiSniSoRTqkCRpKjLE0hNAoYxT0qvJVhulVJjjNIZBKetU261LIetRAc1O5cXYrzRbh71lTwNk10Ij3Ux7IEdK4MVhec9jBY32ehzRiNNEZrams9vaqTxYNeHWpypvU+joYhTVyoEqzFDnrT0hq1HDx0rn1exFfEqKI0hGauwQe1JFBk1fhj21dNWPEqVHNkPlbaUECpZTVJ5K2lC534HD3lqOlkzVdmxQzcVVmm4pbaH0UY2Q6aYDvWVc3HvUsz55rKuZacdWKdVRI5p8mr2m2LSsGK1Ss7driUccZrttKsQirxXrYajy6s8PF4rm0Q+y075RkVqpYgdqvQQKq9KlOF6113PNcb7lEWgHan7fLFTPKKpzzcUaiskNmmAGM1mzyZzSTz9earI5dqTYiRIs9qsKDGKtWkO4dKnntgFryq+JcZaHRhprmszMdiaRYCx6U8xEN7VcgiGK53V5tz3NIrQjjsxjkUVpRqMUUrGPtmcNBcECnzXTeX1pLWAMtWJLHMdfQVq1FbnxEG2ZcV2fO5retb07QM1zssBikBrQtG4rxsTiZRleJ9Dh6UJUdTpoLjdV5JsVhWslaCSHFFLHSW55NaiubQ2IbmtGC8x3rm0kNWIrkg16lHExmckqbOuhvAauJIrVycN571owXvvXUncycTXnjDKa5zU7EP2raF2rDrVOch+lUiTkW0lfM3bal/s/A6VvLECxqf7KGXgUSVyoyscTdQGPPFYd0xBru7+x68VyGo2ZDHivPxELRPZwE05GFLMc1GsxpLhSrVWJwa82jN05XR9JKkqlOxorcMDxWxp0xYjNcr57Ka0tNvH8wFiAPeve+t+0p6HzOKwbps9EsQCBTroc9qztNvVOPmBq/cMZFyBms6LbWp5k42Zk3UmOAQPpVMwLJHuLkH2FXJkZmyRgUiqvl5wfxroRkYs8ax5272+pqtGA4wysv41q3BGcYzTIZYQ21IVZ/8AayapCK8NpDIMOSvuRWpbWH7rYHaRfdR/jUZuguF+z2w9eRWhaql2Q3m7PaPbj+daoli2WjrM23Maqf4WDA112jeF4opQ7W6t/wB9Y/XNRaJB++XJu2x0OOK7m1ztwWDfhim3yoz3Y63tYoEASNV+gqcADtS0tc7dzVISkNKelMY45NNDGs2BgVGo+ctTGlGTk0CdfWmOzJs4pPM45qLz1ZiAcj1FJ5g9adgsTA0/cKqCT5vlP0qTfjrRYLE+abio1lGM7s0ofJwKCRs65jP0qO3iFtb4yWPqetWaeMUriZAYgMY5qNIP3pYnI6CrbDP40BcCi4WIvL/KsHxFolvfQmV/ldRnIGQfqK6QDioLu3E8JUnHvRcLHid7BBbzFNkgPYqR/IjmsS8uFsIP35mO4nA8gA/owrsPF+i3rTZzAQDkYyDj/PvXN3cM0kKmVPNA/igk+YfUHFUNHL3OoeZExG7BHAIHH/j1ZhlJJberP23L/kVd1eCK0mw42CTkecu0n6EcVkbQWzz5RGARzisihTNKzHGAfQUklujria5Ge45FIYZgp8kt/wB87qQTzR8Tyqy+hHSgoaIBFhRGvPfcWAq7DbQnmUbvZVGKrvcIF+QK6Y6gHFOt5n6RGTOPugCgB81laMC6q2B69DUW1bdTs+UHrjI/nUimWaUjyZC3tSz3AUFGlCHusvekBD5EE0eBN8/YZqErPFJhT7c5qWNbQnfAoL91zV3
DT2+WgX/eB6UCZVSZjgTxYI43Rkj9OlW4ZgY9rJui9ccipFjHkKq9DxknioNz2xDE7XH93ofwpBYkjngaXa0rewJxVuGSHBUSkD0bkGs5I7e4Jdwobtg9aW7YiJfIccfwsKYzReUEhiFK/wDXTFTKy+UD8zL1xnJH41ixMW2iQMHHYj+VaMcvlN8jqDj7rKaaYx81whUNFKygHlc8j9Kls7mJW2nOfXIIP4YpG+zXUO6W2xP/AHo24NZzEIw3CRFB4yKBHQK8Xm5RV3f7PWl8+OUZlXgf3hiqUdxmMH7rD+KrouGKgbkf6Gi4rEqXiKARyvselbVtOvUHg1hopYZ8rqex61p2W1GCkEZPemmM6zS3BxgVLfxd81HpUHG5TmrV6jY61oQYhPNPBpzx4NMoEPXmlbpQvalHSmBFt5pxFKTimlqoQueaeJMVEDRQBKGzTs1FnFNaWpAmLUF6rF6TzKQyz5lOVqp7qlSWgZaJ4qrMOtTCTimPjFJgZ7jmgCiY4NRq/NAyzF1q4qgiqSGr0XK1LLTsUrtRg1jyAAnNb9xFkHisaeA7q8nHU7q56mGxLiiKIZNX44siq8UWAK0IkIFeKmom6lKqyNUwalDBRUbtioDLlsVUVzM3jQ5dSSUlqoygg1pqvFVblMCuhx5Ua0sZ7OVkZrycVVkJJqy/U1Xl4Fcjlqeksc5GfPJgEVnEGaYKKtXZLHAq5p1jkhiK9HCUebU5a+IZf0qyCheK6u0j8sCqFpAFUcVpoSBivYSsjzHK7LqyYFRyTUzIxUEsgUGmPmGSyms+4nOKfNOB3rOuJgRxQSV5Z8nrVyyXzCKxnJMnPSt3SioxWFdtRuS2dBZRYWp5lyKLfGKnkwBXizXOx0073MtoRmmM6xCnXUwjzWJdXuTgGlCDuen9YaiaovAehorGt3Zsmiu9U9Didd33MzTrlema1/MUrXEWF6VxmtyK+ynWqr4bn2Z4ybi7NEl9jJPeksWyuKp3Vxup9hLhgO1cdbDOnHU9ChirR5To4VwKux1QhlGBVoSiueFSL0Zm3dlhjioBNg9ajln461W83mqdT2bujWjR53Y14bir0dwR3rn4pquJPXp4fGqRrWy7TQ3o7vPerCzbu9Ycc9W4p69CNRM8qrh3E1Y8Z5NX4CKxEmzV6GfB61smcrVie6gDZ4rmNRsgSeK6d5gRWVd4Y1hXV4no4H4jz/UbHEnSsSaAqeld3f24Y9Kwbuz68V83KbjUaPsaL9w5do6WOZYzhs1dubUrnArLdmVsbCT6V6GHcmzixbg0dTol6JJ1UE12LyHyRz2rhtAiuGnQ+U30ArvTaE2oZyE46GvWpxsj5TEtc2hkTzEHmTj61Ue8CxnHNXLiBSc5z9aqGNPMHybh7VocxVM28dCPwpy7h0RSPUdamfYx4gYH86eYhGPmfb9V/pVIkILSSU7Y2IJ7EDFbFpZX9tHiGXaR1+7j8qzI5UjwpeN89jHg1v6LYr5ocCFc/wB4k/pW0SJHT+H2ufLG+ZZB3BTFdXCdwzWbpkaRJ/yzP+6MVqqR+FTMmJMOlFMMmKN1Z2LHMQBWfqF6ttAT1Y8AU66uNqnHauN1TVS8hTOCKTdjajDmkjSk1Jjzuqs16xByxA/nXOtqGG27hn60fbz0qLo9f6uraHS2urCMshJIPrU/9pbj14rkftBHOaPtzetVzWIWETZ2EWq+U2etXPtyzjIY8da4IamudvmLu9M81at9U2kAngU/aEVMJpdHeRXC8AGrcZGB3B9a4xdZiiwxlVR6E4rYs9SFyBtP5GqUkzzp02joAQB7Uu4HkGqUU4bjNWlPemYk2eKFPrUYPrQfbkUgJs0xzkdKAahlJI4pAYWv/ZmtWyCfp/8AWryjUpbsXb2+2HGfkLRDH0OOter6vYtPHuhnaJx2J+U+1eZalFNM08E6iCfPytuO0/QjkfpVAjk9XRoWKiymjL8jypdyMfcGsFZYbwNbktanP3iMYNbN5qGsWUx
t7pBPbNxuk7/8Cqldwi8tlM6vgH5Wx86/j0Yf54qBlE6Xc5CxPBKR1ZZVVsfzqnPbTRyAjPpz1q61jC0w2XCo/ZpFZQfyzT/JJG52TA65k3flSLK8UCH/AFpIfsP/AK9Na22ytuIZRzjFSLCWYmCFZAOeuDUoA8vJwvqCeVpAMzbTjaiSKw/izn+dOksiADKxYEfKQOKZKGhjDsN8bj7ynBH1p1vc/KFSY+UevmZ4oAqGxI3MrYcH+6OaEvp1AHlBSvUgdfwqS6M+4lgc9mXofwp0ULSqAT82OARtyKkZAJnlZnwob1X5aic3Ll8csOSvQ1qNGYGXfHuU8MwH3frUfl/LsjXvwTzQBiOLjAOOvTnmpo/tBA/eu+B909q2Gt1WMNJEQe/GcVSvGXd+5ZQRyPeqAltm3RjcMc+tWWlU4DscdiDnFZC3xmUggb+mSOn0pYLd927ev1Gf5UAb1rqDQybGC7RxnHNajyRSDKoMn0NYdnIJAUdVc/7J5FaiQQN0mY46gUxFuKOKZdrBc+2KgNoIZD8x574qY2QfBRlYjsRircdqBGNw5FAyC3RkIIYEewrXtMysNuOvpWabds5jGPbrWnprkSgMPwFEQOw0eJwOtWr1COan0nPkD5c8elF4vtWqRm2YEn3jUWKszx4PSq205osK4A5p/akAooEMeoqmaoGOKLjAyU4NxVZjzSq1MRM71AZOaHOaixSGTA4p4NQipVoAcaTOKd2pjUDHpKTT2fioV60rdKRZWuGqoG5qzNVQ9akC9btk1tWoyK5+2PNdDZAkCgY6aH5ay54OeldG0OVrOuIMVx4uN4nZhVeVjKjhxU/3Vp5XHJ4qtPPgYFfNyw8m9T6GkoQRBPJVdD8wNQzyljTrcE9aI+4VVmnE1I34qtdSZFSKMLTHAI5rou5I8lxs7mVKKpTvnitK5worMVfOmwKVKg5ysjojUSWoW9h50gJ5rpbPTlVelN02yHGa3UiCrXt0oezVjneIUmVlt9oGKkEeBTmcKaRpk9a64wcilqNbgVRuZgAfWppZhisi9fjg1LjYCrPc/MRmqzT5qGWUb+TTAw7VDBDuS1bGnMAQKy44y1a1lAykE1FW3JqXGKbOktZMKKddXQjU81VR9kXNY+pXjYIWvJUW2bNKBDqOpfMQDWdGWmaokgeeXc1bNpZ4A4rspUO5x1cQiS0gIj5orThgAWiuvkRw+2Z5Gv7s1eglY4AquVz9antSAea46dTld2fRZhgLxvFGkIGkGaswwYaltpBtA7VZJUGuPGYlz0PnoUnB6liMnj0qyslUkkGcVKJK8tm4s0pqsZqJ5OKovLz1pxjc9LBfEasc1WUnrGinPrVqO49QKLOLPaaNiOfpVuOesVLqPun5GrUd5CP4T+ddtDFOO7OSthlNbG3HNV2KbBrCjvYv7v61divlx90V69HFwl1PFxGAl0RqPPwaqSzkmozeAjoKhku2H3Qo/CuiVRNGeHoShLYZNHLKflQmqsliuMzyqg+tVby/uDx5rAe1ZM07nqx/GvPhSoSq3Z6VWtWjDQ1ZbTTRndKz/QVUkksLbJgslZh0MlUUuc8E1YVVc816v7uC0PKbqVPiY6HV7+aUIAkKeiiumty8luC5ZjjvXPRxxRnKrXRWMm6DBHaiMlI5a9PlWxVmidumAKrM4T5Tg1euTgHFYkzEMS2aZzlmOVXmw2T7KcVpAERjyolTP8TDrXLzag8IIhYR+/eo7eaedg1zM8w6hd3X61SZNjYmguJps+a4QdwcCt/RIriGUK12yAcjIyfyrLs/NkhDF1QD/nmv8q2bQ3O4BbgqPzNXF2IaO1tbiUqMtuAHUpitKKct1NczbXDxxgeazH3qYXsrHG6k5BY6MzZbGelO3PJwBx61lWfnPgHOPY1pLlQBTEJJCHjweprkfEejfui6ZB9R2rtBnqeKpX9sJ7dgaicbo0pTcXc8A1uyuluC/mvuB4INXtA1ae43W11zKgyrf3hXZ6r
oqSMxK59q5dtM+x3gcLgg4zXJFNM9yjiFJWZoifLYrJ8SXFymnMlsxV2HLDqBVxpCsi+9Nu0E0W1hmtGrmydjyW0t9VS9FxA8izA53k12lrPr98oSe4wB/dGK6Oz0KOTog9c1v2OiJFztFGrOSdWNN6GVpOizTSK1xK7H3r0LTLLyIh+VQafYqCPlHFbsUI24Axit4RPOr1XNjUXkcH61bRjjGaSNBjkc+opSApHOCf1rQ5rkyyYODQsnzGomYd+tVTdKsm05FIDSMo700yD0qgJzyQc+1Na56EZwe1SFixcNtHUc9iOtcjr1iLhXKWyGUnoe9dIZt3HUehqC4KSxlXi6dCO1O4HjF61pb3f2acPZydtx+Rvz4rnruyuLCZpIE/dvzmKTAP4d69f1zSra8t3QoDkdGxzXml1pQ0ozRStPBk5jXBI/LJBqCkZX2mxuF23csitjIKrnFUxbxef5u1JFzjdH3qS7tJoiJ2AdGPEo/r3/ADogtDK4WGYLLjlTxu+lIokRbaSYI/yZ6NjOKZJYW5mO58P0DAkZFVrmzniLOhYYxu7021+2xPhpd6E9GPH5UBYW4Xy96qyuQckDNMtJgWIeHYWGOBkH8K13tVuNpQIr46eUoP4ECsm6t5lkOIiuOdy/zoGO3bQYhEyxE4LKcgf4UOHhjVcswzninreGIBmmyeh3DrV2KeJ494zg9MDpQBBanGTtkfHU9asSwQeVui+YD7wHVfwqBpVjlBVMH+8pxmlEhkl3DzEJHXOaAK73c0cpwA+Dwen4VTu5Ir4BThJTxWuLckF0XzM8MOKzpbVWkKyw9eMjg0mKxShsDC2H6+9XR9miIDA78djio5YXiVRKxYdiRzVaSdjndltvTNIdjUWRdylk2p221egiikO+3Yqw6qRWJFqi+XsZPl9etW49RHlqPKBJ6MKLgdFCkpbPmqCPUc1YVbnzfmlGPQisa1NxMu5ipUf3qvxNMDtV+PQ07isXy2P+WvHpitfSUV5Vz61hRI0kgwct6V1OjR/Z5F3gM2eg7VcQZ3umWai3XjPHalvLYKM1e0ra0AwKXUIiYziuhGLONu1AJrPNbNxZuWPFUpLJl7Umh3KJpKsNFimMtSMrtUL1a20x1FICgQSacq1N5fNPAFAiArTCtTsKYVoYyEjFIslSMOKiIoBEm+kJqMGlqWMeGpTJkUzNMY0xg/NQNHTmakB5qWUS2yfNXS2C8CuftzyK6CwbgUmM2BGPLqhdxqATWkp/d1lX7HBrKa0NqMnFmJcycmsyd88VbuW5NVBCZK8XE1VHRHvUIOSuyskTM1aVvbHaDT4LYCryjy1rgtf3hVX2Kjx7RWfcTBM1oXcoUGuYvrjLEA1pFt6IzhHm3Hzz+YcDqat6dZFmBxVPT7ZppAzV1tjbCNenNe7gqChG7ODGVeX3Yk1rAI1FSzSbF6044VazLy4wCM10yjeRyULsgmuSCeaqm8PrVK7ueeDVBrg5603ilSfKz6PBYRzhzGyb3PBNQTtuHtWYJyT1q1FLuGK6JWqRvEmrQcGVJ4PnyKbCvzVp+RvHSnw2GGyRXG/d3OZonsrbODitaODaKZaRbR0q4eBXPL3io+6QyDjFZ01t5jVpkZoEYzVwpqxyYivcz4LIDtWlFAFp6qB2p4NbJHA22PAAoozRVEnj+KTpyOKkxQRxXiXP0Zq+hPbTtkCtaAlu9YsC85rXtTxXLXZ83j8Ooyui6imnt060iScUMQRXJfueeU7lyo61j3NyQeta1ym7pWJewHPFddBJs6cNNRkTwXfPJrSimDd65lWKnmr8F1gcmuiWGuz2VWdjeDYqRZaxxfcdamS8B71xzoNM1hO6NhJz61Zjusd6x0nBFTCWsbOL0NGkzejuwe9PacMKw1m96lWet/rU0rGP1eN7lqX5jVGWL2q0smaSTDCudVp3uW6UWrGX5WDVuAYPWnNHmpLa0nncCGJn/wB0V2U69WbOOdGnEuwqh5rWgZfL9Kh
ttLlXHnvHCP8AabJ/StSKKxiXHnSTH/ZXAr2MNz9TyMY6dtDDupduetY0sx53V1s0UU5xFpsk31b/AOtTDZiHl7Gyg/67vk/zrvUTw5PU88mtzcyHAOas2Ft5MoCxvJIfXO0V27atp2nrl7qzJ/u29qD+ppn/AAld1KSLG0YqP+WjlUX9AKpJIm7K9ot1kCK0kkx3AOK6O1tb1lBe3dPYrisCXxHdKo8293uf+WVvyB9WNWdN1q7lkOE2Z43E7m/M0AdNHb3W37jYq1awENl3iB9N24/pVWygmvnUMxb1LHNdVaWcFqmABRFBcjs0mK9ML6kYq4I8HLHAoa7iAwCM+lU5L/5toxVklppRVaefCFRyTVZpWkbA/Sq00p3HB4pFJFO7i3SH9TWFqVgHXKjkVuPJuzVeQAgisGjenNo4S8iKrk9ualtUNzIiryDWzqtmpG4DkjmotDt1iX7uOvJoR3+3901rKzWKIYrSiQdMVDGMYqzGwpo4ZSuadmVj4wM/zrRUgd6xUk7VZ+0nbg/StUzBmoMdyPrTJt3l8VnpebBtJyp9asrcIOpyBTuTYZHM7ZDrz/OkdUPDHHoTU4lt5fuEfSo5LdZ48ZyP5UDK7rt+46t7VDuPPr6U/wCwPCcZyKWWEsvKkH1FQ0MqNOfTNQSXD7OOfxqR7SXOfvgdx1/KmeVFnklagop3LCeMq4K/UZrldWgPk7FWNlHvz+RrtWhDfdKt+NZ95Yb1Imh3L6kUwPJb4TRSkQxlIm6SLyufT2/Gs+8hlj2efCYnA/dyqPlNd9qvhqCQmWC4ktn9VbGawLjT9Vs1YHZcxHgqeh/DsaGBz8ru0Stu2y9GBPX3FUnvfJYxOFRifvEcf/WrSuLBoFaVLVyhGGjzxWS6hImG07em1ucUmCEF2shG9mU+oq19rEi+USzN2DDr7VneWs/IwrqONp61D9suYRhAWH03CkM04dmCzKfL7rxxQ08MRD27YZesR6/iDWbdSTXkaXIVV3fK4GfvUyK0f7wByO1AGvJdpcNuQIr4wy8iqiHy5s/P8p+7nIp0ZWQKrYznBXHb+tTyW4UYTafYnFAImivAWJR/LPY9qsz6pEsQ80KT3YCudNiJJsYx71aEYgChWUj3NAy+17ayRj5wyHrjrUJ0pb7mwlLuP+WLcOR7f3v500W0TglX2f1oQGJgh+Ydd2aLiKkdq8NwUngxjqGGK1I7aKIgJtRTzWi+qwS26RXq/aAgwJPuuPx7/jRDZaPcwlhdToT/AAmLmluMghkIO1eWHatGCGWU75sQRf8APRhwfYetQW8Vrb48mJnlXo0xz+lOJa5lLTylmPrVJCNe2nhj+WFdvrJ3P+FdHokQMy7uea5q0gTcoJxXdaDaL8vNaxRLO001UWFccGrskQkHPNVbWLaoxWgo4qzNmdLZL6Vm3VkCOFro3Xiqc0OatMk5GaywelUpoQO1dLdw4BrAulOTQwTM5hioG61ZdW5qB4zWdiisxxTRJk0simos7TQMlpD3poegmgCNzUDmp26e9QOKAGA0bqMUu2pY7js1G33adinY4pjK7ULRJxTFagotwdRW/YN0rnojg+1bdhJUDOkj5jrPvo/lNXIHyoqvecqaiotDWi0mcvcL+8NJEM1PcRfvKbH8tfN4mn71z3IVbxsiwgAFRXE4VTTnlwKyL244IzXI6nQ7MPh+fcqahe9QDWbawtcS80soMslbOm2wAFe1l2G51zSOXMKkcPG0dy9p1mIwOK2VxGtRwKI1qO4nCqea9hwtsfNKTqyuxtzcAA81g3lzx1p95c+9Ys8xkNKUlTjdns4TDOo7IjlkLNUJ5p22nBa8KvU55XPraEFTjZDFHNW4Ac0xIsnitSztST0rsweL9npLYwxUFKNy3aRblFaCwYHSnW1ttFWyoAoxeLTfunhS0ZXXCCo3nHrTbmTbWNNdfvMZrGhUcjCczaSbJ61YU9KyLWXOK1YjkV3o8+W5ODS0gFPAqkZ2Dmi
lAop3FY8kApaCKFrxrH6LcsWyZrVhg+Ws627Vt2+DHXHVvc+ezFvmI/LZeajdverzpkVUni4rn66nlplYuCSKr3EQPNDfKaikn6gmvWwdOO5lOUk9DKuEG6q2SPpV2X5mp62ZYdK66vLE9LD4nlVpFLzTTlnYHrU01qV6VUYYOMVypxkepCopLQ0be7Pc1oR3II681zwYipFuCvek8PGQnUlFnRrMKmWWudS+Ixk1o2Mk15MsUCNI5OAFGa5HhpXsjdVopXbNdZT61dtLS4vBlF+QdZGOAPxq1b6XZaUA+r3MRm6i3jO4j64p0uu2WNkVqZAOgkOFH/ARWkcJGOtR/I5amLctKav5k8FpYpgKr3c3cDhB/U1sJY6g0O1tltB/dGFH/wBesm11Sd8BCsS+kYxWoj+Zy7Fj7nNeph6VNLQ8XF1qqepYh0+wiH765eRvSJf6mrAmtrcfubVfrKdxqGLbkZqaQK64UV2xSWx5k6kpbmZqGq3skZQTbI/7sYwKwTDNdSBR8xJrYubN5ZCB90DJPYCs25ibb5UJKJ3/ALzVVjIzriLTrXlytzcf88wfkH4jrVG51Wa4CxNwqcLGOAPwrYh8K3t4hcBYIv8AnpIcCrFtoOlWMv7+WS7l7iIYX86NeoHNwS3O8ZQba6fRoL6YZWJgPpgfrWxZR+bKsOn6Ugb1I3Ee+a6SDSPskYfUrkAn/lkv+FUloQ2P0mJ44x1ZwPwFbEckpGJj9AKr29wko8q0h2qP4jV8eVCmWPPeqER/ZGbnBGe2aabVI+43e3aknvlVepFZ098239yMZ7mk2MtSMsS4FUJZQehqF52Kjcearkt1NRcpIlkk9Kgc9qU9M1BLLtzSKRWucsrZ7Cq9sSCRnHaqmq6vFZw8nk1ytj4lZ7xASfnb8hQja+h6PDKdqirkTZP86w7HUEmiU/UfjWrE3dWzQYs1Uwc89KkRhuwTx2NZqSt9KlE5HHrVXJsaDRAvnH1FElvg/KTx0NV4bs5wf/1VL9t2sMjIp3EIkEgYNE22XsD0NTf2i8HDJhqVJA3KsOe1EkSzruxn1FADo9V8w4YcGp/tkMi+ntVZLcR9VBU0k1gX+eI5HoetK7DQtkwkfLwap3FuGbLcE9G/xpkcXln7+0+jdKlaRoyUlUj0z3qb3KM+QCFtj8GhWkXlGxVySBZxjo3Y1U+zOvQ5FSBVmt4Js+fEpB67flP+FZN3okH3bS4J44jlO1h7DtW7JGcc1lXsWVP6ZqiTz3xBDqGnytEyhkxgxSqAfzrirqAxlikSof7ua9RvLu7gzHKBLbjrHL8y4/pWBewaZeT+VG62cjn5Y5uY/wAHHT8fzqSjiFSeaTO0E9z3qO4R4ZQGiyD1I7Vr6jaXVlO1vLD9luU/vD5T7g+nvVZ7y7h+WVeTwAQCpoGJaKkhnhAPI3ceopkMu1hj7h4YkdKm065ebVoleHrxn8KpvHKs8hV9gPQEYFAGi2nuT5qAuoPVecVBMqW3Mu7J/hxnP4Vmub6ReGZQOPlPWp4GlAzJ82O560AKlzKGxDEXz0UjH41XlO9twUqc87TVyR0hJZ5WKkZEYIohngnOUgKjoCaAK9uHGcFt/oelWYMsWLrgDrxmrDWkjEFSC3v8tNkwuUeLb2JpFIkc28kQAGcd+lS28TRj5Dn2NU47OZuYnB/2fWpPMlil2kbD3xQMu/vYyHHH0q7ZKZCd2TVSA+coDbfetWzHlEcZFVFEGvYwgsMiu40SAjGCa5HT03SBl4+td1pMJAHGfpWqJZ0dvu281fjNUYOlXE6VRDJG5qCQVYprLQmSzLuIPMrNnsBXQOAKzbqYKK1TEc3c2gjJrPlAFa92+/NY0/3qllIpyrVKSOr7c1VkXrUDKvNKKk280YxQgG0xlp+RTS1AEZFFKetAoGJtp4WgVKKBleSLNVjHtNaRHFVZVpAQoea1bKTpWOTg1btp8EVJR11tL8gp0/IrMtJ+BV5pMrSktARm3KDJqg8
gWr90/WsWeQ5NeFjo2Pey6HOxJ5+Disi5cscVadutRGInmvMw1F1Ktj6GTjRp3K8Kc81uWRxiskRlTWhaZzX29GlGlSPjMdUdaZtiT93WdeyHBxV2PlapXcfWuKpioxM6FCxhXLEk1RxV64GGqsoryq1d1GfW4CCjHQaseanSHNLFFk1p21p3IrG53ynyrUjtrQdxWxbRKuKgEYQU03HlnisJXexzt+0NgSBRVWe5AHWs9744qjLcNITV0qUnuePioqLJ7u53cA1nbctk9afgk81KsdehCHKjzmya1bFasL8CstIyKuQk10RMpI1Eapl5qnE35VcjNaoxaJAMUVKoyKKsk8l2g9qQwe1LGwzVpGGK85JM+oeOcSGBdprYtjxWeu3NXLdgOlcuIp22PPxOI9qzQ42VVm6GplORUMozXJynIZFzwTisyRjk1sXC8ms2WEZrqoJrY9DDwhJakMJ+bmtm3RWUVkquDV23m29DXTWXNGxz4jDyjK62L01spXpWPdWeM8VupcKRg1DOoccVnhcLK92Y/W3S2OVkiKUxVMjAKpJ9B3rSuouelaGmWawqrwsPPIy855WBf/iq7nRt1O2GYJx1WpFp2gIxD3z4I58kHBA9WP8ACKtal4ljs4jp+jKtvEOHmiG0t7A9fxqjqdy7Qm1tcrbbskn70p9WP9KxTG27kVm6sbWiVGn7R88maUF0T1NXI7nGOay47c5+Vg1WRBMo5U1yT5ehqq1tDo7G+wRzXR2l3u7159C8iHiun0pbq4xsiYj17fnWtCq07HBimpanXwyKe9X4d0uFRdxPpWJFPaWg/f3AkkH/ACzjP9a07bV/3J2bYlPAx1r0KdVM8mSLN1aKI8TSiNB2HU1hT6jBZnbaQLv/AOesg3GtWQPdrsiVnY9hVdtCtbIGfWLgoeot4uXb/CtzEy47i71CXB865c/wjmtyHS0tgH1O5WD0t4eXP1Paq/8AbQtIzBZW62oP8I5bHqzf0qsoaZt7MWZuTmn6COmi1XEPkWMIto+5X7x+poSFnbcSWJ7nk1StIHCbjwo7mrX20RfLEN3+0aGBqW7m3GB3qU+bNyOB3PpVa1U+X9pvj5cfbPVqlN4J/lQBYl6AUrgBVeg+b/aNRuox0qVnGMZqCRs0DIWULye9V3I4qd+RzVSU4pFEcrgCsHVdSWCNstjFW7678qMtmvO9bvp55XyQF6AVLLijP13Wftt1gMQg4A9az7WRlmVjng5FRSWLlTM52k9M96baSkT7c5xzj1osXzI6Wx1l7ZW+fgNurttN1jzPL+YHPXmvJ5JS5GeMkjFW7bWLiyuDFn5ARg57VUSZWPdIplcDmrCgH3rhNH8RxT7lZwGXg11VpfLNGpU9aZBtLGNoNO8tWGDVaO4O2plfuDzQIkWHjA6jpQssyluT9DTllUt9asMFkTcOp60ARC5eP73I9KsRXinkcVGsYYYbkfyoNrsGV5FGoFxmSdcN97+9UYV4eMBkP8J5FV1VlPFWEn4w3IpEiCFJPmiba390/wBKZNGVkyVwTzTnHOQcjsanWb9yFcblz360AZ74xyKozojZBOPZhW1LarMN1u271U/eFZM0JyQc5HapEctrFhKYW8pMDH3h2rjptNTb88Ocf3fl/H2/CvUHhKHKkg+1Z09jBOx+0W/X+KLg/l0qhnnaXn2GD7NcRfbbE8CGX+D/AHD/AAn6fjmopNFhkgN1pk7XEOMvE3Dp7H/GuxuPCqszfZis6t1HRvxU/wBKpwaFNY3aTRho2Tp2YUDucdZW7R3W4243AMc9e1Uhpd0ZyViK55BViDXotzY2TxN5IxeMOYV4Df7vv7Vyl3ezRs0MIUHptPJzUjRkqlzbHYVZyf4m+aq93bqpc3DMqnsCMU15tWkl8pnC8/dEeKqzxTMwDOGU9mbmhjGrbWiNuihOPXdkfyqZYlUlipC4yMDpTY5JbcFFVtv+0Ki+0SSzYJYAHp6VIyR5rkRbkdip98ioPOc
rtPK9far5zBG5QYD8YHSqZmVJcSodo6kdRTEEUxjbuAa0Duk2iceYhHDfxD8aiSEfeXa6HoRT43POw59iKYy7DbGPoQ6+uK17Tt8uPasy2u2DKGQVuWYE0g28exq4iZ0ekQhmGRXa2EPlqK5vSrMjaQa6213BQGFaCLsdW4zVRKspQiC0KDzTVp9MkrTLxWVdQnFbbjiqk0IIq4sTOTuxtFYszcmutvrVcGubu4QpNNgihTZFGKCeaD0rMoqyAKagZqnmqk8nNADieKQmkU040DsR7qPMxSPVd2xSAtiSplkFZgkNWI3oAubqikOaaDxTHNAFeWmxyYNJKeahDc0rlnQWU3TmtpGzHXL2UhBFdBDJ+7pgV7ysWdua2btqxJ+WrgxdHnR6+X1+RkKLuariwDHSmQRd6uhgq1y4TCum+Zm2Px3P7sSg8IBqxBGAc1FPKMmiGXmtsXi3blRyUqHu8zNENiq1wc5p6tmmSDivMk21cqKszGul56VDHFu7Vdmj3NxU9tbgdaUdj6DCVVGAlrbeorRUCMUIoUVFI1aRg5GVfEXY2eXg4rPYk1cKlqYIM1008N3OWeM5FZFTaSaVYTV4QU9YMdq6lSSPMnXc9yqkNTJDVpYakWKrUTLmK6xY7VMsVTiOnBfar5SOYbGMVaiOKiC09eKZLLqHiioVaimSeZzadcQMcpVdvNi/hNeuXOhq3VQfwrLuPD6EfcH5VzrBcuzOmWO5t0eYG72nmp4dQXPWup1Hw1FIp+TH4Vxeo6FPZyMYnOKieFY4V4yZv296pA5q55wYVwSXlxanDg1pW+tjAya5/qzRbs9jfuIw1Zs0RB4oTUVfvUglDc1tTppExqSg9Cg2R2pEdgautGr1C1vjpVuKO2ni76SHxXGOtWPP+Ws4gqeaUSdqTlKK0HPD0qmpeihW8lEYOD1z2A9aufJ5Qtrf/Ur1/wBtvWqYcwwLEOGkGXI9OwqeDisMRVfLY4pUlF6Cy2YYVnyWXPSt1PmFBt/N4Aya8lTlF2No1WjEitSD0rStbGeeQJCpJrUh0pIFE104Vey9zS3Go4j8m1TyE9R1NdNre9N/IiVRy2GG00/T+b4LPP8A884+31qK51j7QvlRO1tH08tRxVNxvOT19anstEudTmxEAsY+9K3CqPc041p1HyxRPLFK8mRRDdIAlwpPYGu107RJYbOC41O4W2t8ZAJ+ZvoKwRquleGlZNOjF7qA4+2Sj5U/3B/WsefVbnUWWa5neaYE5JNd1Bwpbav8DOVKU/Q9Uj1aBI/J01FhXGDITljWfPbgnz3fc56Z/nXMaNJMw8w5CDHHrXZRW5lh3TttJ7d69OPvK5wyjyuxiLZiWf75Ziew61sQW1vZ/f8Ank/ujtSuqRrsgUL6t/EaWG1eQhUXLGqIFaSWY7e3ZRV6O0hsAs1380p5WH/Gpkjh0uM4AluzwCOQtRBfIbzrj95cHna3OPrSEJKs05FzduVj/gUdT9KjDH7+PLhHQDvU23I8+4JOei+v/wBanRRNM29+nYdgKgsdDl1Lt8qD1ocgnjpUNxPuYLHxGvQf1piuep6U7gSy9BVCcYU1aMmahk5BouBzGrg+Wx7Vw19KFlyVJfoB6/4V6NqcRMTADJNcZNbIkxLjcc/lUPctPQ52eE7t0zB5CuMKfu57VnmxYXBcttAHAHat25h3BXTKID09TVRYPm2srNk5LNVElF4g00JHPy9TVWaFRKS3HHNW2LtcALzt7VY+xsQwcjOeuO1ACaVL5cr7mIJI59RXXafrTJdLCz4J+6PyrlkgQRoqknqBnqDU1sji9DE5B457U7getWl95sS+takEm4CuQ0eVjAinrXRwOy07ks1kWrEZOcetUYpOKsxzDii4iwGqzDcbTgjK9xVMyDdT8gGi4WLc0ezDpzE3Q00IJfu8N6etRRXSodkuTG3X296bLlZdgOD2PrUgTrJs4PSkflQoI3HkehqON/tfyP8ALOOh9frTWyD5DDk
dDQALIySdSrCrRaK6G2YbX6Bh3qn5gYbH7dG7imOWhODyP507hYkuLJojz09apvEo61dhvSF2v80Xv1H/ANaoLqzPLwNuXrt60gM2byl6darvfOWCzoJk6c9fwNTTR1WfG3B/OhMDMu9NW5lzaXHlzZyqynaQfZq53V9EkvDI5gaDUolzOCOJV/vCuluwDEQenY1zk/iG/sJFNtNGyxtnyZhuRh9D0+oqtBo5DUHlhjy7AsBwwrNW6Fyo3qCw7sK7PU9P0XXbWC/sp/sMlwSGjlG6NX7ruHKj6iuXu/DF/YynzoiFB/1i/MrD2YcVNijMe7UEwOGGejA5AqEwTwncGJz3PcVoizUy7Oc+hFSx2/IjGaQFeE5UxO+PTPahol24Y5J4q2LdJoiGGCPzqB0AwCQR60rjGQR+RJxna1aNraASZzj29arqA3yselXVK4AZWOOhFUgLdvbLIflIyOxrd0yzbcAecVnWhRl+Xr79a63Rog2Bjn1rSJLOh0mAqo5rfiHFULSHaorQSmBYSrCVWWrCVRDLKVIKhQ1MKBMCKjkTIqbFJihMRi30DbSRXJ30T7zmvQJ4Qy4rmtVswuSBWt7ok4xxtajNS3S7ZDxVctgVBRDNzWfLwauzydazpJPmpFDkapd9QJT6QCMarSVYaq8gpMZF1NTxnFRDrUg6UwLAkpryjFVnlxVZ56kollbmmKarmbJpytQBs2XUVtxthawrE4xWp5ny0AFy+azHGWq65zmqsnFJo0jJrYliHFMuJNoojkwKim+avNxWLVPQ9PCYF1PekZss7GTFWrbJNQtb5atCzg5rg51P3mdlePs1youQoSKfJCcGrcMPFSOoqPaRRyxhJmMYMGlB21anAGaosCTVx9/Y6oycFqT+Zml8vNJEnSrCrxXoUKfKjkr177EYip4jqUCnBa6TjbuRBKeI6lC0oWqIGCOlC4qULTtlAEQFKBU3l0COlcCMCnAVIEp4jpisRAUVNsoqbhY7OSFT2qlPbpirE1xjvWXc3nvXczgVyhfW6EHiuT1OxVs8V0dzeZ71iXk4OazZrG5weqaYuT8tcldwGFjjivQ9SKkGuN1EDJrmnodVORjJeSxHrV+DWSMBjWbJHzUBXmpsmaSkzrrfVFk7ir63KsOtcJHM8R4NadtqTDGTUuNioSR0zYaoHQryKqw3wYdalM4apRtGryk0dxzyelXI7ketZQRp5AsQJY9q1Y7a30qITag26XtCKh4dS1HKojYsIZZxuPyx92bgVpi9tbQbbch37sa4K88S3Nwdn+rgHSNaSDWeeTWU8Lb4ER8W52U0zTtuZtxqt5LyyBUUsx6ADOai0iObUVMq4WBPvSt0FdDJqtloMe21UPekD5pB933/APrVhHBO95g5paREi0W206Dz9Yfa5GYrZT87f7390VWv9QnvIRAoWG3XpDHwPx9azmvnvJ2mmcvK5yWPetJLT915s7CGP1bqfoKyrU5/DTVkCjbWW5zl1aE9Kn07R5hMr3T/AGeM8jd1P0XrWpPfQW5/0SL5h/y1k5P4Cs/bNcFpXcmSTqzHoveu7C4dJXeonVlsblnq2W8qxQLBHwZm6n39q2IdYDfIrcdyeprgX1AKPs9txGOp7sa2tBsbrULhQnT1PYV3xk72InSi48x31kPtJAUbia1UPkDZb8ysMM39BWStzBY4sLQ75sYml9fYVrR5giDf8tW/StDgasIwW256zf8AoP8A9eo1jAXzpunYd2qRIwQZX+6P1NQSsZpM49gB2qSQXddTEt09P6VNNJhdi9KD+5jCjqev0qtISTUssTGSKQ9cUqjCE0g61ICjv6U0pTlp4waYFK4gDqRjrXI65prYAQHJPJruilVLixWbqKATPMLuCWMBdhaq0ytEWc9SOBius1rT2iJ2ZHHaudljbG0+lAzIUrEqkoobvj0q2j79zKVJ6UiKksg3euKnjgwSeBg4IzU3GVoovMYlsfIR+Rq3ZQxG6VTu4JGfSny
RZiYL8ynnj0rX0rTFnhB5JPVvWqA3NJt9pyOc10SRd8VT061MMYWtiOLApkkAUipFBFWBHR5dIZViMrb/ADQBhjtweoq7Ed8e3uKZspVyrAjtU3AY/XBq3EPtMHlf8tEGY/celRyqNwYDAYZpqEowI7c5ouBKn7wZ/wCWo6e9WVIvF2scXC9D61BMNwWdeM9cdjTGYyL5yH94vJx/OmmISYEHJ4YcEe9Q/a1UbHG5P5Ve+XULZiBi4Uc+9Yroc80noJEszNCfNiO9D0YD9D70QXz8LztznaOo9xTIS0RIHKt95T0NSvaKR50H3R1HdaEMLiIz9CqyNyrdFlH9D7VmPGQCHUqQcYNbEIVl8qXoefofWmTxCXMNzxLj5Zf5ZqyTlrsMdwU/Q1xWqQzGcgL068da9GubNoZGSQYYVz+o26LIXI7dKCkclpCeVNLpkwzBej92T/BL/Cfz4qGHUrvSZniglOxuHgl5U/UVfuPL89Qg2leQfSofEyxTSxXqLjz4wxx2bv8ArRcYCLTNWjIyNOvOseT+5Y+nqv8AKqFzb3GmXULXEBXeeJOqP7hhwazZpJtq7fvDjcKu2WqXFpD5OEmtid0kEo3IT6j+6fcUKwDXuIWndBwc8VXmVeecOeSPWtOTSbTVmE+kFo7gLmW0lb5s/wCwf4hWdJZzgEMrZHBzwwqbDK8ZZZBnOPWtGzlYnpxTbW33sMg9K2bTSyG+ZfyqkNlvTrUSsDjn2rttHsyFU4PHesXS9PKkFeldnYptUdjjmtFoQzQhUhRU61ChxUymgTJlqZOlQLUyGrJLKVOtVUNWENBJLRQO9FIAYVn3tp5ynitDtSEU07AcFqOksrE7TXP3EBQnivTby3EiniuN1Wz8sk4rTcSOVmU4NZ8qc5rXnXmqEy1myyolTjkVBjBqZDQAbabIlSFhTGbikwKzLikzxTpGqrJLikMZPJVCSXmluJuaqGXmkNFlWqxGeapI1WI25ouM3bI9K1B0rEtHxitRJuKALG3IqrOMVYV6hmIIqZ7GlH4kUlbmrCRtIcYzTIYTLKFUV0VjpnAJFfP1MPKrUuz6iGIhSpmbFpzMMkVbisvKrcFqsa9KqXBVa1lh1GNkccsR7R3ZX+6Kqyy06SXmqzksa5Y4aTY1XjEZId1RqmTUojNPWKvToUVFHLWr8zEUYqQCnrFUgjrqOVjAtPEdSBaeBTJGLHTxHUgFPApiGrHTvLpwFOCk1QmM20banWEntUiWx9KVhXKwSpFhJ7Vfjsye1WY7H2qlEXMZYgOOlFbq2Ix0op8hPOY1xee9ZFxeZJ5qvNc5qpLL71tcwSCe4PPNY93c4zzU1zNx1rntQusZ5qJSsjSMblTUL3g1zlw5ds1auHMjk9qqPXK5XZ0RjZFVxmq7rzVpvpUD00DRXIwKZkjmpWqMKWYKoJ+lWtSB0dyyVu6TBcagd2dkS9ZGqG00qC2iF1qTbF6iLu1VNS1ua6AhgHkWy8BV7/WrsieZnUSa1a2EZg08B5OjTH+lZMly07FpWLse5Nc4lwymr1pM88yxRKzuxwFUZJo5WXCaRalQsflGTWpa6JBpkEWoa27qrn9zZx/fm+v90Vr2dg+i24mW2+1aoR91h+7tx6ljxu9qaIrbRpG1LxBdGfUZF3W8MZ3Fc/xZ6fStIq25FSpzbEt/r11Y4iaBVvnUGG2h+5aKemR3f+VUYrO6EP23VblbVGOf3nzSN9F61mv4ieNSmn26W2TlpfvSMfdjWe80s7F5XZ3PUsck1M2maUlJI6pPE9par5NjbkN/z8TYLH6DoKadbec5d2Y+pNcXMzKeKfbTuzKq5JJ4FZOnzrU3VRRdmdta3D3U6xJzk9fSrF/e8fZ4RwOGb1rCN0LC1+zq3+kv98j+EelO05bnULtLe3RpJXPAFCVtEbw5Jas6HRtLN1OEXBY8sT91R6mumuNZt9K04W9kzZcEBuhb1Y+3p+dY4vbayt5LS2YGztsG8uM83D9kX2z
WbpYk1fUZtTviRaxHc57eyiqbeyJcYyfM9judAMsUAu5Vyz8RKe/vXY2ULzLulb3JNef6ZrX229DYGTwsa9APQV6LZNvjEQ5x94+9bQWh51d6iTfMcAYUdBSRxYJY9qtSw4qu52jbUtGBA4ySaj8upu1AqGWQstNEf51OaTFICDZS4qbbS7RSAjFPwCKdtFAFAGbfWAmUtj6VzM+gkJKT0NdwetMkiWRduKBXPNl0cRyqNmcGoX00eZINvysMmvRWsE6qOaoy6SOoH6UWHc4qKxPlsq98V1GjWflQ4x71bg0kKeVFakFr5YGBTBskhTAq2gpFUAU8YqbgKBSEUZpCaGwDAoOBTSfemHpUXCxOW3RAelQtJg0inkenSmlaTYye3myTE33X4qMloJueoPOahPBqefMsSS9f4W+vaqWwCM5hYTwnaM9B29qnu1SaIXUI4P8ArFHY1UifaSGGVPBFWIGNtMAeYnGD6EetVERVVlNSROY+VOKddWohlyv3D0NRD0o2AmlAkXfGMY+8vp9Kasy3EYgf/W/8s29faohuQ7gSDSywiYedDww5ZR29xV3ArmdeYLgfJ2bHzJ/9asnWLLYMHDAjKsOjfStq6i+0RibHz9Gx61nGUxKYZl8yBuCp7e49DQB57NCsplI++nIpJP8ASdAyyAPHKRg+4rU1rRns5t8LeZbyHKyDv7H0NZ8m6LSTxy0owPpQM5yKAvMwZeOwpzWLfw8Mf1rWih80/dwasrYu2ML+VIDBg80EbhyPwIro4JodRhFte/LMRlbkDJ+jev16/WopbBmYZXr1rTsrDopH04poGZa6JPazlXGfRhyrD1FdPpNiGGGHIrR022xEIJl3p2z2rXi01YeUOV9aqwrjbawWLBWtGOPFJGuBUyigY9amUVGtTLVIQ9akWmCnrVkEymrEZqstTJTAsDpUgqJakqWSLSGimmkAyRcisDVbEyqSBXRHkVXnjDDpVxYmeZXliyMeKypocV6FqFgGBOK5S+sypPFW0CZzUkdQcitGZMGqT9ayaLEzSGlHSo3pMZXnbArNml61euDwayp+ppAVJpM1W3nNSS1CBk1JZYSWrEUvNVQOKniBzQJmzatWrGcisW1bGK2IGBqkBOGIpM7jinmmxD95Uy2Lg9TZ0y0XIOOa6aKNI46wbFgoFX5Lg7cCuGWh3Rk5bj7q5AziseVzI3Wp33OaQQ1mot7jlKy0KwjNAiq4IfaniA+laqJhzFMRVIIqtrbMe1SrZse1apE8xR8vFLt9q0009j2qdNMP92rUWLnRjCNvSplgY1vR6X6irMem+1WoMl1Uc+lqxqwli1dClgB2qdbMDtVchLqHPJp5ParKafW6tuPSpBCKfKiPaMxksParKWI44rSCCnhRTsTzMpJagdqmEAqfFLTFch8oUVNRRcR4w0uahllwOtMMnFVJ5eDSbsbJFa8n4PNc5eSFmPpWneT9ax5zk1yzlc6acbFGQcmqzirT1XcVmasqvxmoG61adalttOe4+diEiHVjVozkUre1mupNkKbjWp5Vvo65VDPeY44yFplxfpbxeRYjb6ydzWabu56ec/51pdGDTHXEeo6hMZXikY/TAFRDSpf+W00EY/6aOKbJPPIMNM5HoTVcg1asLU1LXT9JMqxS3U9zM5wsVtF1Ppk1vXN/p3hKNU02yUaz/FLM3meQPT03fhWBpWpppUVxKkAa8cbYZSf9V6ke9VLa0n1TUUhVt0szfeY/mSatPoiLdzbt9Vvr6KTU9Znkns4T8kRO1ZZeygDj3NYl1fzX13JcTtudzk+3sKsaxdrI0VlbsTZ2o2xDsx/if6sf6VlAGk30HHQuxsPWrKsMVmqxFSLOQak3hNIszDdVm3VdOg+0Sj/SH/1K+nvTbKNXzPMcQpyff2qteXDXNwXbp0A9BVJ9CJu7uTw755c8szH8zXbxxf2HZx2Nru/tm8GJsdYVPYe571k+HrddI0//AISC6RWwSlnC38cv94j
+6tT2888FrPq0zlrq5JWJj1yfvNQ9DSDuhbsG6ubbR7EZjjbBYf8ALVz1Y/0qXVtThtYk0qwlzbQn94w/jfuadEP7D0c3bj/Tb1SIPVF7t+NYmlWR1LUo4GO2PO6Vj/Co5J/Kjd2NJM9E8E2Rggiv3+/JxAD+rV6zpkAitx6mvH9F1qK41P8AcJ5dugCQoT91R0r1nSp/MgGT2rpVraHnVL31L8y5FZs33jWlK3y1myj5qxkJEYHFOFMzTgagYYo20+jmpYDMUUpFJSAKQt6Up71Gc80ihwp1R0uaAH0daaDSigBwAqQU1acBQJC5ozRSgVBQlJTsUfyoAbijHFLS0rCuMFK45pTQ1OwXIGFSwfMrRdmH69qQrTo/3cgPoaEMh24NWE2tGEJ57Uk4xKxHQ8imCmhFuIiWFoH4YdKqOpVsGp1Y/LMPvL1p88ayfMoxkZx7VoBSYcUR7kbK8VMY+KaFxSAcIkbLLwrDDL6e9Z9zbZYjHzD9a0UJU5FPuIhJGsyfQ+1Mk5lYynmxTIHtnP7yNv5j0NZup6D9yW3+e2Awjf4+9dTJbCTJ/OnQQhMxON0D9V/rQO5w8OmASAlefSr8VhtbIFdJNpSxS7hgqeQcdRQtmB2oGZQ01H6r1p0Wm+VL935a20gGOlSGNQKAKsVuI8VZSQoeP/10w0lFxljK9V/L0pytUK09adxWLANSLUS1KpqkBKvWpV+7UamnrzVogkBqVDUQqRDTuBZQ1LUK1KKTJYpphp1NNMBO9BFKKcaAKc8AYVzWq2Q5wK6xuQaz7q08wGtIsk80vbRgTxWRLHg13mqWG0HiuQvLdlJ4qZItMzulRPSyZBqB5cVmyiGfoax7hhk1fuZxg1i3M/JpMoZIaYlQNNzTkmwakZeHap41qrG4PNWo2oAvQg1pQHFZsLVeiNMRoZ4p0AJeoUfOK1NPg8xhSauCdjSs4mKjitNLJ5B0q9pun8Dit2KxUDpUexL9u0c4mmMe1WE0r2roxaKO1SLbr6VSpoj2zZgLpPtUq6UPSt8Qil2Cq5ELnZippg/u1Omnr6Vp7BS4p2RPMyglko7VOtso7VZxRTFch8lR2pQgFSmkqgECj0oxTqMUANxQBThS0ANxQBTqKkLjcUYpaKoYmKKWipsB4NK2BWVdTVeuG4NY1ySTxXPUkdkIlWY7qpSLk1eK5qJ4qxNtjOaM81GISxAUZJrVWyLDc3yr60rSpANsA5/vGnbuLmKQtIrZfNuCC3/POqV5dPcHA+WMdFFWpQzkliSTVcxc0w5e5nGP2qMx81omD2ppg9qZmzOaKmeVWkYPam+RVXIsZxirVt4lsdGluc/6Rc5ijHovc/0psNm086RKPmdgo/GrWrov2zyITuhtwIlI746n881onZXE0c8Y6Qx1o+R7U0wH0qLj5TP2UsUDSyBVHWrxtz6VOsX2eMsP9Yf0FVcTRTuZcRrbp91P1NWdD0s6nqKq/wAttGN88nZVHWofs5Y9ya6DUIn0HRV0pDtursCW8x1C/wAKf1P4VadtSJdipe3z+Idchgtk8uDiGCLsq/55NaVoqarrPkebtsLReW7bV7/if51j6fbva2N3fAENt8iI/wC03X9M1enUaT4djgH/AB93vzyj0TsPxpvuVGVtBNU1M6hfvN0TpGvZVHQVeSRdL8ON8v8ApN/wD3WIH+prn9Oge81CC3H8bgfhWlrV8l7q7CAYtogIoh/sjiojojfnT0NbwwcXaYyTXuGhhmgUn0ryPwfHbeapON1ez6WyCBRHjpXVHSJxVpJyLrjiqUoq854qlLWUiUVGNIJKV6gY8VmxllW5p+aqK2DUokzSAsZoqMGnZ4oYDsU0x07NGaAIytJt5qbijFRYdyILzTsVIAKXAphcaBTqM0FqBgaM0zzKPMqRWH5opM0ZFUMXrRSg8UtKwCYoYcCnClNMCMUuKcQPxoxQK46RcxxN3xio/Lqx1h+hplAxijBqxGMxFB/D8w/rUdPjba1
UiRhX9aTZU0g2sQOnUVFQAzbUsPyk5GQeCKbRQUEkIU5HKmo9tWAw6N90/pTGUqcGgkWPDL5Ln5T0PoaieLYxBpScU9pfPiz/ABoPzFBRCxAFVmkpXkqA8mkMkBqRahGakTikgJwKcBimrUy1aAVakUUgFPAqhD1FSrUYpwqiSUVIpqCpFNUIsxmph0qBKlFMkkFFMzS0gHU2nUw96AFNMaloK0IDKvoA6niuT1CwHPFd1LBkVkXtlkHitNyTzq6sQCeKwL6PZmu71GzMe7iuL1dMZFZyRaZyV5cEGsmWfJrQvkO7NY8qmoZdw8zJqWPJNQopq7AmTUgixBuq/EtMhhAFWlwBSbKJ4hiraNVIN6VftIGlI4oTCxatUZ2Fdlo1mcrkVmaZpvQ4rs9LswoHFaxRnJmxZQhYxxWko4qvEuBVgGmzMXFFFGKChaKPWipGBpKKBQAuKMUtFUAmOaMUtFAxKKWkoFYKKPWigAooooADSUtFACUUGigD58uOlZUi5Nas/NVDFk1wz3PShoU/Kp5RY+2TU/lYpDHxSQMpybn61XMPNaBi9qTyabQrmW0FMMFa3kU0wUJBzGUYPamG39q1/s9L9n9qoi5j/Z/am/Zq2fs9H2fnpQhFTTLPbJNcn/l3iLA/7XQfqaoG256V0kcG2xlH99lH5Zqt9m9qpkpmJ9l9qT7L7VufZfaj7L7UuUdzC+y4OcUjWhZstzW/9k9qBZ5PSgLlTQ9Li86W9uAPItRvI/vN2H51nXMUt3dS3Ex3O53EmuvvYFgsoLFBgj97KfVj0FZ6WQZgPU1T7EeZWttMFw1lYv8ALCuZ5T+p/QVn6lCb/UJZsfLnEY9FHAFdjcW6xQTyr1kxCv8AujrWULbHarYkUdG05LOC8v2+/GmyL/ebj+Wazf7NHO0V19xa+VpttF3ctKf5D+tVo7EE0N3Aq+HLOe3u89q9e0cy+WufSuC0+1aOQYrvtEilKjNbw2MJ7m5/BVWXvVtk2iqsgqJAiowqFhU7moWrNlIjpFbmnMKZUjJxJUgeqm+pFkoAs5pQaiDU4NQSSZpd1RE0oNIokDU7fUQpc0ASFqid6KTFADMn8aATmnYpMVNgJF+7SlwKjzTe9AEwenBuagB5p2eOtArFndgUx7jA4quxJpG6Ci47E6z1MJBiqCgiplzQmJo0FYeW1QmXmmxjKt9Kbtqhkm/Ipd1NA5pTSuBYLeZCrd14qI4606AgkoejDFIe4qhIM80VHmkaQCgZKzYXNMSYP+7J+hqtJP8AujVQyVNwsXJZu3cHFRRzmOUMO1Rs3nR7/wCIfe9x60zB5pjLNxH5c3ynKsNwPtTBT0/e2pHeM5H0pgpgPWngUxalFMB61MtRLUq0CY9RUgpgp4q0SSCnCmCn81QDqVTUdPU81QizHU61WRqlVqYicUhNIDRSELmkpaUUAIBT6KKAGkVDLCGFWKaelCYHLaxaDy2wK861m2OW4r2C8tBNGRXE61peM8VpuidjyG/hwTWLJDk13GqaZljxXPTWRjPSsWjRMy4rcH61digC1IqhKY83YVmykTeaFFNWZpDgVCqPOeOlbNhphOCRSsVcWytmcjNdRp1iBjimWVgFxxXQ2cGMcVSRLZfsLQADiujtItorPs4ula8IwK2RkyylTDpUS9BUgoEPFLSClpFBS4pKKBhSiigUkAdKKM0UwQUUtFSMKSloqgG0U7FJSuITFFLRimAhopTSUAIaKDRUgeAumTURjrQaA+lNMB9K5eU7+YoGKm+VV8wH0pPI9qOUXMUDFSeXV7yD6UfZz6U+UnmKPlUeTmrwtzTvs59KfKTcoeTR5NaAtz6U77OfSnYLmb5NL5NaP2U+lOFsfSlYLlLyf9FA/wBo/wAhUfkVq/Zj5WMd6QWp9KuxPMjM8mjyPatYWZPalFifSiwcxleR7VasbZTPvlHyRje34dq0BYt/dNWBZeXZ7QPmkOT/ALoquUlyMKaMzTO7dWOaWC3zMgx
3rYGnk/w1ZttNImUlehoURcyMi/h/fbB0TiqotjnpXRNpxZiSMkmnx6WSw+WnysXMjGu4C023HCKqj8qhW3YHgV076WZJWbb1NOXSf9mhQYc6M7TIGaQZFd/plvtgBx2rEsNLKyD5a6y3i8uED2reKsjKUrsgnGKz5a0pqoSjms5DRTYVGVqV6jNZM0I2xUZWpDim0gISKTPNSsKjK1FhkqHNSCol4FODVQiYUtRhqeDQA4UtIDQTQA6img0uaACmmlJpcCgBuKTFPxS4qbAM29aXFDHBpRRYVxNtKV5p4FAosMYFqVVxQKC1FgJF4B+lNLc0wNwaTNMSJC9IDk0yk3BTQFidW2kH0pbmUI1UZLj0pJZTJHGx7jafqOlK4WHSz4PHQ1AZ896jbLLz1Hao6ksm3ZU0goQfKacooAfETHJuH5VLJHjBH3T0qNRU8WCCh79PrVIQtr8swz0PBoePZIVPY0gqVm8xt3eqAjC1MKaOKkAFMBVqVRUQNTKaoQ4U4UgpwHpTRI4EinB6jopgSZzTl5NRipEpoCeMVOOKhRqkzVEskBpwNRinCmIeDmnCo804GkBJSimU4UALSGlpDSAjcZFYmqWolU8VuGs++OIzVxEzzzUrBQTxXI6lZgZIFdprVz5bGuUuGac4xSkOJyNxG27Ap9rprzMMg10sWj+cwJWtyx0QL/DWXKaXMGx0cgD5a3rbTio+7XQ22lcD5a04dKHHFWoEcxhW9keOK1rWzPpWtFpwH8NXI7MKOlPlFzFe2t8DpV9I+KckWBUoFMkQClFOoFAAKWilpAFLSUCgtC0UtJQAClpBSmlcAop2KKQAKKKaaAHU2iigB1NopKoANFFFADcUUtFAHk7WNMNj7V1TWPtTDY+1LkL5zlzY+1J9hPpXUfYfaj7B7UuQPaHMfYPal+we1dSLD2pRYe1PlJ5zlRYe1O/s4+ldWNPHpS/YB6Ucguc5Yaf7U4ab7V1QsB6U8WI9KfILnOUGne1PGme1dWLEelOFiOOKfIg5zll03jpT10z2rqRZD0p62i+lHKLnOYXS/apF0r2rpltB6VItsPSnyoXMc4mlc9KmbTMnp04roRABT/JFOyDmZzg0oelTR6cB2rd8kUoiFIm7MQaaM9KlTT1HatfYKXbVC1M0WK+lPFivpWgFoIqRleGBVPSrZ6UxRzT2+7QUipNWfKavzVSkFRItFGSoWOKsuM1AwrE0IicUmaUr+VAwKADFBWgtSZpAJTSaU1GaljH+ZTlk4qA8YpCx5ouFi15wpGlqpk0biaLisXElp2/mqiNinebzRcLFwGnbhVTzaUzUXCxbDChnAFUxN+VRyS5FFwsTefzU6S5rLDHNWUl2imM0Q3FG6qiz0omoFYsmSmF6rmamGTmlcLFvzcKaRZc1WZv3YH40zcelFx2LMs4FQNMWNQvmhakB5JqdRusn9UYH86rirVuMxyL6x5/KmgI8ZOfUU3ZUyLxj8aXZ81FgGKnFPVak2808LVAMAp6ikxg1KtMB0kYb5hxnrSBcVJH6HoeKaw2k0wALTgKRTUo5FNCG4pwHNKKdimACnA0gp1MkXNJmigimAtSLUQWplFAEimpVNRLzUqirESKafUYqRRTJF704Uop2KQCCn03FOFACikoozQAxqoXoypFX3rNvJMCmiTjtWsPNJrIh0b950rrJsO1SQ2wJ6VTVx3sZFppQGOK2bbTQv8NXobcDtV6OICjQlsrRWgHarSQgVKFpw6UXJsNVKeFpQKWobLExRijFLilcdhBQKXFFMQlLS4ooKEpRRg0CgA/GgUop1AwooptABRRRQAetFFOxUgNooNFCAQ0UGkqgFpKWk7VICUUUU7gZBhHNHkCrRFGytSCt5A9KBCKs4oxQIreT7U4Q+1T4oAqSbkPlil8qp8UYqrjIREKd5YqXFKBQBDsp20VJijFFxWGbaULT8UuMVIxu2lxS0tO4DcUuKXGadii4WGYoA60+jtUjGYoNPNF
AxgpMZNLT0HNCEKsdNccVOBTHHFFxlCUVTk4q/KKpyCpZaKTDmoGWrT1A1ZsohK0wrUjU0j86kojximmnGm4pANpDTz1pMUhIhK0hGKmNMK0FERFNHWpGFNxUAITSZpSKQimAueKTfTRTsUANyQaCaSlApDEFLmngUbaYhAxBp/mcU3FLigBQaVeTSAVIgxzQAh6/SinKOadigCIrQq1LtpwWiwxoWrNmP3pHqrD9KjVantV/fpTQrjUHzZqUrjn8KaBzUrVQEYFLSGnr0oAMcUDiilHNADg1Pb5lz6VFinxnB56UxABTxQRg0DimA+nDtTRS0wHUoYUygUAP3CgGm4xTgcUwJFqQGoc04c0ySdSKmFQIOferKVSESKvFOApBT1FUSKtSCkFLUgLikxTs0UhjKbT6YRVCGP0rLvOa036VnTruamgZmrblj0q/Bb4FSww1aVBTIGpHUoWlAp1IVhuKdiilxQVYQU7FAFPpAMAp+KKKCgxTcU6igLDRQaKKAsIKWjvR/OgB1Np1NoAdRQKKkaA0006g0DG0CnU01Qgoop1ADTSU+m1ICUmKdSGgYlFO/OigClRT6bitDIbilxS0tADdv0pQKdQBQTYTFGKdTqdwsR4pafijFIoZilxTqKBWG4p1GKKRQCilFFAWEpfWlooCwhoFKaQUABpKDSc0APVc08LSIOlTAYpNghMVE/SpTUb9KSApzVSkHNXpjVCU0MpFWSomqV8VExrNlkRpjGldutQtnmpKEJoHNJtpelIApDS0e1ADetBWncCkJ4pDGEVGRTzz9KVqAITTSKl20m3mpsMiA5p2OKk200iiwiMDmlFOxQBQhiGheaUihRigQdzS0oHNKBzTsAKtS4wMUuMClFMBAKdikB5p9KwABTgBQKB1pgOAqaAfvk+tQg81NAf3yfWmAjDk0/8AhprffNHb8aBDRT1plPBoGLSiiigQ4e9LikpRTAl6qPUUmBSrjPsaRvlNVcBcUCmCTFO8ygB1FICKKAHilApgNPGaZJII6kVMUwZqRRTESqKlSolp4NUgJwacDiowcU5eaZLJRTxUY6U+gBwopuaM0rAONNNLmkNAMryZxVF2+ar0prMuGwaaAtxNmrIrNt5a0EbiqJJKUUgp1IAxS0CnUgCm06igBtOoooKCjFLRSGJRRRTAKM0UUAFNp3ajFAgooooGFBoFFSAUUYoxQA3NOoxRVAFFFFSAU00UUDCikJxRQBXNJin0VdzMZilp1NNABTqbTqAsFAooFABilxSUooAWikp1K4Ce1FFFMAoFAooAKWkpTQAGkooFK4CHrT0FMp6UwJAKdQKKRQhqKSpTUbUkSU5qz5a0ZaoygU2Uii/FQ81YkqA1kUMIpmKkNGKksiIwKbipcUmKAIsYpueaVqQCkAp6UwjNP7UoFIY0LxSYp9FADCPypNtPNIaAGYppWpBzSlaAIsYpxFOooAj20h4qTtSGgAFKPU0Cl7UAJnNPHSmCnigApVoxQKAJKYx5pQeKaaAFzzU9uczp9arVPbf69aSAex/eH60ucgVH3pw61QDh1p/8qZ3pRQA8U4UwVIKYBTgaSkoJuSinMNyg1AD+VPRucetAxMUtB706gBcCnACm0veqAkCgUoOKi3GlElMCcGpFNVg1SA0ybFkNTlOarq1SK3arEWB9akU1AtTLQIlFOpgp2aBC06mA0uaAHGmGlzSE0AVLlsCsi4ky1bFwuVNYdxGfNpMaLNt1rVhHFZdola8Q4qiWSAU7FFLSELTTThR3oKCiiigAopaSgBc0UCnjipGM5pKlpjCi4xpoooqhBRRQKADFFFFABRRRigBRRQKKQCUUppKYBTacTTaACg0lFACUUUUDIqKdSUzMTFFOpO9FwsJRinUGgBtKKKUUAHWjFFLSuAlLRQe1MBKDSmkoAKKWloAbQKcaSgAxSUtJSASnrxTO9PFMB4NOpmaM0h3Anio3p7VGwzTEV35qrKtXHHWq0vWkykU
JI6hZTVxwAKrtis2UiDZQVqTNNJqBkZqNqec00igoiNAFSbaUrgUgITzTgOKdgZoNAyJqQU5qbSAcOaRqUcCmHk0APUcUhoBooASg9KQmmA5oAcTSGjvSmgBBT8U2nigBppRS4oAoAUGlpDSigBQKDTwKDQAwCpoR+8z7GmLUsY5J9qYEa/KacOtBHNKKAFpRSCloEOFOBxTVNONMB2aTNIKWgABzThSUUAPPQGgCkHIxTM80ATDilzUOfenA0ASUopoIp4YVVwHLTgtMBBp6mqJJVWnio92actMROhqVahWnrVATg0tRA076UEkgpajBpwNADqDSZprScUwIZ2+WsmQZlq/cScdapqMtUsZbtY60FHFVrdeKuUyB1AopRQUJRRS0AApMUooxSAMUUUuKQwFOFIBS5oGLTWozSGgBKSnZpKYBikopcUCEopaSgAooopgFKaSigBTSUZooAaaSn000AxDSUUlAgopKKAuGKMU6kpkiUUUGgBKDSmkoAMUUoooASnUlKaQCUd6KKLgApaQUUwA0ZpKKVgFNFFKKYBjmkxS0UgG4pwpDTWOM0wH5pDJUPmYprPmgCUy0wyiomaoi1A0SSS1WeSlY1A8gqWNDXaqzNUjMKiODWbLGUuKdxTCwqQQmKSjOaXbQA0Uj04jFMNBQgpDgCjkGmtSGNJyaWgCpKQEeKQDmnE0HgUANPFHagHJpGbmgBjdaFoao1bnFAEoFOFIKDQAUvQUoprGgBRS96h381IpyKAJBS+tMU05aAHijOKDxTCaAJBUy/daqytmpt4EZoQBmnCow3FOBpgOFLmmUoNADhQSaTNBNADgc0/OKgzTw1AEmaQuKjJo60xWJFbFOY85pg4pQeMUhjhQQaYKduIpgOGactNDCnA0ASAU9TioC+KUS1VxFkGplaqiyU8NTuSWg9ODmqoaniSncRaDU8ScVVVjTwasCfzPelDZqLFKKBWJDJxVO4uCoqyWGKrT7WFJjM97hmNWLYEmqzKPM4q9a9qYM0oRgVYFRR1KKDMWlpPalFBQUUU4DFSAAUUuaM0hiEUDrQQTSgYFMYGkNKaQ0gEoopaYkJSU6koAQ0YopaYCGkp2KKAG0tFFA7BSGl7UlAWCjNNNApiCkp9NNAMaaQ0tJmgQYooooAWkxS0UyRvailoxSuAlJinUlFwCiiii4BmgUd6WgBDS0UelMBKX86KTtQAUlLjNFJgApaBS0kAhopDSUwFNRuOKkprcimBSlbbUP2ipLleKzXbDYoAuGemGU1CvNSVBQxpCBVeSXmpnGaheOpY0QtNSeeKikWq7HGai5Vi55lJvGaomU0sb85NFx2NBOKduqmbgUCcnvRcZbNIagElO80UAONFM3flSFqQxc80jNgUmajY0gFDc0rPioxwKYzc0BYlDUwnmmA0pNACs3FMXnmomahG4oGWg1OzUKGlY8UCJd2aYxzUQkxRvoGx5qZKrhqlSQdKBMmNKpqNzSb6AJWkppPFQeaM04NxQBIDTt3FRqRTiKAHBqlVuKiA4oFAFgNQaiWng0AG6nBqYaaDjNAE/WkNNDUuaYCg04Gos4oEopBYn5pB1qLzgKb5/NMLE/IPtT+1QGUEUwSnpSAn704NUAJp+3NAWHluaep5qHBpy5pgWAakVqrgmnA00BYElPDVCDThVXEWFJp+cVXWSpN4qiCUNSlsVB5oFNaYYphYlkkxVSSbNMecetVmbJp3AnXlq07VelZkCktWzax8ChAy4nSpKRRxS4pki0UUUAKKXNJSVLGOpRSA0ZpDHUgozRQAUlOpKAEpaDSUxAf0ooooGJS0Ud6BBSGig0DCj+dFFAXEpKdSUAJRS0mKBBmmU/FM6jiqASkbpSeYDIyhgSvUDtSnp/WgBpooNFAh+KKKDTJEopaSkAUdqMUUDsFFLRQIQ0Cg0GgAooooAKKMUUAJRS0dqYAKWkozSADSGnUlADaQmnU00wK0/3axp+GrYmPymsO8kwaQ0
TREVZAzWZBLk4rVh6UDEMeKhlWrZqBxUtAjMlXrVV461ZIs1Vli5rNotGY681C2avPFiq7x1BaK2fWnLNTXjOah5U1Fxl/fnHNKJOaoebipYpe5p3FYtmWkE3NVWmyaQycUXKsXDNQGBqj5mOaRZ8nrSuFi+xqPPNRedSBqLiLA5prcZpiyUM1MBhpAcUhNR5y3tQUWFkokkqAybagmmLcCi5JM04WmiYk1WwTip448c0FFjfxT42qvnmgy9hRcVi6ZhUfn5+lUnlOMZ5pynEdFx2LKPk1OG4qhGeanEuKLklkPg1KHDDINZzS5ojnKmlzDsaiyCnVlmc5zT4rxs0XCxpgigmqfn5ponOadwsXS3FRmTBqsZTmlzuxTuFifzttBuag2k0oipXEOM5PSo2kbNLs5qRUBqgGgt609M0/wAvFPRcUrAKoJqRRjrSqKkxTENA5pRmnheKeFoAQU7bShaeOlVYBgpcUpFJRYQZxS7xUbmoTJzQMtF8UGaqpmqNpqq5Ni40tQPNx1qo09RmbJouOxKXJNSR5JqFTmrcI5piZftRyK2IBis+1i6VqRDirREicdKU0wuq9TTt2aGIUU7FIKdSGJikIpw5paTGR4NKop9JQACiiigBaKQ0UAFGKKKACiikpghcUhpaSkACkpaDTASilNJQAUlFIaBBmjrQabQAcYpvTpS02qAaFAJPrTs8UUZpWASikNFMRJRiigUEgaQipOPxo4NBRFS0p69KKAEpKWkoJAUd6KKACg0tJQAUtApTQAlFHpRQAlH8qXFJQACig0maB2FpjU+o5KBFC5PBrn72Tk1vXXQ1zt6eamTLihltN+8ret5crXKxviStyzm+Uc0RCRrDFIwzTEYYp5aqEQsBVeRRVphUTrUFIoOlV3jrQZearulQ0UjNeKq7xVpPHVdo+ayaNEZ7RdajKkVfaOmGKpsMoYagEirhiFNMApDKjtxUKuwNW3gqNoMdqTQDFmqZJarshpobbQhlgzEGnCbPeq5OajdtozVXJsXnfjrTEfms9bnJwalWWncLElxLzUURxkmmyNk0L92puOxYRsmrOeKqRCrHNO4Ck4+tM5NSAZFAj5oERbCTUmPlqUR07bxRYCBBinYqULTgtMCALT/LqcR08RUWArCLIpwhqfy8U8CnYLkKx04R81LilAosFyPys0gBU1YxRtzVBciBJqUU3GDUmOlMkTZSgc09akC0ANFKF5p4FOXigQCnAUYpRQA6pFPFRinUwJBS1GGpSeKpEsU0zdil8zdVaVttSMWZuM1UaanSTDFUpWPagZM03vUTTn1qt+9J4FPWBz3qbgKZCaljUk0+O3NWooParQBDFWhBD0pIYqvQx1SJZbtlxiry4AqrH2qcZNUiGO2jOaeKYtSgZoYBmlAzShPWnUhgOKWkzRmgAoJooNABR6UUtACUUUUAFFFFAAe9AoxRigQUUUUDDFJ3paSgBO9IaU9aaapCYUUGikA00UGkoAbRQabVALSUUUkIM/SikopgSU9c9aaKePcUEi96aOpNL2NB7CkUIQc0hBp560dKLgRin4popT60wEIHpSAUH070vagAx7frSY5xT/emL70AGBnNL27ZpwpnegBeKQ8dqfTeSKCRvWkpTRQUIaMZpaKAExTHqTFNYUAZl0OK568jyTXU3CZBrCu4DnpUSRSMHy8NmrttLgiop0IPSmK4Q0IbOggfIq0tYtpc5wK14jkVVyCUio2FSZpQKYIrMvtULrV5lqJ0qGUZzR1A8daLR1C8dZtFpme0eKjMdXXjqIpWZRUMVBjqyY6aRSKKxiqNoquEVGy0ElB4vSqkkVajiq0kdJoq5nZ2nmmSnip5kqjJJ1FIZCzANmpom3VQZj52O1X4BnFJDLOKkRaRFqVRiqFcEXBqcLSLUqimITaRSAGpgKXbmmgEWlxShadg0AAFLtzQKkXmqExmMVIpFDLTQMUCFemgmn0mOaVgCpAaYaeo4pgOHSlFIBS
imApFOFItKBzQA5RzUnemgYpw5piFHSn8VHnFLmkAoNLmmE0x3xTAs5ozUAfIqMy4NFwJmamLcc4NQtL1qq8oBzRcC8745FRvNuHvVT7SCOtQG4+bFFwsXiQTnGPammMGo45M1YSgGNSH2qdIKegFW44807ARJBVhIPap0iqdIqdhEMcNWo4+lPSOpkjqibiIDUmcd6cF4qJgSaaJJUOcVYUVDDHirAoEKKWm5pCaQxTRSGjNAXFpabmjNAx1LSUZoAWikzRQAUUUUAHc0UUUCFpKKKADrSClozQMTrTTTqaaaAQ02nGmmmhCUlFBosAhpppxpppiEooNJQAUU3NFKwFiikpf4jTAXNFJS0AFBFFHagAFJ6UtBoASjnFKKBQSNANLmikNAC/zpMmlpDQULmkoooJCilpBQULRR2paAGmil7UdqAInXNZ9xBkHitJqry9KAOburfGeKx50Iaumu+hrBn+9WbKQ20O1veuituY65yD/AFororT/AFQqoiZYA5pwpDTqoQ3JzQV4qQUGpGVWjqJos1dPSom+7UsZSeKoGWr0lVTWTNEysRUZFTt96om61DGMIpjVIaa1AFdxVd161ZboaiegooTjg1hXUnlyV0M3Q1zOo/epMaI1YNJWlbnpWLF98VsQdKEDNBCKlqvH0FWEqmIcKmSoRUqUASCpQaiHWnigB4p9NXoaUVRIuKUHFIaB1oKH0lApR0pkiUooPSkHegB9LmmDrR3oAlWnDFMFKv3qAH0Dg0dqSgCUnimBsGlPSo260AS5pucYpq/dpDQBIWyKiduKUdKjegBqTYNMnlxzUP8AFTZ/uGgBrXfNVprj0qrL96o3qblFhJCT1qVV3GoIetW46YE8O4Cr8earw9avRdqpEMliQmtO2g4qtF1rShrSJDZKkPFSCOnpTqqxFwVKeFpy/dp1ILjcU3y+af3paoAHSgv70w1G9AE/miml/Sq6dasL92gBytmn0nelFJjENApaSgBaXNNNLSAdmjNNFFFhj80lIKDRYB2cUA000ooAWilpBSAKKWkpiENIaWkNADaYaU0hpoBDSGlNIaYCUhpe9IaYhpphNKaaKAEJoprdaKQH/9k=", 133 | "imageHeight": 634, 134 | "imageWidth": 950 135 | } -------------------------------------------------------------------------------- /get_miou.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from PIL import Image 4 | from tqdm import tqdm 5 | 6 | from segformer import SegFormer_Segmentation 7 | from utils.utils_metrics import compute_mIoU, show_results 8 | 9 | ''' 10 | 进行指标评估需要注意以下几点: 11 | 1、该文件生成的图为灰度图,因为值比较小,按照PNG形式的图看是没有显示效果的,所以看到近似全黑的图是正常的。 12 | 2、该文件计算的是验证集的miou,当前该库将测试集当作验证集使用,不单独划分测试集 13 | ''' 14 | if __name__ == "__main__": 15 | #---------------------------------------------------------------------------# 16 | # miou_mode用于指定该文件运行时计算的内容 17 | # miou_mode为0代表整个miou计算流程,包括获得预测结果、计算miou。 18 | # miou_mode为1代表仅仅获得预测结果。 19 | # miou_mode为2代表仅仅计算miou。 20 | 
#---------------------------------------------------------------------------#
#   miou_mode selects what this run does:
#     0 -> full pipeline: generate prediction pngs, then compute mIoU
#     1 -> only generate the prediction pngs
#     2 -> only compute mIoU from existing predictions
#---------------------------------------------------------------------------#
miou_mode       = 0
#------------------------------#
#   number of classes + 1, e.g. 2 + 1
#------------------------------#
num_classes     = 2 + 1
#--------------------------------------------#
#   class names, same list as in json_to_dataset.py
#   NOTE(review): this default VOC list has 21 entries while num_classes
#   is 3 — the two must stay consistent for show_results' labels; confirm
#   the intended pairing for your dataset (e.g. use the 3-entry list below).
#--------------------------------------------#
name_classes    = ["background","aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
# name_classes = ["_background_","cat","dog"]
#-------------------------------------------------------#
#   root folder of the VOC-style dataset
#-------------------------------------------------------#
VOCdevkit_path  = 'VOCdevkit'

# read the validation split; 'with' guarantees the handle is closed
# (the original left the file object dangling)
with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"), 'r') as f:
    image_ids = f.read().splitlines()
gt_dir          = os.path.join(VOCdevkit_path, "VOC2007/SegmentationClass/")
miou_out_path   = "miou_out"
pred_dir        = os.path.join(miou_out_path, 'detection-results')

if miou_mode == 0 or miou_mode == 1:
    # exist_ok avoids the check-then-create race of the original exists()+makedirs()
    os.makedirs(pred_dir, exist_ok=True)

    print("Load model.")
    segformer = SegFormer_Segmentation()
    print("Load model done.")

    print("Get predict result.")
    for image_id in tqdm(image_ids):
        # images in this repo are stored as .png inside JPEGImages
        image_path = os.path.join(VOCdevkit_path, "VOC2007/JPEGImages/" + image_id + ".png")
        image      = Image.open(image_path)
        # get_miou_png returns the predicted class-index map as a grayscale png
        image      = segformer.get_miou_png(image)
        image.save(os.path.join(pred_dir, image_id + ".png"))
    print("Get predict result done.")

if miou_mode == 0 or miou_mode == 2:
    print("Get miou.")
    # hist is the confusion matrix; the rest are per-class metric vectors
    hist, IoUs, PA_Recall, Precision = compute_mIoU(gt_dir, pred_dir, image_ids, num_classes, name_classes)
    print("Get miou done.")
    show_results(miou_out_path, hist, IoUs, PA_Recall, Precision, name_classes)
import base64
import json
import os
import os.path as osp

import numpy as np
import PIL.Image
from labelme import utils

'''
Notes for building your own semantic-segmentation dataset:
1. labelme 3.16.7 is the tested version; some other versions fail with
   "Too many dimensions: 3 > 2".  Install with: pip install labelme==3.16.7
2. The generated label images are 8-bit paletted pngs: they look like color
   images, but each pixel value is the class index, exactly like the VOC
   SegmentationClass format, so they can be used directly for training.
'''
if __name__ == '__main__':
    jpgs_path = "datasets/JPEGImages"
    pngs_path = "datasets/SegmentationClass"
    classes = ["_background_","aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
    # classes = ["_background_","cat","dog"]

    # iterate file names directly instead of indexing os.listdir() by range
    for file_name in os.listdir("./datasets/before/"):
        path = os.path.join("./datasets/before", file_name)

        # only process labelme annotation files ('.json' instead of the
        # original bare 'json', which would also match names like 'foojson')
        if not (os.path.isfile(path) and path.endswith('.json')):
            continue

        # 'with' closes the handle; the original json.load(open(path)) leaked it
        with open(path) as f:
            data = json.load(f)

        # prefer the embedded image; otherwise read the referenced image file
        if data['imageData']:
            imageData = data['imageData']
        else:
            imagePath = os.path.join(os.path.dirname(path), data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = f.read()
            imageData = base64.b64encode(imageData).decode('utf-8')

        img = utils.img_b64_to_arr(imageData)
        # map every label name in this annotation to a dense integer value
        label_name_to_value = {'_background_': 0}
        for shape in data['shapes']:
            label_name = shape['label']
            if label_name in label_name_to_value:
                label_value = label_name_to_value[label_name]
            else:
                label_value = len(label_name_to_value)
                label_name_to_value[label_name] = label_value

        # label_values must be dense (0..n-1) for shapes_to_label to be valid
        label_values, label_names = [], []
        for ln, lv in sorted(label_name_to_value.items(), key=lambda x: x[1]):
            label_values.append(lv)
            label_names.append(ln)
        assert label_values == list(range(len(label_values)))

        lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

        # compute the stem once instead of repeating split(".")[0] four times
        base = osp.splitext(file_name)[0]
        PIL.Image.fromarray(img).save(osp.join(jpgs_path, base + '.jpg'))

        # remap per-file label indices to the global 'classes' indices
        new = np.zeros([np.shape(img)[0], np.shape(img)[1]])
        for name in label_names:
            index_json = label_names.index(name)
            index_all = classes.index(name)
            new = new + index_all * (np.array(lbl) == index_json)

        utils.lblsave(osp.join(pngs_path, base + '.png'), new)
        print('Saved ' + base + '.jpg and ' + base + '.png')

# (repo dump residue followed here: model_data/README.md — "pretrained weights,
#  download via Baidu Netdisk" — nets/__init__.py, and cached .pyc links)
/nets/__pycache__/backbone.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/nets/__pycache__/backbone.cpython-37.pyc -------------------------------------------------------------------------------- /nets/__pycache__/backbone.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/nets/__pycache__/backbone.cpython-38.pyc -------------------------------------------------------------------------------- /nets/__pycache__/conv_.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/nets/__pycache__/conv_.cpython-37.pyc -------------------------------------------------------------------------------- /nets/__pycache__/conv_.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/nets/__pycache__/conv_.cpython-38.pyc -------------------------------------------------------------------------------- /nets/__pycache__/segformer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/nets/__pycache__/segformer.cpython-38.pyc -------------------------------------------------------------------------------- /nets/__pycache__/segformer_training.cpython-38.pyc: -------------------------------------------------------------------------------- 
# ---------------------------------------------------------------
# Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# ---------------------------------------------------------------
import math
import warnings
import numpy as np
from functools import partial

import torch
import torch.nn as nn


def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """In-place truncated-normal init (runs under no_grad).

    Cut & paste from PyTorch official master until it's in a few official
    releases - RW.  Method based on
    https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    """
    def norm_cdf(x):
        # Standard normal cumulative distribution function.
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Inverse-CDF sampling: draw uniformly between the CDF values of the
        # bounds, then map back through the inverse normal CDF (erfinv).
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], translated to [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Inverse CDF transform to get a truncated standard normal.
        tensor.erfinv_()

        # Transform to the requested mean and std.
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to guard against numerical spill outside [a, b].
        tensor.clamp_(min=a, max=b)
    return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    r"""Fill the input Tensor with values drawn from a truncated normal distribution.

    The values are effectively drawn from :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within the bounds.
    Works best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)

#--------------------------------------#
# GELU activation via the tanh approximation
#--------------------------------------#
class GELU(nn.Module):
    # sqrt(2/pi) is a constant; hoisted so it is not recomputed every forward
    _COEF = float(np.sqrt(2 / np.pi))

    def __init__(self):
        super(GELU, self).__init__()

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(self._COEF * (x + 0.044715 * torch.pow(x, 3))))

class OverlapPatchEmbed(nn.Module):
    """Overlapping patch embedding: strided conv + flatten + LayerNorm.

    Returns the token sequence together with the spatial size (H, W) of the
    downsampled feature map, which later layers need to restore 2-D layout.
    """
    def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768):
        super().__init__()
        patch_size = (patch_size, patch_size)
        # padding of patch_size//2 makes neighbouring patches overlap
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
                              padding=(patch_size[0] // 2, patch_size[1] // 2))
        self.norm = nn.LayerNorm(embed_dim)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Same init scheme used throughout the backbone:
        # trunc-normal for linear, (0,1) for LayerNorm, fan-out normal for conv.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        x = self.proj(x)
        _, _, H, W = x.shape
        # (B, C, H, W) -> (B, H*W, C) token sequence
        x = x.flatten(2).transpose(1, 2)
        x = self.norm(x)

        return x, H, W

#--------------------------------------------------------------------------------------------------------------------#
# Spatial-reduction multi-head attention (SegFormer's efficient attention).
#
# q/k/v are produced as usual, scores = softmax(q @ k^T * scale), out = scores @ v.
# To cut cost, when sr_ratio > 1 the keys and values are computed from a
# feature map downsampled by a strided conv (e.g. a 128x128 map reduced to
# 16x16 for Block1), while the full-resolution queries are kept, so each of
# the H*W query tokens still attends over the condensed key/value tokens.
#--------------------------------------------------------------------------------------------------------------------#
class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.q = nn.Linear(dim, dim, bias=qkv_bias)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            # strided conv that condenses the k/v feature map by sr_ratio
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)

        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Shared backbone init scheme (see OverlapPatchEmbed._init_weights).
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        B, N, C = x.shape
        # bs, N, C => bs, num_heads, N, C//num_heads
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        if self.sr_ratio > 1:
            # restore 2-D layout, condense spatially, flatten back to tokens
            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
            x_ = self.norm(x_)
            # => 2, bs, num_heads, N', C//num_heads
            kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        else:
            kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        # scaled dot-product attention over the (possibly condensed) k/v tokens
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        # weighted sum of values, heads merged back into the channel dim
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        return x

def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
    if keep_prob > 0.0 and scale_by_keep:
        # rescale surviving samples so the expected activation is unchanged
        random_tensor.div_(keep_prob)
    return x * random_tensor

class DropPath(nn.Module):
    """Module wrapper around drop_path(); active only in training mode."""
    def __init__(self, drop_prob=None, scale_by_keep=True):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)

class DWConv(nn.Module):
    """3x3 depthwise conv applied to a token sequence (used inside the MLP)."""
    def __init__(self, dim=768):
        super(DWConv, self).__init__()
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, x, H, W):
        B, N, C = x.shape
        # tokens -> 2-D map -> depthwise conv -> tokens
        x = x.transpose(1, 2).view(B, C, H, W)
        x = self.dwconv(x)
        x = x.flatten(2).transpose(1, 2)

        return x


# class MFF(nn.Module):
#     '''
#     Multi-scale feature fusion (MFF) — kept for reference, currently unused.
#     '''
#
#     def __init__(self, channels=640, r=4):
#         super(MFF, self).__init__()
#         inter_channels = int(channels // r)
#         kernel_size1 = 1
#         self.conv1 = nn.Conv2d(2, 1, kernel_size1, stride=1, padding=0, bias=False)
#         self.local_att = nn.Sequential(
#             nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
#             nn.BatchNorm2d(inter_channels),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
#             nn.BatchNorm2d(channels),
#         )
#
#         self.global_att = nn.Sequential(
#             nn.AdaptiveAvgPool2d(1),
#             nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
#             nn.BatchNorm2d(inter_channels),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1,
#             padding=0),
#             nn.BatchNorm2d(channels),
#         )
#
#         self.sigmoid = nn.Sigmoid()
#
#     def forward(self, x, residual):
#         xa = x + residual
#         xl = self.local_att(xa)
#         xg = self.global_att(xa)
#         xlg = xl + xg
#         avg_out = torch.mean(xlg, dim=1, keepdim=True)
#         max_out, _ = torch.max(xlg, dim=1, keepdim=True)
#         xlg = torch.cat([avg_out, max_out], dim=1)
#         xlg = self.conv1(xlg)
#         wei = self.sigmoid(xlg)*xlg
#         xo = x * wei + residual * (1 - wei)
#         return xo

class Mlp(nn.Module):
    """Transformer feed-forward block: fc1 -> depthwise conv -> act -> fc2."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=GELU, drop=0.):
        super().__init__()
        # fall back to the input width when hidden/output widths are not given
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features

        self.fc1 = nn.Linear(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # shared backbone init: trunc-normal linears, unit LayerNorm, fan-out convs
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = (m.kernel_size[0] * m.kernel_size[1] * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        # the depthwise conv re-introduces spatial context between the two fcs
        h = self.fc1(x)
        h = self.dwconv(h, H, W)
        h = self.drop(self.act(h))
        h = self.drop(self.fc2(h))
        return h

class Block(nn.Module):
    """One transformer layer: pre-norm attention and pre-norm MLP, each residual."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio
        )
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

        # stochastic depth on both residual branches; identity when rate is zero
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # shared backbone init: trunc-normal linears, unit LayerNorm, fan-out convs
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = (m.kernel_size[0] * m.kernel_size[1] * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        x = x + self.drop_path(self.attn(self.norm1(x), H, W))
        x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
        return x

class MixVisionTransformer(nn.Module):
    def __init__(self, in_chans=3, num_classes=1000, embed_dims=[32, 64, 160, 256],
                 num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
                 attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
                 depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]):
        super().__init__()
        self.num_classes = num_classes
        self.depths = depths

        #----------------------------------#
        # the transformer trunk has four stages
        #----------------------------------#
        # per-block stochastic-depth rates, increasing linearly over all blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

#----------------------------------# 371 | # block1 372 | #----------------------------------# 373 | #-----------------------------------------------# 374 | # 对输入图像进行分区,并下采样 375 | # 512, 512, 3 => 128, 128, 32 => 16384, 32 376 | #-----------------------------------------------# 377 | ## patch_embed,通过定义卷积操作的步长/stride,时相下采样## 378 | # # stage1, 大卷积核7*7 4倍下采样## 379 | self.patch_embed1 = OverlapPatchEmbed(patch_size=7, stride=4, in_chans=in_chans, embed_dim=embed_dims[0]) 380 | #-----------------------------------------------# 381 | # 利用transformer模块进行特征提取 382 | # 16384, 32 => 16384, 32 383 | #-----------------------------------------------# 384 | cur = 0 385 | self.block1 = nn.ModuleList( 386 | [ 387 | Block( 388 | dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale, 389 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[0] 390 | ) 391 | for i in range(depths[0]) 392 | ] 393 | ) 394 | self.norm1 = norm_layer(embed_dims[0]) 395 | 396 | #----------------------------------# 397 | # block2 398 | #----------------------------------# 399 | #-----------------------------------------------# 400 | # 对输入图像进行分区,并下采样 401 | # 128, 128, 32 => 64, 64, 64 => 4096, 64 402 | #-----------------------------------------------# 403 | self.patch_embed2 = OverlapPatchEmbed(patch_size=3, stride=2, in_chans=embed_dims[0], embed_dim=embed_dims[1]) 404 | #-----------------------------------------------# 405 | # 利用transformer模块进行特征提取 406 | # 4096, 64 => 4096, 64 407 | #-----------------------------------------------# 408 | cur += depths[0] 409 | self.block2 = nn.ModuleList( 410 | [ 411 | Block( 412 | dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale, 413 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[1] 414 | ) 415 | for i in range(depths[1]) 416 | ] 417 | ) 418 | 
self.norm2 = norm_layer(embed_dims[1]) 419 | 420 | #----------------------------------# 421 | # block3 422 | #----------------------------------# 423 | #-----------------------------------------------# 424 | # 对输入图像进行分区,并下采样 425 | # 64, 64, 64 => 32, 32, 160 => 1024, 160 426 | #-----------------------------------------------# 427 | self.patch_embed3 = OverlapPatchEmbed(patch_size=3, stride=2, in_chans=embed_dims[1], embed_dim=embed_dims[2]) 428 | #-----------------------------------------------# 429 | # 利用transformer模块进行特征提取 430 | # 1024, 160 => 1024, 160 431 | #-----------------------------------------------# 432 | cur += depths[1] 433 | self.block3 = nn.ModuleList( 434 | [ 435 | Block( 436 | dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale, 437 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[2] 438 | ) 439 | for i in range(depths[2]) 440 | ] 441 | ) 442 | self.norm3 = norm_layer(embed_dims[2]) 443 | 444 | #----------------------------------# 445 | # block4 446 | #----------------------------------# 447 | #-----------------------------------------------# 448 | # 对输入图像进行分区,并下采样 449 | # 32, 32, 160 => 16, 16, 256 => 256, 256 450 | #-----------------------------------------------# 451 | self.patch_embed4 = OverlapPatchEmbed(patch_size=3, stride=2, in_chans=embed_dims[2], embed_dim=embed_dims[3]) 452 | #-----------------------------------------------# 453 | # 利用transformer模块进行特征提取 454 | # 256, 256 => 256, 256 455 | #-----------------------------------------------# 456 | cur += depths[2] 457 | self.block4 = nn.ModuleList( 458 | [ 459 | Block( 460 | dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale, 461 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[3] 462 | ) 463 | for i in range(depths[3]) 464 | ] 465 | ) 466 | self.norm4 = 
norm_layer(embed_dims[3]) 467 | 468 | self.apply(self._init_weights) 469 | 470 | def _init_weights(self, m): 471 | if isinstance(m, nn.Linear): 472 | trunc_normal_(m.weight, std=.02) 473 | if isinstance(m, nn.Linear) and m.bias is not None: 474 | nn.init.constant_(m.bias, 0) 475 | elif isinstance(m, nn.LayerNorm): 476 | nn.init.constant_(m.bias, 0) 477 | nn.init.constant_(m.weight, 1.0) 478 | elif isinstance(m, nn.Conv2d): 479 | fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 480 | fan_out //= m.groups 481 | m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) 482 | if m.bias is not None: 483 | m.bias.data.zero_() 484 | 485 | def forward(self, x): 486 | B = x.shape[0] 487 | outs = [] 488 | 489 | #----------------------------------# 490 | # block1 491 | #----------------------------------# 492 | x, H, W = self.patch_embed1.forward(x) 493 | for i, blk in enumerate(self.block1): 494 | x = blk.forward(x, H, W) 495 | x = self.norm1(x) 496 | x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() 497 | outs.append(x) 498 | 499 | #----------------------------------# 500 | # block2 501 | #----------------------------------# 502 | x, H, W = self.patch_embed2.forward(x) 503 | for i, blk in enumerate(self.block2): 504 | x = blk.forward(x, H, W) 505 | x = self.norm2(x) 506 | x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() 507 | outs.append(x) 508 | 509 | #----------------------------------# 510 | # block3 511 | #----------------------------------# 512 | x, H, W = self.patch_embed3.forward(x) 513 | for i, blk in enumerate(self.block3): 514 | x = blk.forward(x, H, W) 515 | x = self.norm3(x) 516 | x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() 517 | outs.append(x) 518 | 519 | #----------------------------------# 520 | # block4 521 | #----------------------------------# 522 | x, H, W = self.patch_embed4.forward(x) 523 | for i, blk in enumerate(self.block4): 524 | x = blk.forward(x, H, W) 525 | x = self.norm4(x) 526 | x = x.reshape(B, H, W, 
-1).permute(0, 3, 1, 2).contiguous() 527 | outs.append(x) 528 | 529 | return outs 530 | 531 | class mit_b0(MixVisionTransformer): 532 | def __init__(self, pretrained = False): 533 | super(mit_b0, self).__init__( 534 | embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], 535 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], 536 | drop_rate=0.0, drop_path_rate=0.1) 537 | if pretrained: 538 | print("Load backbone weights") 539 | self.load_state_dict(torch.load("model_data/segformer_b0_backbone_weights.pth"), strict=False) 540 | 541 | class mit_b1(MixVisionTransformer): 542 | def __init__(self, pretrained = False): 543 | super(mit_b1, self).__init__( 544 | embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], 545 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], 546 | drop_rate=0.0, drop_path_rate=0.1) 547 | if pretrained: 548 | print("Load backbone weights") 549 | self.load_state_dict(torch.load("model_data/segformer_b1_backbone_weights.pth"), strict=False) 550 | 551 | class mit_b2(MixVisionTransformer): 552 | def __init__(self, pretrained = False): 553 | super(mit_b2, self).__init__( 554 | embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], 555 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], 556 | drop_rate=0.0, drop_path_rate=0.1) 557 | if pretrained: 558 | print("Load backbone weights") 559 | self.load_state_dict(torch.load("model_data/segformer_b2_backbone_weights.pth"), strict=False) 560 | 561 | class mit_b3(MixVisionTransformer): 562 | def __init__(self, pretrained = False): 563 | super(mit_b3, self).__init__( 564 | embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], 565 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], 566 | 
drop_rate=0.0, drop_path_rate=0.1) 567 | if pretrained: 568 | print("Load backbone weights") 569 | self.load_state_dict(torch.load("model_data/segformer_b3_backbone_weights.pth"), strict=False) 570 | 571 | class mit_b4(MixVisionTransformer): 572 | def __init__(self, pretrained = False): 573 | super(mit_b4, self).__init__( 574 | embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], 575 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], 576 | drop_rate=0.0, drop_path_rate=0.1) 577 | if pretrained: 578 | print("Load backbone weights") 579 | self.load_state_dict(torch.load("model_data/segformer_b4_backbone_weights.pth"), strict=False) 580 | 581 | class mit_b5(MixVisionTransformer): 582 | def __init__(self, pretrained = False): 583 | super(mit_b5, self).__init__( 584 | embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], 585 | qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1], 586 | drop_rate=0.0, drop_path_rate=0.1) 587 | if pretrained: 588 | print("Load backbone weights") 589 | self.load_state_dict(torch.load("model_data/segformer_b5_backbone_weights.pth"), strict=False) 590 | -------------------------------------------------------------------------------- /nets/conv_.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | class DilationConv(nn.Sequential): 4 | def __init__(self, in_channels, out_channels,k_size=3, dilation=1,padding=0): 5 | modules = [ 6 | nn.Conv1d(in_channels, out_channels, k_size, padding=padding, dilation=dilation, bias=False), 7 | ] 8 | super(DilationConv, self).__init__(*modules) 9 | 10 | class DSConv(nn.Module): 11 | def __init__(self,in_channels,out_channels,kernel_size,stride=1,dilation=1,padding=0,bias=True): 12 | super(DSConv,self).__init__() 13 | self.body = nn.Sequential( 14 | nn.Conv2d(in_channels = in_channels, 
out_channels = in_channels, 15 | kernel_size = (kernel_size, 1), 16 | stride = stride, 17 | padding = (padding,0), dilation = dilation, groups = in_channels, bias = bias), 18 | # 1x3 19 | nn.Conv2d(in_channels = in_channels, out_channels = in_channels, 20 | kernel_size = (1, kernel_size), 21 | stride = stride, 22 | padding = (0,padding) , dilation = dilation, groups = in_channels, bias = bias), 23 | # PointWise Conv 24 | nn.Conv2d(in_channels, out_channels, kernel_size = 1, stride = 1, padding = 0, bias = bias) 25 | ) 26 | 27 | def forward(self,x): 28 | return self.body(x) -------------------------------------------------------------------------------- /nets/mf_head.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from nets.conv_ import DSConv, DilationConv 4 | import torch.nn.functional as F 5 | 6 | 7 | class FeatureFusion(nn.Module): 8 | def __init__(self, low_channels, high_channels, out_channels): 9 | super(FeatureFusion, self).__init__() 10 | self.conv_low = nn.Sequential( 11 | DSConv(low_channels,out_channels,3,dilation = 2,padding = 2,bias = False), 12 | nn.BatchNorm2d(out_channels), 13 | nn.ReLU() 14 | ) 15 | self.conv_high = nn.Sequential( 16 | DSConv(high_channels,out_channels,1,bias = False), 17 | nn.BatchNorm2d(out_channels), 18 | nn.ReLU() 19 | ) 20 | 21 | self.cat_conv = nn.Sequential( 22 | DSConv(out_channels*2,out_channels,5,dilation = 2,padding = 4,bias = False), 23 | nn.BatchNorm2d(out_channels), 24 | nn.ReLU(), 25 | ) 26 | 27 | 28 | self.sig = nn.Sequential( 29 | DSConv(out_channels,out_channels,3,padding = 1,bias = False), 30 | nn.BatchNorm2d(out_channels), 31 | nn.Sigmoid() 32 | ) 33 | 34 | self.ff_out = DSConv(out_channels, out_channels, 1, bias = False) 35 | 36 | def forward(self, x_low, x_high): 37 | x_low = F.interpolate(x_low, size=x_high.size()[2:], mode='bilinear', align_corners=True) 38 | x_low = self.conv_low(x_low) 39 | x_high = self.conv_high(x_high) 40 
| 41 | x_cat = torch.cat([x_low,x_high],dim = 1) 42 | ga = self.cat_conv(x_cat) 43 | # ga = self.sig(x) 44 | 45 | x_low = torch.mul(ga, x_low) 46 | x_high = torch.mul((1 - ga), x_high) 47 | 48 | x = self.ff_out(x_low + x_high) 49 | 50 | return x 51 | 52 | class ASPP(nn.Module): 53 | def __init__(self, in_channel=768, depth=768): 54 | super(ASPP, self).__init__() 55 | self.pool2d = nn.Sequential( 56 | nn.AdaptiveAvgPool2d((1, 1)), 57 | nn.Conv2d(in_channel, depth, 1, 1), 58 | nn.BatchNorm2d(depth), 59 | nn.ReLU() 60 | ) 61 | 62 | self.block1 = nn.Sequential( 63 | nn.Conv2d(in_channel, depth, 1, 1), 64 | nn.BatchNorm2d(depth), 65 | nn.ReLU() 66 | ) 67 | 68 | self.block2 = nn.Sequential( 69 | nn.Conv2d(in_channel, depth, 3, 1, padding=6, dilation=6), 70 | nn.BatchNorm2d(depth), 71 | nn.ReLU() 72 | ) 73 | 74 | self.block3 = nn.Sequential( 75 | nn.Conv2d(in_channel, depth, 3, 1, padding=12, dilation=12), 76 | nn.BatchNorm2d(depth), 77 | nn.ReLU() 78 | ) 79 | 80 | self.block4 = nn.Sequential( 81 | nn.Conv2d(in_channel, depth, 3, 1, padding=18, dilation=18), 82 | nn.BatchNorm2d(depth), 83 | nn.ReLU() 84 | ) 85 | 86 | self.block5 = nn.Sequential( 87 | nn.Conv2d(depth * 5, depth, 1, 1), 88 | nn.BatchNorm2d(depth), 89 | nn.ReLU() 90 | ) 91 | 92 | self.dropout = nn.Dropout2d(0.1) 93 | 94 | 95 | def forward(self, x): 96 | size = x.shape[2:] 97 | 98 | image_features = self.pool2d(x) 99 | image_features = F.upsample(image_features, size=size, mode='bilinear') 100 | 101 | atrous_block1 = self.block1(x) 102 | atrous_block6 = self.block2(x) 103 | atrous_block12 = self.block3(x) 104 | atrous_block18 = self.block4(x) 105 | concat = torch.cat([image_features, atrous_block1, atrous_block6, atrous_block12, atrous_block18], dim=1) 106 | block5 = self.block5(concat) 107 | net = self.dropout(block5) 108 | return net 109 | 110 | class MF_Head(nn.Module): 111 | ''' 112 | Multiscale feature fusion 113 | ''' 114 | def __init__(self,in_channels=[32, 64, 160, 256],num_classes=2): 115 | 
super(MF_Head,self).__init__() 116 | 117 | self.ff0 = FeatureFusion(in_channels[3],in_channels[2],256) 118 | self.ff1 = FeatureFusion(256,in_channels[1],256) 119 | self.ff2 = FeatureFusion(256,in_channels[0],256) 120 | self.aspp = ASPP(in_channel=768, depth=768) 121 | self.seg = nn.Conv2d(768,num_classes,kernel_size = 1) 122 | 123 | def forward(self,inputs): 124 | c1, c2, c3, c4 = inputs 125 | ff0_out = self.ff0(c4,c3) 126 | ff1_out = self.ff1(ff0_out,c2) 127 | ff2_out = self.ff2(ff1_out,c1) 128 | 129 | ff0_out = F.interpolate(ff0_out,ff2_out.shape[2:],mode='bilinear', align_corners=True) 130 | ff1_out = F.interpolate(ff1_out, ff2_out.shape[2:], mode = 'bilinear', align_corners = True) 131 | x = torch.cat([ff0_out,ff1_out,ff2_out],dim = 1) 132 | 133 | aspp_out = self.aspp(x) 134 | out = self.seg(aspp_out) 135 | 136 | return out 137 | 138 | -------------------------------------------------------------------------------- /nets/segformer.py: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------- 2 | # Copyright (c) 2021, NVIDIA Corporation. All rights reserved. 
#
# This work is licensed under the NVIDIA Source Code License
# ---------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F

from nets.mf_head import MF_Head
from nets.backbone import mit_b0, mit_b1, mit_b2, mit_b3, mit_b4, mit_b5


class MLP(nn.Module):
    """
    Linear Embedding: flatten a (B, C, H, W) feature map into a token
    sequence and project every token to embed_dim.
    """
    def __init__(self, input_dim=2048, embed_dim=768):
        super().__init__()
        self.proj = nn.Linear(input_dim, embed_dim)

    def forward(self, x):
        x = x.flatten(2).transpose(1, 2)
        x = self.proj(x)
        return x

class ConvModule(nn.Module):
    """Conv2d -> BatchNorm2d -> activation (ReLU by default)."""
    def __init__(self, c1, c2, k=1, s=1, p=0, g=1, act=True):
        super(ConvModule, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, p, groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2, eps=0.001, momentum=0.03)
        self.act = nn.ReLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        # Used after conv+bn fusion, when the bn was folded into the conv.
        return self.act(self.conv(x))

class SegFormerHead(nn.Module):
    """
    SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers

    All-MLP decoder: project each backbone stage to a common embedding,
    upsample everything to the stage-1 resolution, fuse with a 1x1 conv and
    predict per-pixel class logits.
    """
    def __init__(self, num_classes=20, in_channels=[32, 64, 160, 256], embedding_dim=768, dropout_ratio=0.1):
        super(SegFormerHead, self).__init__()
        c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = in_channels

        self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=embedding_dim)
        self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=embedding_dim)
        self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=embedding_dim)
        self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=embedding_dim)

        self.linear_fuse = ConvModule(
            c1=embedding_dim*4,
            c2=embedding_dim,
            k=1,
        )

        self.linear_pred = nn.Conv2d(embedding_dim, num_classes, kernel_size=1)
        self.dropout = nn.Dropout2d(dropout_ratio)

    def forward(self, inputs):
        c1, c2, c3, c4 = inputs

        ############## MLP decoder on C1-C4 ###########
        n, _, h, w = c4.shape

        _c4 = self.linear_c4(c4).permute(0,2,1).reshape(n, -1, c4.shape[2], c4.shape[3])
        _c4 = F.interpolate(_c4, size=c1.size()[2:], mode='bilinear', align_corners=False)

        _c3 = self.linear_c3(c3).permute(0,2,1).reshape(n, -1, c3.shape[2], c3.shape[3])
        _c3 = F.interpolate(_c3, size=c1.size()[2:], mode='bilinear', align_corners=False)

        _c2 = self.linear_c2(c2).permute(0,2,1).reshape(n, -1, c2.shape[2], c2.shape[3])
        _c2 = F.interpolate(_c2, size=c1.size()[2:], mode='bilinear', align_corners=False)

        _c1 = self.linear_c1(c1).permute(0,2,1).reshape(n, -1, c1.shape[2], c1.shape[3])

        _c = self.linear_fuse(torch.cat([_c4, _c3, _c2, _c1], dim=1))

        x = self.dropout(_c)
        x = self.linear_pred(x)

        return x

class SegFormer(nn.Module):
    """MiT backbone + decoder.  The decoder is the MF head by default; the
    original all-MLP SegFormer head is kept below, commented out."""
    def __init__(self, num_classes = 21, phi = 'b0', pretrained = False):
        super(SegFormer, self).__init__()
        self.in_channels = {
            'b0': [32, 64, 160, 256], 'b1': [64, 128, 320, 512], 'b2': [64, 128, 320, 512],
            'b3': [64, 128, 320, 512], 'b4': [64, 128, 320, 512], 'b5': [64, 128, 320, 512],
        }[phi]
        self.backbone = {
            'b0': mit_b0, 'b1': mit_b1, 'b2': mit_b2,
            'b3': mit_b3, 'b4': mit_b4, 'b5': mit_b5,
        }[phi](pretrained)
        self.embedding_dim = {
            'b0': 256, 'b1': 256, 'b2': 768,
            'b3': 768, 'b4': 768, 'b5': 768,
        }[phi]

        # MF decoder
        self.decode_head = MF_Head(in_channels = self.in_channels, num_classes = num_classes)

        # Segformer decoder
        # self.decode_head = SegFormerHead(num_classes, self.in_channels, self.embedding_dim)

    def forward(self, inputs):
        H, W = inputs.size(2), inputs.size(3)

        x = self.backbone.forward(inputs)
        x = self.decode_head.forward(x)

        # upsample the logits back to the input resolution
        x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        return x

# ==============================================================================
# file: nets/segformer_training.py
# ==============================================================================
import math
from functools import partial

import torch
import torch.nn as nn
import torch.nn.functional as F


def CE_Loss(inputs, target, cls_weights, num_classes=21):
    """Pixel-wise cross-entropy.

    inputs:      (n, c, h, w) raw logits
    target:      (n, ht, wt) integer labels; the value `num_classes` is ignored
    cls_weights: per-class weight tensor
    """
    n, c, h, w = inputs.size()
    nt, ht, wt = target.size()
    # BUGFIX: was `h != ht and w != wt`, which skipped resizing when only
    # one spatial dimension differed from the label size.
    if h != ht or w != wt:
        inputs = F.interpolate(inputs, size=(ht, wt), mode="bilinear", align_corners=True)

    temp_inputs = inputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    temp_target = target.view(-1)

    CE_loss = nn.CrossEntropyLoss(weight=cls_weights, ignore_index=num_classes)(temp_inputs, temp_target)
    return CE_loss

def Focal_Loss(inputs, target, cls_weights, num_classes=21, alpha=0.5, gamma=2):
    """Focal loss: cross-entropy rescaled by (1 - pt) ** gamma so that easy
    pixels contribute less; alpha is a global scaling factor."""
    n, c, h, w = inputs.size()
    nt, ht, wt = target.size()
    # BUGFIX: `and` -> `or` (see CE_Loss).
    if h != ht or w != wt:
        inputs = F.interpolate(inputs, size=(ht, wt), mode="bilinear", align_corners=True)

    temp_inputs = inputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    temp_target = target.view(-1)

    logpt = -nn.CrossEntropyLoss(weight=cls_weights, ignore_index=num_classes, reduction='none')(temp_inputs, temp_target)
    pt = torch.exp(logpt)
    if alpha is not None:
        logpt *= alpha
    loss = -((1 - pt) ** gamma) * logpt
    loss = loss.mean()
    return loss

def Dice_loss(inputs, target, beta=1, smooth = 1e-5):
    """Dice (F-beta) loss.  target is one-hot (n, ht, wt, ct); its last
    channel is the ignore slot and is excluded from the score."""
    n, c, h, w = inputs.size()
    nt, ht, wt, ct = target.size()
    # BUGFIX: `and` -> `or` (see CE_Loss).
    if h != ht or w != wt:
        inputs = F.interpolate(inputs, size=(ht, wt), mode="bilinear", align_corners=True)

    temp_inputs = torch.softmax(inputs.transpose(1, 2).transpose(2, 3).contiguous().view(n, -1, c), -1)
    temp_target = target.view(n, -1, ct)

    #--------------------------------------------#
    #   dice loss
    #--------------------------------------------#
    tp = torch.sum(temp_target[...,:-1] * temp_inputs, axis=[0,1])
    fp = torch.sum(temp_inputs                       , axis=[0,1]) - tp
    fn = torch.sum(temp_target[...,:-1]              , axis=[0,1]) - tp

    score = ((1 + beta ** 2) * tp + smooth) / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + smooth)
    dice_loss = 1 - torch.mean(score)
    return dice_loss

def weights_init(net, init_type='normal', init_gain=0.02):
    """Initialise Conv weights with the chosen scheme and BatchNorm2d with N(1, 0.02)."""
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and classname.find('Conv') != -1:
            if init_type == 'normal':
                torch.nn.init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                torch.nn.init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                torch.nn.init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
        elif classname.find('BatchNorm2d') != -1:
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)
    print('initialize network with %s type' % init_type)
    net.apply(init_func)

def get_lr_scheduler(lr_decay_type, lr, min_lr, total_iters, warmup_iters_ratio = 0.1, warmup_lr_ratio = 0.1, no_aug_iter_ratio = 0.3, step_num = 10):
    """Return a function epoch -> lr implementing either a warmup + cosine
    schedule ("cos") or a stepwise exponential decay (any other value)."""
    def yolox_warm_cos_lr(lr, min_lr, total_iters, warmup_total_iters, warmup_lr_start, no_aug_iter, iters):
        if iters <= warmup_total_iters:
            # quadratic warmup from warmup_lr_start up to lr
            # lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start
            lr = (lr - warmup_lr_start) * pow(iters / float(warmup_total_iters), 2) + warmup_lr_start
        elif iters >= total_iters - no_aug_iter:
            # hold at min_lr for the final (no-augmentation) iterations
            lr = min_lr
        else:
            lr = min_lr + 0.5 * (lr - min_lr) * (
                1.0 + math.cos(math.pi * (iters - warmup_total_iters) / (total_iters - warmup_total_iters - no_aug_iter))
            )
        return lr

    def step_lr(lr, decay_rate, step_size, iters):
        if step_size < 1:
            raise ValueError("step_size must above 1.")
        n = iters // step_size
        out_lr = lr * decay_rate ** n
        return out_lr

    if lr_decay_type == "cos":
        warmup_total_iters = min(max(warmup_iters_ratio * total_iters, 1), 3)
        warmup_lr_start = max(warmup_lr_ratio * lr, 1e-6)
        no_aug_iter = min(max(no_aug_iter_ratio * total_iters, 1), 15)
        func = partial(yolox_warm_cos_lr, lr, min_lr, total_iters, warmup_total_iters, warmup_lr_start, no_aug_iter)
    else:
        decay_rate = (min_lr / lr) ** (1 / (step_num - 1))
        step_size = total_iters / step_num
        func = partial(step_lr, lr, decay_rate, step_size)

    return func

def set_optimizer_lr(optimizer, lr_scheduler_func, epoch):
    """Apply the scheduled learning rate to every parameter group."""
    lr = lr_scheduler_func(epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

# ==============================================================================
# file: predict.py
# ==============================================================================
#----------------------------------------------------#
#   Single-image prediction, webcam/video detection
#   and FPS testing in one script; select with `mode`.
#----------------------------------------------------#
import time

import cv2
import numpy as np
from PIL import Image

from segformer import SegFormer_Segmentation

if __name__ == "__main__":
    #-------------------------------------------------------------------------#
    #   To change the colour of a class, edit self.colors in the generate function.
    #-------------------------------------------------------------------------#
    segformer = SegFormer_Segmentation()
#----------------------------------------------------------------------------------------------------------# 19 | # mode用于指定测试的模式: 20 | # 'predict' 表示单张图片预测,如果想对预测过程进行修改,如保存图片,截取对象等,可以先看下方详细的注释 21 | # 'video' 表示视频检测,可调用摄像头或者视频进行检测,详情查看下方注释。 22 | # 'fps' 表示测试fps,使用的图片是img里面的street.jpg,详情查看下方注释。 23 | # 'dir_predict' 表示遍历文件夹进行检测并保存。默认遍历img文件夹,保存img_out文件夹,详情查看下方注释。 24 | # 'export_onnx' 表示将模型导出为onnx,需要pytorch1.7.1以上。 25 | #----------------------------------------------------------------------------------------------------------# 26 | mode = "dir_predict" 27 | #-------------------------------------------------------------------------# 28 | # count 指定了是否进行目标的像素点计数(即面积)与比例计算 29 | # name_classes 区分的种类,和json_to_dataset里面的一样,用于打印种类和数量 30 | # 31 | # count、name_classes仅在mode='predict'时有效 32 | #-------------------------------------------------------------------------# 33 | count = False 34 | name_classes = ["background","aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] 35 | # name_classes = ["background","cat","dog"] 36 | #----------------------------------------------------------------------------------------------------------# 37 | # video_path 用于指定视频的路径,当video_path=0时表示检测摄像头 38 | # 想要检测视频,则设置如video_path = "xxx.mp4"即可,代表读取出根目录下的xxx.mp4文件。 39 | # video_save_path 表示视频保存的路径,当video_save_path=""时表示不保存 40 | # 想要保存视频,则设置如video_save_path = "yyy.mp4"即可,代表保存为根目录下的yyy.mp4文件。 41 | # video_fps 用于保存的视频的fps 42 | # 43 | # video_path、video_save_path和video_fps仅在mode='video'时有效 44 | # 保存视频时需要ctrl+c退出或者运行到最后一帧才会完成完整的保存步骤。 45 | #----------------------------------------------------------------------------------------------------------# 46 | video_path = 0 47 | video_save_path = "" 48 | video_fps = 25.0 49 | #----------------------------------------------------------------------------------------------------------# 50 | # test_interval 
用于指定测量fps的时候,图片检测的次数。理论上test_interval越大,fps越准确。 51 | # fps_image_path 用于指定测试的fps图片 52 | # 53 | # test_interval和fps_image_path仅在mode='fps'有效 54 | #----------------------------------------------------------------------------------------------------------# 55 | test_interval = 100 56 | fps_image_path = "img/street.jpg" 57 | #-------------------------------------------------------------------------# 58 | # dir_origin_path 指定了用于检测的图片的文件夹路径 59 | # dir_save_path 指定了检测完图片的保存路径 60 | # 61 | # dir_origin_path和dir_save_path仅在mode='dir_predict'时有效 62 | #-------------------------------------------------------------------------# 63 | dir_origin_path = "img/" 64 | dir_save_path = "img_out/" 65 | #-------------------------------------------------------------------------# 66 | # simplify 使用Simplify onnx 67 | # onnx_save_path 指定了onnx的保存路径 68 | #-------------------------------------------------------------------------# 69 | simplify = True 70 | onnx_save_path = "model_data/models.onnx" 71 | 72 | if mode == "predict": 73 | ''' 74 | predict.py有几个注意点 75 | 1、该代码无法直接进行批量预测,如果想要批量预测,可以利用os.listdir()遍历文件夹,利用Image.open打开图片文件进行预测。 76 | 具体流程可以参考get_miou_prediction.py,在get_miou_prediction.py即实现了遍历。 77 | 2、如果想要保存,利用r_image.save("img.jpg")即可保存。 78 | 3、如果想要原图和分割图不混合,可以把blend参数设置成False。 79 | 4、如果想根据mask获取对应的区域,可以参考detect_image函数中,利用预测结果绘图的部分,判断每一个像素点的种类,然后根据种类获取对应的部分。 80 | seg_img = np.zeros((np.shape(pr)[0],np.shape(pr)[1],3)) 81 | for c in range(self.num_classes): 82 | seg_img[:, :, 0] += ((pr == c)*( self.colors[c][0] )).astype('uint8') 83 | seg_img[:, :, 1] += ((pr == c)*( self.colors[c][1] )).astype('uint8') 84 | seg_img[:, :, 2] += ((pr == c)*( self.colors[c][2] )).astype('uint8') 85 | ''' 86 | while True: 87 | img = input('Input image filename:') 88 | try: 89 | image = Image.open(img) 90 | except: 91 | print('Open Error! 
Try again!') 92 | continue 93 | else: 94 | r_image = segformer.detect_image(image, count=count, name_classes=name_classes) 95 | r_image.show() 96 | 97 | elif mode == "video": 98 | capture=cv2.VideoCapture(video_path) 99 | if video_save_path!="": 100 | fourcc = cv2.VideoWriter_fourcc(*'XVID') 101 | size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))) 102 | out = cv2.VideoWriter(video_save_path, fourcc, video_fps, size) 103 | 104 | ref, frame = capture.read() 105 | if not ref: 106 | raise ValueError("未能正确读取摄像头(视频),请注意是否正确安装摄像头(是否正确填写视频路径)。") 107 | 108 | fps = 0.0 109 | while(True): 110 | t1 = time.time() 111 | # 读取某一帧 112 | ref, frame = capture.read() 113 | if not ref: 114 | break 115 | # 格式转变,BGRtoRGB 116 | frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) 117 | # 转变成Image 118 | frame = Image.fromarray(np.uint8(frame)) 119 | # 进行检测 120 | frame = np.array(segformer.detect_image(frame)) 121 | # RGBtoBGR满足opencv显示格式 122 | frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR) 123 | 124 | fps = ( fps + (1./(time.time()-t1)) ) / 2 125 | print("fps= %.2f"%(fps)) 126 | frame = cv2.putText(frame, "fps= %.2f"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2) 127 | 128 | cv2.imshow("video",frame) 129 | c= cv2.waitKey(1) & 0xff 130 | if video_save_path!="": 131 | out.write(frame) 132 | 133 | if c==27: 134 | capture.release() 135 | break 136 | print("Video Detection Done!") 137 | capture.release() 138 | if video_save_path!="": 139 | print("Save processed video to the path :" + video_save_path) 140 | out.release() 141 | cv2.destroyAllWindows() 142 | 143 | elif mode == "fps": 144 | img = Image.open(fps_image_path) 145 | tact_time = segformer.get_FPS(img, test_interval) 146 | print(str(tact_time) + ' seconds, ' + str(1/tact_time) + 'FPS, @batch_size 1') 147 | 148 | elif mode == "dir_predict": 149 | import os 150 | from tqdm import tqdm 151 | 152 | img_names = os.listdir(dir_origin_path) 153 | for img_name in tqdm(img_names): 154 | if 
img_name.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')): 155 | image_path = os.path.join(dir_origin_path, img_name) 156 | image = Image.open(image_path) 157 | r_image = segformer.detect_image(image) 158 | if not os.path.exists(dir_save_path): 159 | os.makedirs(dir_save_path) 160 | r_image.save(os.path.join(dir_save_path, img_name)) 161 | 162 | elif mode == "export_onnx": 163 | segformer.convert_to_onnx(simplify, onnx_save_path) 164 | 165 | else: 166 | raise AssertionError("Please specify the correct mode: 'predict', 'video', 'fps' or 'dir_predict'.") 167 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch==1.6.0 2 | torchvision==0.7.0 3 | -------------------------------------------------------------------------------- /segformer.py: -------------------------------------------------------------------------------- 1 | import colorsys 2 | import copy 3 | import time 4 | 5 | import cv2 6 | import numpy as np 7 | import torch 8 | import torch.nn.functional as F 9 | from PIL import Image 10 | from torch import nn 11 | 12 | from nets.segformer import SegFormer 13 | from utils.utils import cvtColor, preprocess_input, resize_image, show_config 14 | 15 | 16 | #-----------------------------------------------------------------------------------# 17 | # 使用自己训练好的模型预测需要修改3个参数 18 | # model_path、backbone和num_classes都需要修改! 
#-----------------------------------------------------------------------------------#
#   If a shape-mismatch error appears, make sure model_path, backbone (phi) and
#   num_classes below match the values that were used during training.
#-----------------------------------------------------------------------------------#
class SegFormer_Segmentation(object):
    """Inference wrapper around a trained SegFormer model.

    Configuration comes from ``_defaults``; any key may be overridden per
    instance via keyword arguments, e.g. ``SegFormer_Segmentation(cuda=False)``.
    """
    _defaults = {
        #-------------------------------------------------------------------#
        #   model_path points at a weight file under logs/.  After training,
        #   pick the checkpoint with a low validation loss; note that a low
        #   validation loss does not necessarily mean a high mIoU.
        #-------------------------------------------------------------------#
        "model_path"    : "logs/best_epoch_weights.pth",
        #----------------------------------------#
        #   Number of classes to separate, + 1 (background).
        #----------------------------------------#
        "num_classes"   : 2,
        #----------------------------------------#
        #   Backbone variant: b0, b1, b2, b3, b4, b5.
        #----------------------------------------#
        "phi"           : "b5",
        #----------------------------------------#
        #   Network input size (height, width).
        #----------------------------------------#
        "input_shape"   : [256, 256],
        #-------------------------------------------------#
        #   Visualisation mode:
        #   0 = blend the mask with the original image
        #   1 = keep only the generated mask
        #   2 = cut out the background, keep original pixels of the targets
        #-------------------------------------------------#
        "mix_type"      : 1,
        #-------------------------------#
        #   Use CUDA; set False when no GPU is available.
        #-------------------------------#
        "cuda"          : True,
    }

    #---------------------------------------------------#
    #   Initialise the SegFormer wrapper.
    #---------------------------------------------------#
    def __init__(self, **kwargs):
        """Apply defaults + kwargs overrides, build the palette, load the net."""
        self.__dict__.update(self._defaults)
        for name, value in kwargs.items():
            setattr(self, name, value)
        #---------------------------------------------------#
        #   One drawing colour per class.
        #---------------------------------------------------#
        if self.num_classes <= 21:
            # Fixed VOC-style palette; index 0 (background) is black.
            self.colors = [ (0, 0, 0), (255, 255, 255), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128), (0, 128, 128),
                            (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0), (64, 0, 128), (192, 0, 128),
                            (64, 128, 128), (192, 128, 128), (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128),
                            (128, 64, 12)]
        else:
            # Evenly spaced hues on the HSV wheel, converted to 0-255 RGB.
            hsv_tuples = [(x / self.num_classes, 1., 1.) for x in range(self.num_classes)]
            self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
            self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))
        #---------------------------------------------------#
        #   Build the network and load the weights.
        #---------------------------------------------------#
        self.generate()

        # NOTE(review): this prints the class-level defaults, not the values
        # after kwargs overrides — confirm whether that is intended.
        show_config(**self._defaults)

    #---------------------------------------------------#
    #   Build the network and load the trained weights.
    #---------------------------------------------------#
    def generate(self, onnx=False):
        """Instantiate SegFormer, load ``model_path``, switch to eval mode.

        onnx: when True the net stays unwrapped (no DataParallel / .cuda())
        so it can be exported by :meth:`convert_to_onnx`.
        """
        self.net = SegFormer(num_classes=self.num_classes, phi=self.phi, pretrained=False)

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.net.load_state_dict(torch.load(self.model_path, map_location=device))
        self.net = self.net.eval()
        print('{} model, and classes loaded.'.format(self.model_path))
        if not onnx:
            if self.cuda:
                self.net = nn.DataParallel(self.net)
                self.net = self.net.cuda()

    #---------------------------------------------------#
    #   Run segmentation on one image.
    #---------------------------------------------------#
    def detect_image(self, image, count=False, name_classes=None):
        """Segment a PIL image and return the visualised result (PIL image).

        count: if True, print a pixel count / ratio table per class.
        name_classes: class names used by the count table.
        """
        #---------------------------------------------------------#
        #   Convert to RGB so grayscale (or other) inputs don't crash
        #   the network — only RGB prediction is supported.
        #---------------------------------------------------------#
        image       = cvtColor(image)
        #   Keep a copy of the input for blending / cut-out later.
        old_img     = copy.deepcopy(image)
        orininal_h  = np.array(image).shape[0]
        orininal_w  = np.array(image).shape[1]
        #---------------------------------------------------------#
        #   Letterbox resize: pad with gray bars so the aspect
        #   ratio is preserved (nw/nh = size of the real content).
        #---------------------------------------------------------#
        image_data, nw, nh  = resize_image(image, (self.input_shape[1], self.input_shape[0]))
        #   HWC -> CHW and add the batch dimension.
        image_data  = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)

        with torch.no_grad():
            images = torch.from_numpy(image_data)
            if self.cuda:
                images = images.cuda()

            #   Forward pass; take the single batch item.
            pr = self.net(images)[0]
            #   Per-pixel class probabilities in HWC layout.
            pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy()
            #   Crop away the letterbox padding.
            pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
                    int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]
            #   Resize the probability map back to the original image size.
            pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation = cv2.INTER_LINEAR)
            #   Per-pixel argmax -> class-index map.
            pr = pr.argmax(axis=-1)

        #---------------------------------------------------------#
        #   Optional per-class pixel statistics.
        #---------------------------------------------------------#
        if count:
            classes_nums        = np.zeros([self.num_classes])
            total_points_num    = orininal_h * orininal_w
            print('-' * 63)
            print("|%25s | %15s | %15s|"%("Key", "Value", "Ratio"))
            print('-' * 63)
            for i in range(self.num_classes):
                num     = np.sum(pr == i)
                ratio   = num / total_points_num * 100
                if num > 0:
                    print("|%25s | %15s | %14.2f%%|"%(str(name_classes[i]), str(num), ratio))
                    print('-' * 63)
                classes_nums[i] = num
            print("classes_nums:", classes_nums)

        if self.mix_type == 0:
            #   Palette lookup: map every class index to its RGB colour.
            seg_img = np.reshape(np.array(self.colors, np.uint8)[np.reshape(pr, [-1])], [orininal_h, orininal_w, -1])
            image   = Image.fromarray(np.uint8(seg_img))
            #   Blend the coloured mask with the original image.
            image   = Image.blend(old_img, image, 0.7)

        elif self.mix_type == 1:
            #   Mask only.
            seg_img = np.reshape(np.array(self.colors, np.uint8)[np.reshape(pr, [-1])], [orininal_h, orininal_w, -1])
            image   = Image.fromarray(np.uint8(seg_img))

        elif self.mix_type == 2:
            #   Keep original pixels where the class is not background (0).
            seg_img = (np.expand_dims(pr != 0, -1) * np.array(old_img, np.float32)).astype('uint8')
            image   = Image.fromarray(np.uint8(seg_img))

        return image

    def get_FPS(self, image, test_interval):
        """Return the average seconds per inference over ``test_interval`` runs."""
        #   Same preprocessing as detect_image: RGB conversion + letterbox.
        image       = cvtColor(image)
        image_data, nw, nh  = resize_image(image, (self.input_shape[1], self.input_shape[0]))
        image_data  = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)

        with torch.no_grad():
            images = torch.from_numpy(image_data)
            if self.cuda:
                images = images.cuda()

            #   Warm-up pass (not timed).
            pr = self.net(images)[0]
            pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy().argmax(axis=-1)
            pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
                    int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]

        t1 = time.time()
        for _ in range(test_interval):
            with torch.no_grad():
                #   Timed: forward pass + argmax + letterbox crop.
                pr = self.net(images)[0]
                pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy().argmax(axis=-1)
                pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
                        int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]
        t2 = time.time()
        tact_time = (t2 - t1) / test_interval
        return tact_time

    def convert_to_onnx(self, simplify, model_path):
        """Export the network to ONNX (opset 12), optionally simplifying it."""
        import onnx
        #   Re-build the net without DataParallel / CUDA so export works on CPU.
        self.generate(onnx=True)

        im                  = torch.zeros(1, 3, *self.input_shape).to('cpu')  # dummy BCHW input
        input_layer_names   = ["images"]
        output_layer_names  = ["output"]

        # Export the model
        print(f'Starting export with onnx {onnx.__version__}.')
        torch.onnx.export(self.net,
                        im,
                        f               = model_path,
                        verbose         = False,
                        opset_version   = 12,
                        training        = torch.onnx.TrainingMode.EVAL,
                        do_constant_folding = True,
                        input_names     = input_layer_names,
                        output_names    = output_layer_names,
                        dynamic_axes    = None)

        #   Sanity-check the exported graph.
        model_onnx = onnx.load(model_path)
        onnx.checker.check_model(model_onnx)

        #   Optionally run onnx-simplifier.
        if simplify:
            import onnxsim
            print(f'Simplifying with onnx-simplifier {onnxsim.__version__}.')
            model_onnx, check = onnxsim.simplify(
                model_onnx,
                dynamic_input_shape=False,
                input_shapes=None)
            assert check, 'assert check failed'
            onnx.save(model_onnx, model_path)

        print('Onnx model save as {}'.format(model_path))

    def get_miou_png(self, image):
        """Return the raw per-pixel class-index map as a PIL image (mIoU eval)."""
        #   Same preprocessing as detect_image.
        image       = cvtColor(image)
        orininal_h  = np.array(image).shape[0]
        orininal_w  = np.array(image).shape[1]
        image_data, nw, nh  = resize_image(image, (self.input_shape[1], self.input_shape[0]))
        image_data  = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)

        with torch.no_grad():
            images = torch.from_numpy(image_data)
            if self.cuda:
                images = images.cuda()

            pr = self.net(images)[0]
            pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy()
            #   Crop the letterbox padding, then resize back to original size.
            pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
                    int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]
            pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation = cv2.INTER_LINEAR)
            pr = pr.argmax(axis=-1)

        image = Image.fromarray(np.uint8(pr))
        return image
#--------------------------------------------------------------------------------
# summary.py:
#--------------------------------------------------------------------------------
#--------------------------------------------#
#   Prints the network structure, parameter count and FLOPs.
#--------------------------------------------#
import torch
from thop import clever_format, profile
from torchsummary import summary

from nets.segformer import SegFormer

if __name__ == "__main__":
    input_shape     = [256, 256]
    num_classes     = 2
    phi             = 'b5'

    device  = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model   = SegFormer(num_classes = num_classes, phi = phi, pretrained=False).to(device)
    summary(model, (3, input_shape[0], input_shape[1]))

    dummy_input     = torch.randn(1, 3, input_shape[0], input_shape[1]).to(device)
    flops, params   = profile(model.to(device), (dummy_input, ), verbose=False)
    #--------------------------------------------------------#
    #   flops * 2 counts a convolution as multiply + add
    #   (the convention used by YOLOX; some papers only count
    #   multiplies, in which case the factor 2 is dropped).
    #--------------------------------------------------------#
    flops           = flops * 2
    flops, params   = clever_format([flops, params], "%.3f")
    print('Total GFLOPS: %s' % (flops))
    print('Total params: %s' % (params))
#--------------------------------------------------------------------------------
# train.py:
#--------------------------------------------------------------------------------
import os
import datetime

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim as optim
from torch.utils.data import DataLoader

from nets.segformer import SegFormer
from nets.segformer_training import (get_lr_scheduler, set_optimizer_lr,
                                     weights_init)
from utils.callbacks import LossHistory, EvalCallback
from utils.dataloader import SegmentationDataset, seg_dataset_collate
from utils.utils import download_weights, show_config
from utils.utils_fit import fit_one_epoch


if __name__ == "__main__":
    #---------------------------------#
    #   Cuda: set False when no GPU is available.
    #---------------------------------#
    Cuda            = True
    #---------------------------------------------------------------------#
    #   distributed: single-machine multi-GPU DDP.  The launch command is
    #   only supported on Ubuntu (CUDA_VISIBLE_DEVICES selects the cards);
    #   Windows defaults to DP over all cards and does not support DDP.
    #---------------------------------------------------------------------#
    distributed     = False
    #   sync_bn: synchronised BatchNorm, usable with multi-GPU DDP only.
    sync_bn         = False
    #   fp16: mixed-precision training (~half the VRAM, needs pytorch >= 1.7.1).
    fp16            = False
    #   num_classes: your number of classes + 1 (background), e.g. 2+1.
    num_classes     = 2
    #   Backbone variant: b0, b1, b2, b3, b4, b5.
    phi             = "b5"
    #   pretrained: use backbone pre-training weights.
    pretrained      = True
    #   model_path = '' skips loading whole-model weights.
    model_path      = './model_data/segformer_b5_backbone_weights.pth'
    #   Network input size.
    input_shape     = [256, 256]
    #------------------------------------------------------------------#
    #   Freeze-stage parameters: the backbone is frozen so only the head
    #   is fine-tuned (low VRAM).  Init_Epoch may exceed Freeze_Epoch
    #   (e.g. Init=60, Freeze=50, UnFreeze=100) to resume training past
    #   the freeze stage with the matching learning rate.
    #   Freeze_Epoch / Freeze_batch_size are ignored when Freeze_Train=False.
    #------------------------------------------------------------------#
    Init_Epoch          = 0
    Freeze_Epoch        = 0
    Freeze_batch_size   = 30
    #------------------------------------------------------------------#
    #   Unfreeze-stage parameters: the whole network trains (high VRAM).
    #   UnFreeze_Epoch is the total number of training epochs.
    #------------------------------------------------------------------#
    UnFreeze_Epoch      = 100
    Unfreeze_batch_size = 16
    #   Freeze_Train: freeze the backbone first, then unfreeze.
    Freeze_Train        = False

    #------------------------------------------------------------------#
    #   LR / optimiser settings.  Adam or AdamW: Init_lr=1e-4 is
    #   recommended; SGD is not recommended for transformer backbones.
    #   Min_lr defaults to 1% of the maximum learning rate.
    #------------------------------------------------------------------#
    Init_lr             = 1e-4
    Min_lr              = Init_lr * 0.01
    #------------------------------------------------------------------#
    #   optimizer_type: adam / adamw / sgd.
    #   weight_decay can misbehave with adam — set it to 0 for adam.
    #------------------------------------------------------------------#
    optimizer_type      = "adamw"
    momentum            = 0.9
    weight_decay        = 1e-2
    #   lr_decay_type: 'step' or 'cos'.
    lr_decay_type       = 'cos'
    #   save_period: save weights every N epochs.
    save_period         = 100
    #   save_dir: output folder for weights and logs.
    save_dir            = 'logs'
    #------------------------------------------------------------------#
    #   eval_flag / eval_period: periodic evaluation on the validation
    #   set.  Evaluation is slow; frequent evaluation slows training.
    #   The mAP/mIoU obtained here can differ from get_miou.py results.
    #------------------------------------------------------------------#
    eval_flag           = False
    eval_period         = 10

    #   Dataset root.
    VOCdevkit_path  = './VOCdevkit'
    #------------------------------------------------------------------#
    #   dice_loss recommendation: True with few classes, or with many
    #   classes and batch_size >= ~10; False with many classes and a
    #   small batch size.
    #------------------------------------------------------------------#
    dice_loss       = True
    #   focal_loss: counter positive/negative sample imbalance.
    focal_loss      = True
    #   Per-class loss weights, numpy array of length num_classes,
    #   e.g. cls_weights = np.array([1, 2, 3], np.float32).
    cls_weights     = np.ones([num_classes], np.float32)
    #   num_workers: DataLoader worker processes; raise only when I/O-bound.
    num_workers     = 4

    #------------------------------------------------------#
    #   Device / process-group setup.
    #------------------------------------------------------#
    ngpus_per_node  = torch.cuda.device_count()
    if distributed:
        dist.init_process_group(backend="nccl")
        local_rank  = int(os.environ["LOCAL_RANK"])
        rank        = int(os.environ["RANK"])
        device      = torch.device("cuda", local_rank)
        if local_rank == 0:
            print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...")
            print("Gpu Device Count : ", ngpus_per_node)
    else:
        device          = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        local_rank      = 0

    #----------------------------------------------------#
    #   Download backbone weights (rank 0 only under DDP).
    #----------------------------------------------------#
    if pretrained:
        if distributed:
            if local_rank == 0:
                download_weights(phi)
            dist.barrier()
        else:
            download_weights(phi)

    model   = SegFormer(num_classes=num_classes, phi=phi, pretrained=pretrained)
    if not pretrained:
        weights_init(model)
    if model_path != '':
        #------------------------------------------------------#
        #   See the README for weight downloads (Baidu netdisk).
        #------------------------------------------------------#
        if local_rank == 0:
            print('Load weights {}.'.format(model_path))

        #------------------------------------------------------#
        #   Load only the checkpoint entries whose key AND shape
        #   match the freshly built model.
        #------------------------------------------------------#
        model_dict      = model.state_dict()
        pretrained_dict = torch.load(model_path, map_location = device)
        load_key, no_load_key, temp_dict = [], [], {}
        for k, v in pretrained_dict.items():
            if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
                temp_dict[k] = v
                load_key.append(k)
            else:
                no_load_key.append(k)
        model_dict.update(temp_dict)
        model.load_state_dict(model_dict)
        #------------------------------------------------------#
        #   Report the keys that did not match: unmatched head
        #   keys are expected, unmatched backbone keys are a bug.
        #------------------------------------------------------#
        if local_rank == 0:
            print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key))
            print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key))
            print("\n\033[1;33;44m温馨提示,head部分没有载入是正常现象,Backbone部分没有载入是错误的。\033[0m")

    #----------------------#
    #   Loss logging (rank 0 only).
    #----------------------#
    if local_rank == 0:
        time_str        = datetime.datetime.strftime(datetime.datetime.now(),'%Y_%m_%d_%H_%M_%S')
        log_dir         = os.path.join(save_dir, "loss_" + str(time_str))
        loss_history    = LossHistory(log_dir, model, input_shape=input_shape)
    else:
        loss_history    = None

    #------------------------------------------------------------------#
    #   AMP scaler — torch 1.2 has no amp; needs torch >= 1.7.1.
    #------------------------------------------------------------------#
    if fp16:
        from torch.cuda.amp import GradScaler as GradScaler
        scaler = GradScaler()
    else:
        scaler = None

    model_train     = model.train()
    #----------------------------#
    #   Multi-GPU synchronised BN.
    #----------------------------#
    if sync_bn and ngpus_per_node > 1 and distributed:
        model_train = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_train)
    elif sync_bn:
        print("Sync_bn is not support in one gpu or not distributed.")

    if Cuda:
        if distributed:
            #   DDP: one process per GPU.
            model_train = model_train.cuda(local_rank)
            model_train = torch.nn.parallel.DistributedDataParallel(model_train, device_ids=[local_rank], find_unused_parameters=True)
        else:
            #   DP over all visible GPUs.
            model_train = torch.nn.DataParallel(model)
            cudnn.benchmark = True
            model_train = model_train.cuda()

    #---------------------------#
    #   Read the dataset split lists.
    #---------------------------#
    with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/train.txt"),"r") as f:
        train_lines = f.readlines()
    with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"),"r") as f:
        val_lines = f.readlines()
    num_train   = len(train_lines)
    num_val     = len(val_lines)

    if local_rank == 0:
        show_config(
            num_classes = num_classes, phi = phi, model_path = model_path, input_shape = input_shape, \
            Init_Epoch = Init_Epoch, Freeze_Epoch = Freeze_Epoch, UnFreeze_Epoch = UnFreeze_Epoch, Freeze_batch_size = Freeze_batch_size, Unfreeze_batch_size = Unfreeze_batch_size, Freeze_Train = Freeze_Train, \
            Init_lr = Init_lr, Min_lr = Min_lr, optimizer_type = optimizer_type, momentum = momentum, lr_decay_type = lr_decay_type, \
            save_period = save_period, save_dir = save_dir, num_workers = num_workers, num_train = num_train, num_val = num_val
        )
        #---------------------------------------------------------#
        #   Warn when the total number of optimisation steps
        #   (num_train // batch * epochs, unfreeze stage only)
        #   falls below a recommended minimum for the optimiser.
        #---------------------------------------------------------#
        wanted_step = 1.5e4 if optimizer_type == "adamw" else 0.5e4
        total_step  = num_train // Unfreeze_batch_size * UnFreeze_Epoch
        if total_step <= wanted_step:
            if num_train // Unfreeze_batch_size == 0:
                raise ValueError('数据集过小,无法进行训练,请扩充数据集。')
            wanted_epoch = wanted_step // (num_train // Unfreeze_batch_size) + 1
            print("\n\033[1;33;44m[Warning] 使用%s优化器时,建议将训练总步长设置到%d以上。\033[0m"%(optimizer_type, wanted_step))
            print("\033[1;33;44m[Warning] 本次运行的总训练数据量为%d,Unfreeze_batch_size为%d,共训练%d个Epoch,计算出总训练步长为%d。\033[0m"%(num_train, Unfreeze_batch_size, UnFreeze_Epoch, total_step))
            print("\033[1;33;44m[Warning] 由于总训练步长为%d,小于建议总步长%d,建议设置总世代为%d。\033[0m"%(total_step, wanted_step, wanted_epoch))

    #------------------------------------------------------#
    #   Freeze training speeds things up and protects the
    #   pretrained backbone weights early on.  Reduce
    #   batch_size if OOM occurs.
    #------------------------------------------------------#
    if True:
        UnFreeze_flag = False
        #------------------------------------#
        #   Freeze the backbone for the freeze stage.
        #------------------------------------#
        if Freeze_Train:
            for param in model.backbone.parameters():
                param.requires_grad = False

        #   Without freeze training, start directly with the unfreeze batch size.
        batch_size = Freeze_batch_size if Freeze_Train else Unfreeze_batch_size

        #-------------------------------------------------------------------#
        #   Scale the LR linearly with batch size, clamped per optimiser.
        #-------------------------------------------------------------------#
        nbs             = 16
        lr_limit_max    = 1e-4 if optimizer_type in ['adam', 'adamw'] else 5e-2
        lr_limit_min    = 3e-5 if optimizer_type in ['adam', 'adamw'] else 5e-4
        Init_lr_fit     = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)
        Min_lr_fit      = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)

        #---------------------------------------#
        #   Build the optimiser.
        #---------------------------------------#
        optimizer = {
            'adam'  : optim.Adam(model.parameters(), Init_lr_fit, betas = (momentum, 0.999), weight_decay = weight_decay),
            'adamw' : optim.AdamW(model.parameters(), Init_lr_fit, betas = (momentum, 0.999), weight_decay = weight_decay),
            'sgd'   : optim.SGD(model.parameters(), Init_lr_fit, momentum = momentum, nesterov=True, weight_decay = weight_decay)
        }[optimizer_type]

        #   LR schedule as a function of epoch.
        lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)

        #   Steps per epoch.
        epoch_step      = num_train // batch_size
        epoch_step_val  = num_val // batch_size

        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError("数据集过小,无法继续进行训练,请扩充数据集。")

        train_dataset   = SegmentationDataset(train_lines, input_shape, num_classes, True, VOCdevkit_path)
        val_dataset     = SegmentationDataset(val_lines, input_shape, num_classes, False, VOCdevkit_path)

        if distributed:
            #   Each DDP rank sees its own shard; the sampler shuffles.
            train_sampler   = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True,)
            val_sampler     = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False,)
            batch_size      = batch_size // ngpus_per_node
            shuffle         = False
        else:
            train_sampler   = None
            val_sampler     = None
            shuffle         = True

        gen             = DataLoader(train_dataset, shuffle = shuffle, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
                                    drop_last = True, collate_fn = seg_dataset_collate, sampler=train_sampler)
        gen_val         = DataLoader(val_dataset  , shuffle = shuffle, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
                                    drop_last = True, collate_fn = seg_dataset_collate, sampler=val_sampler)

        #----------------------#
        #   Periodic evaluation callback (rank 0 only).
        #----------------------#
        if local_rank == 0:
            eval_callback   = EvalCallback(model, input_shape, num_classes, val_lines, VOCdevkit_path, log_dir, Cuda, \
                                            eval_flag=eval_flag, period=eval_period)
        else:
            eval_callback   = None

        #---------------------------------------#
        #   Main training loop.
        #---------------------------------------#
        for epoch in range(Init_Epoch, UnFreeze_Epoch):
            #---------------------------------------#
            #   Unfreeze the backbone once the freeze stage ends;
            #   re-derive batch size, LR and data loaders.
            #---------------------------------------#
            if epoch >= Freeze_Epoch and not UnFreeze_flag and Freeze_Train:
                batch_size = Unfreeze_batch_size

                #   Re-scale the LR for the new batch size.
                nbs             = 16
                lr_limit_max    = 1e-4 if optimizer_type in ['adam', 'adamw'] else 5e-2
                lr_limit_min    = 3e-5 if optimizer_type in ['adam', 'adamw'] else 5e-4
                Init_lr_fit     = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)
                Min_lr_fit      = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)
                #   Rebuild the LR schedule.
                lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)

                for param in model.backbone.parameters():
                    param.requires_grad = True

                epoch_step      = num_train // batch_size
                epoch_step_val  = num_val // batch_size

                if epoch_step == 0 or epoch_step_val == 0:
                    raise ValueError("数据集过小,无法继续进行训练,请扩充数据集。")

                gen     = DataLoader(train_dataset, shuffle = shuffle, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
                                    drop_last = True, collate_fn = seg_dataset_collate, sampler=train_sampler)
                gen_val = DataLoader(val_dataset  , shuffle = shuffle, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
                                    drop_last = True, collate_fn = seg_dataset_collate, sampler=val_sampler)

                UnFreeze_flag = True

            if distributed:
                #   Re-seed the sampler so each epoch gets a new shuffle.
                train_sampler.set_epoch(epoch)

            set_optimizer_lr(optimizer, lr_scheduler_func, epoch)

            fit_one_epoch(model_train, model, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, UnFreeze_Epoch, Cuda, \
                dice_loss, focal_loss, cls_weights, num_classes, fp16, scaler, save_period, save_dir, local_rank)

            if distributed:
                dist.barrier()

        if local_rank == 0:
            loss_history.writer.close()
#--------------------------------------------------------------------------------
# utils/__init__.py:
-------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------- /utils/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/utils/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/callbacks.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/utils/__pycache__/callbacks.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/dataloader.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/utils/__pycache__/dataloader.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/utils/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/utils_fit.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tiany-zhang/MF-SegFormer/009901d87c3911b51d3c3b6b9b7f976c6f8f3d72/utils/__pycache__/utils_fit.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/utils_metrics.cpython-38.pyc: 
# (repo-dump residue: utils/__pycache__/utils_metrics.cpython-38.pyc — binary blob link, omitted)
# -----------------------------------------------------------------------------
# /utils/callbacks.py
# -----------------------------------------------------------------------------
import os

import matplotlib
import torch
import torch.nn.functional as F

matplotlib.use('Agg')
from matplotlib import pyplot as plt
import scipy.signal

import cv2
import shutil
import numpy as np

from PIL import Image
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from .utils import cvtColor, preprocess_input, resize_image
from .utils_metrics import compute_mIoU


class LossHistory():
    """Record per-epoch train/val losses, mirror them to TensorBoard and to
    text files under ``log_dir``, and keep an up-to-date loss curve PNG."""

    def __init__(self, log_dir, model, input_shape):
        self.log_dir = log_dir
        self.losses = []
        self.val_loss = []

        # FIX: exist_ok=True — re-running with the same log_dir used to crash
        # here, while append_loss() below already tolerates an existing dir.
        os.makedirs(self.log_dir, exist_ok=True)
        self.writer = SummaryWriter(self.log_dir)
        try:
            # Best-effort graph trace for TensorBoard; some models cannot be
            # traced with a dummy input, so failures are ignored.
            dummy_input = torch.randn(2, 3, input_shape[0], input_shape[1])
            self.writer.add_graph(model, dummy_input)
        except Exception:
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are not swallowed during startup.
            pass

    def append_loss(self, epoch, loss, val_loss):
        """Append one epoch's (train, val) loss, persist to disk and replot."""
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        self.losses.append(loss)
        self.val_loss.append(val_loss)

        with open(os.path.join(self.log_dir, "epoch_loss.txt"), 'a') as f:
            f.write(str(loss))
            f.write("\n")
        with open(os.path.join(self.log_dir, "epoch_val_loss.txt"), 'a') as f:
            f.write(str(val_loss))
            f.write("\n")

        self.writer.add_scalar('loss', loss, epoch)
        self.writer.add_scalar('val_loss', val_loss, epoch)
        self.loss_plot()

    def loss_plot(self):
        """Redraw and save the loss-curve figure (raw + Savitzky-Golay smoothed)."""
        iters = range(len(self.losses))

        plt.figure()
        plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')
        plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')
        try:
            # Smoothing window adapts to history length; savgol_filter raises
            # until enough points exist, which is silently tolerated.
            if len(self.losses) < 25:
                num = 5
            else:
                num = 15

            plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')
            plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')
        except Exception:
            # FIX: narrowed from a bare `except:`.
            pass

        plt.grid(True)
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend(loc="upper right")

        plt.savefig(os.path.join(self.log_dir, "epoch_loss.png"))

        plt.cla()
        plt.close("all")


class EvalCallback():
    """Periodically evaluate mIoU on the validation image list and log/plot it."""

    def __init__(self, net, input_shape, num_classes, image_ids, dataset_path, log_dir, cuda, \
            miou_out_path=".temp_miou_out", eval_flag=True, period=1):
        super(EvalCallback, self).__init__()

        self.net = net
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.image_ids = image_ids
        self.dataset_path = dataset_path
        self.log_dir = log_dir
        self.cuda = cuda
        self.miou_out_path = miou_out_path
        self.eval_flag = eval_flag
        self.period = period

        # image_ids lines may carry trailing fields; keep only the id token.
        self.image_ids = [image_id.split()[0] for image_id in image_ids]
        self.mious = [0]
        self.epoches = [0]
        if self.eval_flag:
            with open(os.path.join(self.log_dir, "epoch_miou.txt"), 'a') as f:
                f.write(str(0))
                f.write("\n")

    def get_miou_png(self, image):
        """Run the net on one PIL image and return the predicted class map
        as an 8-bit PIL image at the original resolution."""
        # Convert to RGB so grayscale inputs do not break prediction; the
        # pipeline only supports RGB.
        image = cvtColor(image)
        original_h = np.array(image).shape[0]
        original_w = np.array(image).shape[1]
        # Letterbox resize (gray padding) keeps the aspect ratio undistorted.
        image_data, nw, nh = resize_image(image, (self.input_shape[1], self.input_shape[0]))
        # Add the batch dimension: HWC -> 1xCxHxW.
        image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)

        with torch.no_grad():
            images = torch.from_numpy(image_data)
            if self.cuda:
                images = images.cuda()

            # Forward pass; take the first (only) batch element.
            pr = self.net(images)[0]
            # Per-pixel class probabilities, channels-last.
            pr = F.softmax(pr.permute(1, 2, 0), dim = -1).cpu().numpy()
            # Crop away the gray letterbox padding.
            pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
                    int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]
            # Resize probabilities back to the original image size.
            pr = cv2.resize(pr, (original_w, original_h), interpolation = cv2.INTER_LINEAR)
            # Per-pixel argmax gives the final class index map.
            pr = pr.argmax(axis=-1)

        image = Image.fromarray(np.uint8(pr))
        return image

    def on_epoch_end(self, epoch, model_eval):
        """If this epoch is an eval epoch, predict all val images, compute
        mIoU against the ground truth, append it to the log and plot."""
        if epoch % self.period == 0 and self.eval_flag:
            self.net = model_eval
            gt_dir = os.path.join(self.dataset_path, "VOC2007/SegmentationClass/")
            pred_dir = os.path.join(self.miou_out_path, 'detection-results')
            if not os.path.exists(self.miou_out_path):
                os.makedirs(self.miou_out_path)
            if not os.path.exists(pred_dir):
                os.makedirs(pred_dir)
            print("Get miou.")
            for image_id in tqdm(self.image_ids):
                # Read the image from disk (this dataset stores .png JPEGImages).
                image_path = os.path.join(self.dataset_path, "VOC2007/JPEGImages/"+image_id+".png")
                image = Image.open(image_path)
                # Predict and save the class map for the mIoU computation.
                image = self.get_miou_png(image)
                image.save(os.path.join(pred_dir, image_id + ".png"))

            print("Calculate miou.")
            _, IoUs, _, _ = compute_mIoU(gt_dir, pred_dir, self.image_ids, self.num_classes, None)
            temp_miou = np.nanmean(IoUs) * 100

            self.mious.append(temp_miou)
            self.epoches.append(epoch)

            with open(os.path.join(self.log_dir, "epoch_miou.txt"), 'a') as f:
                f.write(str(temp_miou))
                f.write("\n")

            plt.figure()
            plt.plot(self.epoches, self.mious, 'red', linewidth = 2, label='train miou')

            plt.grid(True)
            plt.xlabel('Epoch')
            plt.ylabel('Miou')
            plt.title('A Miou Curve')
            plt.legend(loc="upper right")

            plt.savefig(os.path.join(self.log_dir, "epoch_miou.png"))
            plt.cla()
            plt.close("all")

            print("Get miou done.")
            shutil.rmtree(self.miou_out_path)
# -----------------------------------------------------------------------------
# /utils/dataloader.py
# -----------------------------------------------------------------------------
import os

import cv2
import numpy as np
import torch
from PIL import Image
from torch.utils.data.dataset import Dataset
from utils.utils import preprocess_input, cvtColor


class SegmentationDataset(Dataset):
    """VOC-style segmentation dataset with optional train-time augmentation.

    __getitem__ returns (image CHW float, label HxW class map, one-hot labels
    with an extra "ignore" channel at index num_classes).
    """

    def __init__(self, annotation_lines, input_shape, num_classes, train, dataset_path):
        super(SegmentationDataset, self).__init__()
        self.annotation_lines = annotation_lines
        self.length = len(annotation_lines)
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.train = train
        self.dataset_path = dataset_path

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        annotation_line = self.annotation_lines[index]
        name = annotation_line.split()[0]

        # Read image and label from disk (images are stored as .png here).
        jpg = Image.open(os.path.join(os.path.join(self.dataset_path, "VOC2007/JPEGImages"), name + ".png"))
        png = Image.open(os.path.join(os.path.join(self.dataset_path, "VOC2007/SegmentationClass"), name + ".png"))
        # Augment only in training mode; eval mode letterboxes deterministically.
        jpg, png = self.get_random_data(jpg, png, self.input_shape, random = self.train)
        jpg = np.transpose(preprocess_input(np.array(jpg, np.float64)), [2, 0, 1])
        png = np.array(png)

        # Clamp out-of-range labels (e.g. VOC white borders) to the ignore id.
        png[png >= self.num_classes] = self.num_classes
        # One-hot encode with num_classes + 1 channels; the +1 channel holds
        # the border/ignore pixels so the loss can skip them.
        seg_labels = np.eye(self.num_classes + 1)[png.reshape([-1])]
        seg_labels = seg_labels.reshape((int(self.input_shape[0]), int(self.input_shape[1]), self.num_classes + 1))

        return jpg, png, seg_labels

    def rand(self, a=0, b=1):
        """Uniform random float in [a, b)."""
        return np.random.rand() * (b - a) + a

    def get_random_data(self, image, label, input_shape, jitter=.3, hue=.1, sat=0.7, val=0.3, random=True):
        """Resize/augment one (image, label) pair to input_shape.

        random=False: aspect-preserving letterbox only (validation).
        random=True: scale/aspect jitter, flip, gray padding at a random
        offset, optional blur, optional small rotation, HSV color jitter.
        """
        image = cvtColor(image)
        label = np.array(label)
        label = label[:, :, 0]
        # NOTE(review): labels appear to be stored as {0, 255} masks; // 255.0
        # maps them to {0.0, 1.0} (a float array) — confirm against the
        # dataset's SegmentationClass encoding.
        label = label // 255.0
        label = Image.fromarray(np.array(label))
        # Image size vs target size.
        iw, ih = image.size
        h, w = input_shape

        if not random:
            iw, ih = image.size
            scale = min(w/iw, h/ih)
            nw = int(iw*scale)
            nh = int(ih*scale)

            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', [w, h], (128, 128, 128))
            new_image.paste(image, ((w-nw)//2, (h-nh)//2))

            label = label.resize((nw, nh), Image.NEAREST)
            new_label = Image.new('L', [w, h], (0))
            new_label.paste(label, ((w-nw)//2, (h-nh)//2))
            return new_image, new_label

        # Random scale and aspect-ratio jitter.
        new_ar = iw/ih * self.rand(1-jitter, 1+jitter) / self.rand(1-jitter, 1+jitter)
        scale = self.rand(0.5, 2)
        if new_ar < 1:
            nh = int(scale*h)
            nw = int(nh*new_ar)
        else:
            nw = int(scale*w)
            nh = int(nw/new_ar)
        image = image.resize((nw, nh), Image.BICUBIC)
        label = label.resize((nw, nh), Image.NEAREST)

        # Random horizontal flip (applied identically to image and label).
        flip = self.rand() < .5
        if flip:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            label = label.transpose(Image.FLIP_LEFT_RIGHT)

        # Paste onto a gray canvas at a random offset (label canvas is 0).
        dx = int(self.rand(0, w-nw))
        dy = int(self.rand(0, h-nh))
        new_image = Image.new('RGB', (w, h), (128, 128, 128))
        new_label = Image.new('L', (w, h), (0))
        new_image.paste(image, (dx, dy))
        new_label.paste(label, (dx, dy))
        image = new_image
        label = new_label

        image_data = np.array(image, np.uint8)
        # Random Gaussian blur (25% of samples).
        blur = self.rand() < 0.25
        if blur:
            image_data = cv2.GaussianBlur(image_data, (5, 5), 0)

        # Random small rotation (25% of samples, up to ±10 degrees); label is
        # warped with nearest-neighbor so class ids stay exact.
        rotate = self.rand() < 0.25
        if rotate:
            center = (w // 2, h // 2)
            rotation = np.random.randint(-10, 11)
            M = cv2.getRotationMatrix2D(center, -rotation, scale=1)
            image_data = cv2.warpAffine(image_data, M, (w, h), flags=cv2.INTER_CUBIC, borderValue=(128, 128, 128))
            label = cv2.warpAffine(np.array(label, np.uint8), M, (w, h), flags=cv2.INTER_NEAREST, borderValue=(0))

        # HSV color jitter: draw per-channel gains, then apply via LUTs.
        r = np.random.uniform(-1, 1, 3) * [hue, sat, val] + 1
        # Split the image into HSV channels (this rebinds hue/sat/val locals).
        hue, sat, val = cv2.split(cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV))
        dtype = image_data.dtype
        # Build one lookup table per channel; OpenCV hue wraps at 180.
        x = np.arange(0, 256, dtype=r.dtype)
        lut_hue = ((x * r[0]) % 180).astype(dtype)
        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

        image_data = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
        image_data = cv2.cvtColor(image_data, cv2.COLOR_HSV2RGB)

        return image_data, label


def seg_dataset_collate(batch):
    """Collate (image, png, one-hot) triples into float/long/float tensors."""
    images = []
    pngs = []
    seg_labels = []
    for img, png, labels in batch:
        images.append(img)
        pngs.append(png)
        seg_labels.append(labels)
    images = torch.from_numpy(np.array(images)).type(torch.FloatTensor)
    pngs = torch.from_numpy(np.array(pngs)).long()
    seg_labels = torch.from_numpy(np.array(seg_labels)).type(torch.FloatTensor)
    return images, pngs, seg_labels
# -----------------------------------------------------------------------------
# /utils/utils.py:
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | from PIL import Image 3 | 4 | #---------------------------------------------------------# 5 | # 将图像转换成RGB图像,防止灰度图在预测时报错。 6 | # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB 7 | #---------------------------------------------------------# 8 | def cvtColor(image): 9 | if len(np.shape(image)) == 3 and np.shape(image)[2] == 3: 10 | return image 11 | else: 12 | image = image.convert('RGB') 13 | return image 14 | 15 | #---------------------------------------------------# 16 | # 对输入图像进行resize 17 | #---------------------------------------------------# 18 | def resize_image(image, size): 19 | iw, ih = image.size 20 | w, h = size 21 | 22 | scale = min(w/iw, h/ih) 23 | nw = int(iw*scale) 24 | nh = int(ih*scale) 25 | 26 | image = image.resize((nw,nh), Image.BICUBIC) 27 | new_image = Image.new('RGB', size, (128,128,128)) 28 | new_image.paste(image, ((w-nw)//2, (h-nh)//2)) 29 | 30 | return new_image, nw, nh 31 | 32 | #---------------------------------------------------# 33 | # 获得学习率 34 | #---------------------------------------------------# 35 | def get_lr(optimizer): 36 | for param_group in optimizer.param_groups: 37 | return param_group['lr'] 38 | 39 | def preprocess_input(image): 40 | image -= np.array([123.675, 116.28, 103.53], np.float32) 41 | image /= np.array([58.395, 57.12, 57.375], np.float32) 42 | return image 43 | 44 | def show_config(**kwargs): 45 | print('Configurations:') 46 | print('-' * 70) 47 | print('|%25s | %40s|' % ('keys', 'values')) 48 | print('-' * 70) 49 | for key, value in kwargs.items(): 50 | print('|%25s | %40s|' % (str(key), str(value))) 51 | print('-' * 70) 52 | 53 | def download_weights(phi, model_dir="./model_data"): 54 | import os 55 | from torch.hub import load_state_dict_from_url 56 | 57 | download_urls = { 58 | 'b0' : "https://github.com/bubbliiiing/segformer-pytorch/releases/download/v1.0/segformer_b0_backbone_weights.pth", 59 | 'b1' : 
"https://github.com/bubbliiiing/segformer-pytorch/releases/download/v1.0/segformer_b1_backbone_weights.pth", 60 | 'b2' : "https://github.com/bubbliiiing/segformer-pytorch/releases/download/v1.0/segformer_b2_backbone_weights.pth", 61 | 'b3' : "https://github.com/bubbliiiing/segformer-pytorch/releases/download/v1.0/segformer_b3_backbone_weights.pth", 62 | 'b4' : "https://github.com/bubbliiiing/segformer-pytorch/releases/download/v1.0/segformer_b4_backbone_weights.pth", 63 | 'b5' : "https://github.com/bubbliiiing/segformer-pytorch/releases/download/v1.0/segformer_b5_backbone_weights.pth", 64 | } 65 | url = download_urls[phi] 66 | 67 | if not os.path.exists(model_dir): 68 | os.makedirs(model_dir) 69 | load_state_dict_from_url(url, model_dir) -------------------------------------------------------------------------------- /utils/utils_fit.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | from nets.segformer_training import (CE_Loss, Dice_loss, Focal_Loss, 5 | weights_init) 6 | from tqdm import tqdm 7 | 8 | from utils.utils import get_lr 9 | from utils.utils_metrics import f_score 10 | 11 | 12 | def fit_one_epoch(model_train, model, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, cuda, dice_loss, focal_loss, cls_weights, num_classes, fp16, scaler, save_period, save_dir, local_rank=0): 13 | total_loss = 0 14 | total_f_score = 0 15 | 16 | val_loss = 0 17 | val_f_score = 0 18 | 19 | if local_rank == 0: 20 | print('Start Train') 21 | pbar = tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) 22 | model_train.train() 23 | for iteration, batch in enumerate(gen): 24 | if iteration >= epoch_step: 25 | break 26 | imgs, pngs, labels = batch 27 | with torch.no_grad(): 28 | weights = torch.from_numpy(cls_weights) 29 | if cuda: 30 | imgs = imgs.cuda(local_rank) 31 | pngs = pngs.cuda(local_rank) 32 | labels = 
labels.cuda(local_rank) 33 | weights = weights.cuda(local_rank) 34 | 35 | optimizer.zero_grad() 36 | if not fp16: 37 | #----------------------# 38 | # 前向传播 39 | #----------------------# 40 | outputs = model_train(imgs) 41 | #----------------------# 42 | # 计算损失 43 | #----------------------# 44 | if focal_loss: 45 | loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes) 46 | else: 47 | loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes) 48 | 49 | if dice_loss: 50 | main_dice = Dice_loss(outputs, labels) 51 | loss = loss + main_dice 52 | 53 | with torch.no_grad(): 54 | #-------------------------------# 55 | # 计算f_score 56 | #-------------------------------# 57 | _f_score = f_score(outputs, labels) 58 | 59 | loss.backward() 60 | optimizer.step() 61 | else: 62 | from torch.cuda.amp import autocast 63 | with autocast(): 64 | #----------------------# 65 | # 前向传播 66 | #----------------------# 67 | outputs = model_train(imgs) 68 | #----------------------# 69 | # 计算损失 70 | #----------------------# 71 | if focal_loss: 72 | loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes) 73 | else: 74 | loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes) 75 | 76 | if dice_loss: 77 | main_dice = Dice_loss(outputs, labels) 78 | loss = loss + main_dice 79 | 80 | with torch.no_grad(): 81 | #-------------------------------# 82 | # 计算f_score 83 | #-------------------------------# 84 | _f_score = f_score(outputs, labels) 85 | 86 | #----------------------# 87 | # 反向传播 88 | #----------------------# 89 | scaler.scale(loss).backward() 90 | scaler.step(optimizer) 91 | scaler.update() 92 | 93 | total_loss += loss.item() 94 | total_f_score += _f_score.item() 95 | 96 | if local_rank == 0: 97 | pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1), 98 | 'f_score' : total_f_score / (iteration + 1), 99 | 'lr' : get_lr(optimizer)}) 100 | pbar.update(1) 101 | 102 | if local_rank == 0: 103 | pbar.close() 104 | print('Finish Train') 105 | 
print('Start Validation') 106 | pbar = tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) 107 | 108 | model_train.eval() 109 | for iteration, batch in enumerate(gen_val): 110 | if iteration >= epoch_step_val: 111 | break 112 | imgs, pngs, labels = batch 113 | with torch.no_grad(): 114 | weights = torch.from_numpy(cls_weights) 115 | if cuda: 116 | imgs = imgs.cuda(local_rank) 117 | pngs = pngs.cuda(local_rank) 118 | labels = labels.cuda(local_rank) 119 | weights = weights.cuda(local_rank) 120 | 121 | #----------------------# 122 | # 前向传播 123 | #----------------------# 124 | outputs = model_train(imgs) 125 | #----------------------# 126 | # 损失计算 127 | #----------------------# 128 | if focal_loss: 129 | loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes) 130 | else: 131 | loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes) 132 | 133 | if dice_loss: 134 | main_dice = Dice_loss(outputs, labels) 135 | loss = loss + main_dice 136 | #-------------------------------# 137 | # 计算f_score 138 | #-------------------------------# 139 | _f_score = f_score(outputs, labels) 140 | 141 | val_loss += loss.item() 142 | val_f_score += _f_score.item() 143 | 144 | if local_rank == 0: 145 | pbar.set_postfix(**{'val_loss' : val_loss / (iteration + 1), 146 | 'f_score' : val_f_score / (iteration + 1), 147 | 'lr' : get_lr(optimizer)}) 148 | pbar.update(1) 149 | 150 | if local_rank == 0: 151 | pbar.close() 152 | print('Finish Validation') 153 | loss_history.append_loss(epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val) 154 | eval_callback.on_epoch_end(epoch + 1, model_train) 155 | print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch)) 156 | print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val)) 157 | 158 | #-----------------------------------------------# 159 | # 保存权值 160 | #-----------------------------------------------# 161 | if (epoch + 1) % save_period == 0 or epoch + 
1 == Epoch: 162 | torch.save(model.state_dict(), os.path.join(save_dir, 'ep%03d-loss%.3f-val_loss%.3f.pth'%((epoch + 1), total_loss / epoch_step, val_loss / epoch_step_val))) 163 | 164 | if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss): 165 | print('Save best model to best_epoch_weights.pth') 166 | torch.save(model.state_dict(), os.path.join(save_dir, "best_epoch_weights.pth")) 167 | 168 | torch.save(model.state_dict(), os.path.join(save_dir, "last_epoch_weights.pth")) 169 | -------------------------------------------------------------------------------- /utils/utils_metrics.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import os 3 | from os.path import join 4 | 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | import torch 8 | import torch.nn.functional as F 9 | from PIL import Image 10 | 11 | 12 | def f_score(inputs, target, beta=1, smooth = 1e-5, threhold = 0.5): 13 | n, c, h, w = inputs.size() 14 | nt, ht, wt, ct = target.size() 15 | if h != ht and w != wt: 16 | inputs = F.interpolate(inputs, size=(ht, wt), mode="bilinear", align_corners=True) 17 | 18 | temp_inputs = torch.softmax(inputs.transpose(1, 2).transpose(2, 3).contiguous().view(n, -1, c),-1) 19 | temp_target = target.view(n, -1, ct) 20 | 21 | #--------------------------------------------# 22 | # 计算dice系数 23 | #--------------------------------------------# 24 | temp_inputs = torch.gt(temp_inputs, threhold).float() 25 | tp = torch.sum(temp_target[...,:-1] * temp_inputs, axis=[0,1]) 26 | fp = torch.sum(temp_inputs , axis=[0,1]) - tp 27 | fn = torch.sum(temp_target[...,:-1] , axis=[0,1]) - tp 28 | 29 | score = ((1 + beta ** 2) * tp + smooth) / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + smooth) 30 | score = torch.mean(score) 31 | return score 32 | 33 | # 设标签宽W,长H 34 | def fast_hist(a, b, n): 35 | #--------------------------------------------------------------------------------# 36 | # 
def per_class_iu(hist):
    """Per-class IoU from a confusion matrix: diag / (rows + cols - diag).

    The denominator is clamped to 1 so classes absent from both label and
    prediction yield 0 instead of a division-by-zero warning.
    """
    return np.diag(hist) / np.maximum((hist.sum(1) + hist.sum(0) - np.diag(hist)), 1)

def per_class_PA_Recall(hist):
    """Per-class pixel accuracy (identical to recall): diag / row sums."""
    return np.diag(hist) / np.maximum(hist.sum(1), 1)

def per_class_Precision(hist):
    """Per-class precision: diag / column sums."""
    return np.diag(hist) / np.maximum(hist.sum(0), 1)

def per_Accuracy(hist):
    """Overall pixel accuracy: correctly classified pixels / all pixels."""
    return np.sum(np.diag(hist)) / np.maximum(np.sum(hist), 1)

def compute_mIoU(gt_dir, pred_dir, png_name_list, num_classes, name_classes=None):
    """Accumulate a confusion matrix over a validation set and report metrics.

    Args:
        gt_dir: directory holding ground-truth label PNGs.
        pred_dir: directory holding predicted segmentation PNGs.
        png_name_list: image ids (file names without the ".png" suffix).
        num_classes: number of segmentation classes.
        name_classes: optional class names; enables per-class and progress
            printing when provided.

    Returns:
        (hist, IoUs, PA_Recall, Precision) where hist is the integer
        confusion matrix of shape (num_classes, num_classes).
    """
    print('Num classes', num_classes)
    #-----------------------------------------#
    #   Running confusion matrix, accumulated image by image.
    #-----------------------------------------#
    hist = np.zeros((num_classes, num_classes))

    #------------------------------------------------#
    #   Full paths for every (label, prediction) pair.
    #------------------------------------------------#
    gt_imgs = [join(gt_dir, x + ".png") for x in png_name_list]
    pred_imgs = [join(pred_dir, x + ".png") for x in png_name_list]

    #------------------------------------------------#
    #   Read each (prediction, label) pair.
    #------------------------------------------------#
    for ind in range(len(gt_imgs)):
        pred = np.array(Image.open(pred_imgs[ind]))
        label = np.array(Image.open(gt_imgs[ind]))

        # Skip pairs whose sizes disagree; they cannot be compared pixel-wise.
        if len(label.flatten()) != len(pred.flatten()):
            print(
                'Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(
                    len(label.flatten()), len(pred.flatten()), gt_imgs[ind],
                    pred_imgs[ind]))
            continue

        #------------------------------------------------#
        #   Add this image's num_classes x num_classes histogram.
        #------------------------------------------------#
        hist += fast_hist(label.flatten(), pred.flatten(), num_classes)
        # Every 10 images, print the metrics accumulated so far.
        if name_classes is not None and ind > 0 and ind % 10 == 0:
            print('{:d} / {:d}: mIou-{:0.2f}%; mPA-{:0.2f}%; Accuracy-{:0.2f}%'.format(
                    ind,
                    len(gt_imgs),
                    100 * np.nanmean(per_class_iu(hist)),
                    100 * np.nanmean(per_class_PA_Recall(hist)),
                    100 * per_Accuracy(hist)
                )
            )
    #------------------------------------------------#
    #   Final per-class metrics over the whole validation set.
    #------------------------------------------------#
    IoUs = per_class_iu(hist)
    PA_Recall = per_class_PA_Recall(hist)
    Precision = per_class_Precision(hist)
    #------------------------------------------------#
    #   Per-class report.
    #------------------------------------------------#
    if name_classes is not None:
        for ind_class in range(num_classes):
            print('===>' + name_classes[ind_class] + ':\tIou-' + str(round(IoUs[ind_class] * 100, 2)) \
                + '; Recall (equal to the PA)-' + str(round(PA_Recall[ind_class] * 100, 2)) + '; Precision-' + str(round(Precision[ind_class] * 100, 2)))

    #-----------------------------------------------------------------#
    #   Mean metrics across classes; NaNs from absent classes ignored.
    #-----------------------------------------------------------------#
    print('===> mIoU: ' + str(round(np.nanmean(IoUs) * 100, 2)) + '; mPA: ' + str(round(np.nanmean(PA_Recall) * 100, 2)) + '; Accuracy: ' + str(round(per_Accuracy(hist) * 100, 2)))
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the old
    # `np.array(hist, np.int)` raises AttributeError on modern NumPy.
    return hist.astype(np.int64), IoUs, PA_Recall, Precision

def adjust_axes(r, t, fig, axes):
    """Widen the x-axis limit so the text label `t` fits inside the figure."""
    bb = t.get_window_extent(renderer=r)
    text_width_inches = bb.width / fig.dpi
    current_fig_width = fig.get_figwidth()
    new_fig_width = current_fig_width + text_width_inches
    proportion = new_fig_width / current_fig_width
    x_lim = axes.get_xlim()
    axes.set_xlim([x_lim[0], x_lim[1] * proportion])

def draw_plot_func(values, name_classes, plot_title, x_label, output_path, tick_font_size = 12, plt_show = True):
    """Draw a horizontal bar chart of per-class `values` and save it to `output_path`."""
    fig = plt.gcf()
    axes = plt.gca()
    plt.barh(range(len(values)), values, color='royalblue')
    plt.title(plot_title, fontsize=tick_font_size + 2)
    plt.xlabel(x_label, fontsize=tick_font_size)
    plt.yticks(range(len(values)), name_classes, fontsize=tick_font_size)
    r = fig.canvas.get_renderer()
    for i, val in enumerate(values):
        str_val = " " + str(val)
        if val < 1.0:
            str_val = " {0:.2f}".format(val)
        t = plt.text(val, i, str_val, color='royalblue', va='center', fontweight='bold')
        # Only the last bar's label is used to decide how much to widen the axis.
        if i == (len(values) - 1):
            adjust_axes(r, t, fig, axes)

    fig.tight_layout()
    fig.savefig(output_path)
    if plt_show:
        plt.show()
    plt.close()

def show_results(miou_out_path, hist, IoUs, PA_Recall, Precision, name_classes, tick_font_size = 12):
    """Save mIoU/mPA/Recall/Precision bar charts and the confusion-matrix CSV.

    Note: PA_Recall is intentionally plotted twice (as mPA and as Recall) —
    per-class pixel accuracy equals recall.
    """
    draw_plot_func(IoUs, name_classes, "mIoU = {0:.2f}%".format(np.nanmean(IoUs)*100), "Intersection over Union", \
        os.path.join(miou_out_path, "mIoU.png"), tick_font_size = tick_font_size, plt_show = True)
    print("Save mIoU out to " + os.path.join(miou_out_path, "mIoU.png"))

    draw_plot_func(PA_Recall, name_classes, "mPA = {0:.2f}%".format(np.nanmean(PA_Recall)*100), "Pixel Accuracy", \
        os.path.join(miou_out_path, "mPA.png"), tick_font_size = tick_font_size, plt_show = False)
    print("Save mPA out to " + os.path.join(miou_out_path, "mPA.png"))

    draw_plot_func(PA_Recall, name_classes, "mRecall = {0:.2f}%".format(np.nanmean(PA_Recall)*100), "Recall", \
        os.path.join(miou_out_path, "Recall.png"), tick_font_size = tick_font_size, plt_show = False)
    print("Save Recall out to " + os.path.join(miou_out_path, "Recall.png"))

    draw_plot_func(Precision, name_classes, "mPrecision = {0:.2f}%".format(np.nanmean(Precision)*100), "Precision", \
        os.path.join(miou_out_path, "Precision.png"), tick_font_size = tick_font_size, plt_show = False)
    print("Save Precision out to " + os.path.join(miou_out_path, "Precision.png"))

    # Confusion matrix as CSV: header row of class names, one row per class.
    with open(os.path.join(miou_out_path, "confusion_matrix.csv"), 'w', newline='') as f:
        writer = csv.writer(f)
        writer_list = []
        writer_list.append([' '] + [str(c) for c in name_classes])
        for i in range(len(hist)):
            writer_list.append([name_classes[i]] + [str(x) for x in hist[i]])
        writer.writerows(writer_list)
    print("Save confusion_matrix out to " + os.path.join(miou_out_path, "confusion_matrix.csv"))
#-------------------------------------------------------#
#   Lower trainval_percent to carve out a test split.
#   train_percent controls the train/val ratio (here 9:1).
#
#   This repo currently uses the validation split as the
#   test set and does not keep a separate test split.
#-------------------------------------------------------#
trainval_percent = 1
train_percent = 0.9
#-------------------------------------------------------#
#   Root folder of the VOC-style dataset
#   (defaults to ./VOCdevkit in the repo root).
#-------------------------------------------------------#
VOCdevkit_path = './VOCdevkit'

if __name__ == "__main__":
    random.seed(0)
    print("Generate txt in ImageSets.")
    segfilepath = os.path.join(VOCdevkit_path, 'VOC2007/SegmentationClass')
    saveBasePath = os.path.join(VOCdevkit_path, 'VOC2007/ImageSets/Segmentation')

    # Only .png label files count as samples.
    total_seg = [seg for seg in os.listdir(segfilepath) if seg.endswith(".png")]

    num = len(total_seg)
    indices = range(num)  # renamed: the original shadowed the builtin `list`
    tv = int(num * trainval_percent)
    tr = int(tv * train_percent)
    # Same seeded two-stage sampling as before, so splits stay reproducible.
    trainval = random.sample(indices, tv)
    train = random.sample(trainval, tr)

    print("train and val size", tv)
    print("train size", tr)  # FIX: message was the typo "traub suze"

    # Sets give O(1) membership tests in the loop below (lists were O(n)).
    trainval_ids = set(trainval)
    train_ids = set(train)

    # Context managers guarantee all four txt files are closed even on error.
    with open(os.path.join(saveBasePath, 'trainval.txt'), 'w') as ftrainval, \
         open(os.path.join(saveBasePath, 'test.txt'), 'w') as ftest, \
         open(os.path.join(saveBasePath, 'train.txt'), 'w') as ftrain, \
         open(os.path.join(saveBasePath, 'val.txt'), 'w') as fval:
        for i in indices:
            name = total_seg[i][:-4] + '\n'
            if i in trainval_ids:
                ftrainval.write(name)
                if i in train_ids:
                    ftrain.write(name)
                else:
                    fval.write(name)
            else:
                ftest.write(name)

    print("Generate txt in ImageSets done.")

    print("Check datasets format, this may take a while.")
    print("检查数据集格式是否符合要求,这可能需要一段时间。")
    # FIX: np.int was removed in NumPy 1.24; use an explicit fixed-width dtype.
    classes_nums = np.zeros([256], np.int64)
    for i in tqdm(indices):
        name = total_seg[i]
        png_file_name = os.path.join(segfilepath, name)
        if not os.path.exists(png_file_name):
            raise ValueError("未检测到标签图片%s,请查看具体路径下文件是否存在以及后缀是否为png。"%(png_file_name))

        png = np.array(Image.open(png_file_name), np.uint8)
        if len(np.shape(png)) > 2:
            print("标签图片%s的shape为%s,不属于灰度图或者八位彩图,请仔细检查数据集格式。"%(name, str(np.shape(png))))
            # FIX: this message has no % placeholders; the original applied
            # "% (name, shape)" to it, which raised TypeError at runtime.
            print("标签图片需要为灰度图或者八位彩图,标签的每个像素点的值就是这个像素点所属的种类。")

        # Histogram of label values 0..255, accumulated over all labels.
        classes_nums += np.bincount(np.reshape(png, [-1]), minlength=256)

    print("打印像素点的值与数量。")
    print('-' * 37)
    print("| %15s | %15s |"%("Key", "Value"))
    print('-' * 37)
    for i in range(256):
        if classes_nums[i] > 0:
            print("| %15s | %15s |"%(str(i), str(classes_nums[i])))
    print('-' * 37)

    # Sanity checks for common label-format mistakes in binary segmentation.
    if classes_nums[255] > 0 and classes_nums[0] > 0 and np.sum(classes_nums[1:255]) == 0:
        print("检测到标签中像素点的值仅包含0与255,数据格式有误。")
        print("二分类问题需要将标签修改为背景的像素点值为0,目标的像素点值为1。")
    elif classes_nums[0] > 0 and np.sum(classes_nums[1:]) == 0:
        print("检测到标签中仅仅包含背景像素点,数据格式有误,请仔细检查数据集格式。")

    print("JPEGImages中的图片应当为.jpg文件、SegmentationClass中的图片应当为.png文件。")