├── .gitignore ├── EXPERIMENTS_CONBUILDMAT.sh ├── LICENSE.md ├── README.md ├── config.py ├── data ├── __init__.py ├── dataset.py ├── dataset_catalog.py ├── input_crack_segmentation.py ├── input_dagm.py ├── input_ksdd.py ├── input_ksdd2.py ├── input_sccdnet_dataset.py └── input_steel.py ├── datasets └── README.md ├── end2end.py ├── environment.yml ├── eval_speed.py ├── evaluate_output.py ├── models.py ├── requirements.txt ├── res ├── intro_wide.png └── segmentation.png ├── runs.txt ├── splits └── SCCDNet │ ├── test_CFD.txt │ ├── test_DeepCrack.txt │ ├── test_GAPS.txt │ ├── test_cracktree200.txt │ ├── test_forest.txt │ ├── test_noncrack.txt │ └── test_rissbilder.txt ├── train_net.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ -------------------------------------------------------------------------------- /EXPERIMENTS_CONBUILDMAT.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DATASET_ROOT=./SCCDNet-corrected 4 | RESULTS=./RESULTS 5 | 6 | TRAIN_ARGS=(--LEARNING_RATE=0.001 7 | --AUGMENTATION=True 8 | --BATCH_SIZE=10 9 | --BCE_LOSS_W=True 10 | --BEST_MODEL_TYPE=both 11 | --DELTA_CLS_LOSS=0.1 12 | --DILATE=1 13 | --DYN_BALANCED_LOSS=False 14 | --EPOCHS=200 15 | --NUM_SEGMENTED=6164 16 | --FREQUENCY_SAMPLING=False 17 | --GPU=0 18 | --GRADIENT_ADJUSTMENT=True 19 | --DATASET=sccdnet 20 | --DATASET_PATH="${DATASET_ROOT}" 21 | --ON_DEMAND_READ=False 22 | --PXL_DISTANCE=2 23 | --RESULTS_PATH="${RESULTS}" 24 | --SAVE_IMAGES=True 25 | --SEG_BLACK=False 26 | --USE_BEST_MODEL=True 27 | --VALIDATE=True 28 | --VALIDATE_ON_TEST=True 29 | --VALIDATION_N_EPOCHS=2 30 | --WEIGHTED_SEG_LOSS=False) 31 | 32 | mkdir -p ./RESULTS/sccdnet/ 33 | 34 | # shellcheck disable=SC2068 35 | CUDA_VISIBLE_DEVICES=0 python -u train_net.py --RUN_NAME=segdecnet++_run_1 --REPRODUCIBLE_RUN=101 ${TRAIN_ARGS[@]} > $RESULTS/sccdnet/paper_sccdnet_1.out 2>&1 & 36 | 
CUDA_VISIBLE_DEVICES=1 python -u train_net.py --RUN_NAME=segdecnet++_run_2 --REPRODUCIBLE_RUN=102 ${TRAIN_ARGS[@]} > $RESULTS/sccdnet/paper_sccdnet_2.out 2>&1 & 37 | CUDA_VISIBLE_DEVICES=2 python -u train_net.py --RUN_NAME=segdecnet++_run_3 --REPRODUCIBLE_RUN=103 ${TRAIN_ARGS[@]} > $RESULTS/sccdnet/paper_sccdnet_3.out 2>&1 & 38 | CUDA_VISIBLE_DEVICES=3 python -u train_net.py --RUN_NAME=segdecnet++_run_4 --REPRODUCIBLE_RUN=104 ${TRAIN_ARGS[@]} > $RESULTS/sccdnet/paper_sccdnet_4.out 2>&1 & 39 | CUDA_VISIBLE_DEVICES=4 python -u train_net.py --RUN_NAME=segdecnet++_run_5 --REPRODUCIBLE_RUN=105 ${TRAIN_ARGS[@]} > $RESULTS/sccdnet/paper_sccdnet_5.out 2>&1 & 40 | wait 41 | 42 | ######################################################################################################################################## 43 | # evaluate the best model based on stored output 44 | 45 | echo "SegDecNet++ for Segmentation (segdecnet++_run_1/seg_pred_bin)" 46 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -pred_dir $RESULTS/sccdnet/segdecnet++_run_1/seg_pred_bin -threshold 0.5 47 | 48 | echo "SegDecNet++ for Segmentation (segdecnet++_run_2/seg_pred_bin)" 49 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -pred_dir $RESULTS/sccdnet/segdecnet++_run_2/seg_pred_bin -threshold 0.5 50 | 51 | echo "SegDecNet++ for Segmentation (segdecnet++_run_3/seg_pred_bin)" 52 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -pred_dir $RESULTS/sccdnet/segdecnet++_run_3/seg_pred_bin -threshold 0.5 53 | 54 | echo "SegDecNet++ for Segmentation (segdecnet++_run_4/seg_pred_bin)" 55 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -pred_dir $RESULTS/sccdnet/segdecnet++_run_4/seg_pred_bin -threshold 0.5 56 | 57 | echo "SegDecNet++ for Segmentation (segdecnet++_run_5/seg_pred_bin)" 58 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -pred_dir $RESULTS/sccdnet/segdecnet++_run_5/seg_pred_bin -threshold 0.5 
59 | 60 | ######################################################################################################################################## 61 | # evaluate by dataset source 62 | 63 | for subset in CFD cracktree200 DeepCrack forest GAPS rissbilder noncrack; do 64 | echo "########################################################## $subset ##########################################################" 65 | 66 | echo "SegDecNet for Segmentation (segdecnet++_run_1/seg_pred_bin)" 67 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -gt_filenames ./splits/SCCDNet/test_$subset.txt -pred_dir $RESULTS/sccdnet/segdecnet++_run_1/seg_pred_bin -threshold 0.5 68 | 69 | echo "SegDecNet for Segmentation (segdecnet++_run_2/seg_pred_bin)" 70 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -gt_filenames ./splits/SCCDNet/test_$subset.txt -pred_dir $RESULTS/sccdnet/segdecnet++_run_2/seg_pred_bin -threshold 0.5 71 | 72 | echo "SegDecNet for Segmentation (segdecnet++_run_3/seg_pred_bin)" 73 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -gt_filenames ./splits/SCCDNet/test_$subset.txt -pred_dir $RESULTS/sccdnet/segdecnet++_run_3/seg_pred_bin -threshold 0.5 74 | 75 | echo "SegDecNet for Segmentation (segdecnet++_run_4/seg_pred_bin)" 76 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -gt_filenames ./splits/SCCDNet/test_$subset.txt -pred_dir $RESULTS/sccdnet/segdecnet++_run_4/seg_pred_bin -threshold 0.5 77 | 78 | echo "SegDecNet for Segmentation (segdecnet++_run_5/seg_pred_bin)" 79 | python evaluate_output.py -ground_truth_dir $DATASET_ROOT/test/masks -gt_filenames ./splits/SCCDNet/test_$subset.txt -pred_dir $RESULTS/sccdnet/segdecnet++_run_5/seg_pred_bin -threshold 0.5 80 | 81 | done 82 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # Creative Commons 
Attribution-NonCommercial-ShareAlike 4.0 International 2 | 3 | Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. 4 | 5 | ### Using Creative Commons Public Licenses 6 | 7 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. 8 | 9 | * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors). 
10 | 11 | * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees). 12 | 13 | ## Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License 14 | 15 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 16 | 17 | ### Section 1 – Definitions. 18 | 19 | a. 
__Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 20 | 21 | b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 22 | 23 | c. __BY-NC-SA Compatible License__ means a license listed at [creativecommons.org/compatiblelicenses](http://creativecommons.org/compatiblelicenses), approved by Creative Commons as essentially the equivalent of this Public License. 24 | 25 | d. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 26 | 27 | e. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 28 | 29 | f. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 30 | 31 | g. 
__License Elements__ means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution, NonCommercial, and ShareAlike. 32 | 33 | h. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 34 | 35 | i. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 36 | 37 | h. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License. 38 | 39 | i. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. 40 | 41 | j. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 42 | 43 | k. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 44 | 45 | l. __You__ means the individual or entity exercising the Licensed Rights under this Public License. 
Your has a corresponding meaning. 46 | 47 | ### Section 2 – Scope. 48 | 49 | a. ___License grant.___ 50 | 51 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 52 | 53 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and 54 | 55 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 56 | 57 | 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 58 | 59 | 3. __Term.__ The term of this Public License is specified in Section 6(a). 60 | 61 | 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 62 | 63 | 5. __Downstream recipients.__ 64 | 65 | A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 66 | 67 | B. 
__Additional offer from the Licensor – Adapted Material.__ Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply. 68 | 69 | C. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 70 | 71 | 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 72 | 73 | b. ___Other rights.___ 74 | 75 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 76 | 77 | 2. Patent and trademark rights are not licensed under this Public License. 78 | 79 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. 80 | 81 | ### Section 3 – License Conditions. 
82 | 83 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 84 | 85 | a. ___Attribution.___ 86 | 87 | 1. If You Share the Licensed Material (including in modified form), You must: 88 | 89 | A. retain the following if it is supplied by the Licensor with the Licensed Material: 90 | 91 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 92 | 93 | ii. a copyright notice; 94 | 95 | iii. a notice that refers to this Public License; 96 | 97 | iv. a notice that refers to the disclaimer of warranties; 98 | 99 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 100 | 101 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 102 | 103 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 104 | 105 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 106 | 107 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 108 | 109 | b. ___ShareAlike.___ 110 | 111 | In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 112 | 113 | 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-NC-SA Compatible License. 114 | 115 | 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. 
You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 116 | 117 | 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. 118 | 119 | ### Section 4 – Sui Generis Database Rights. 120 | 121 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 122 | 123 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; 124 | 125 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and 126 | 127 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 128 | 129 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 130 | 131 | ### Section 5 – Disclaimer of Warranties and Limitation of Liability. 132 | 133 | a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. 
This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__ 134 | 135 | b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__ 136 | 137 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 138 | 139 | ### Section 6 – Term and Termination. 140 | 141 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 142 | 143 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 144 | 145 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 146 | 147 | 2. upon express reinstatement by the Licensor. 148 | 149 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 150 | 151 | c. 
For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 152 | 153 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 154 | 155 | ### Section 7 – Other Terms and Conditions. 156 | 157 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 158 | 159 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 160 | 161 | ### Section 8 – Interpretation. 162 | 163 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 164 | 165 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 166 | 167 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 168 | 169 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 170 | 171 | > Creative Commons is not a party to its public licenses. 
Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. 172 | > 173 | > Creative Commons may be contacted at creativecommons.org -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SegDecNet++ for concrete crack segmentation 2 | An official PyTorch implementation for ["Automated detection and segmentation of cracks in concrete surfaces using joined segmentation and classification deep neural network"](http://prints.vicos.si/publications/430) published in journal Construction and Building Materials 2023. 3 | 4 | ![](res/intro_wide.png) 5 | 6 | [![CC BY-NC-SA 4.0][cc-by-nc-sa-shield]][cc-by-nc-sa] 7 | 8 | Code is licensed under [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License][cc-by-nc-sa]. For commercial use please contact danijel.skocaj@fri.uni-lj.si. 
9 | 10 | [![CC BY-NC-SA 4.0][cc-by-nc-sa-image]][cc-by-nc-sa] 11 | 12 | [cc-by-nc-sa]: http://creativecommons.org/licenses/by-nc-sa/4.0/ 13 | [cc-by-nc-sa-image]: https://licensebuttons.net/l/by-nc-sa/4.0/88x31.png 14 | [cc-by-nc-sa-shield]: https://img.shields.io/badge/License-CC%20BY--NC--SA%204.0-lightgrey.svg 15 | 16 | 17 | ## Citation 18 | Please cite our [Construction and Building Materials 2023 paper](http://prints.vicos.si/publications/430) when using this code: 19 | 20 | ``` 21 | @article{Tabernik2023CONBUILDMAT, 22 | author = {Tabernik, Domen and {\v{S}}uc, Matic and 23 | Sko{\v{c}}aj, Danijel}, 24 | journal = {Construction and Building Materials}, 25 | title = {{Automated detection and segmentation of cracks in concrete surfaces using joined segmentation and classification deep neural network}}, 26 | year = {2023} 27 | } 28 | ``` 29 | 30 | ## How to run: 31 | 32 | ### Requirements 33 | Code has been tested to work on: 34 | + Python 3.8 35 | + PyTorch 1.8 36 | + CUDA 11.1 37 | + using additional packages as listed in requirements.txt 38 | 39 | Deploy environment using conda: 40 | 41 | ```bash 42 | conda env create --name SegDecNet++ --file=environment.yml 43 | ``` 44 | 45 | ### Datasets 46 | We use dataset from [SCCDNet paper](https://www.mdpi.com/2076-3417/11/11/5074), which consists of the following image sets: 47 | * CFD 48 | * CRACK500 49 | * CrackTree200 50 | * DeepCrack 51 | * GAPs384 52 | * Rissbilder 53 | * non-crack images 54 | 55 | However, since the dataset contains major issues for Rissbilder groundtruth, we provide a corrected groundtruth for the whole SCCDNet dataset 56 | * [DOWNLOAD corrected SCCDNet dataset here.](https://go.vicos.si/sccdnetdbcorrected) 57 | 58 | ### Replicating paper results 59 | 60 | To replicate the results published in the paper run: 61 | ```bash 62 | ./EXPERIMENTS_CONBUILDMAT.sh 63 | ``` 64 | 65 | Results will be written to `./RESULTS` folders. 
66 | 67 | ### Usage of training/evaluation code 68 | The following python files are used to train/evaluate the model: 69 | + `train_net.py` Main entry for training and evaluation 70 | + `models.py` Model file for network 71 | + `data/dataset_catalog.py` Contains currently supported datasets 72 | 73 | ### Examples 74 | 75 | ![](res/segmentation.png) 76 | 77 | Examples of crack segmentation with our proposed method. We depict false positive pixels in red, and false negatives in yellow, while the correct 78 | background segmentation is in black and the correct foreground in white. 79 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | class Config: 2 | GPU = None 3 | 4 | RUN_NAME = None 5 | 6 | DATASET = None # KSDD, DAGM, STEEL, KSDD2, sccdnet 7 | DATASET_PATH = None 8 | 9 | EPOCHS = None 10 | 11 | LEARNING_RATE = None 12 | DELTA_CLS_LOSS = None 13 | 14 | BATCH_SIZE = None 15 | 16 | WEIGHTED_SEG_LOSS = None 17 | WEIGHTED_SEG_LOSS_P = None 18 | WEIGHTED_SEG_LOSS_MAX = None 19 | DYN_BALANCED_LOSS = None 20 | GRADIENT_ADJUSTMENT = None 21 | FREQUENCY_SAMPLING = True 22 | 23 | # Default values 24 | ARCHITECTURE = 'SegDecNet++' 25 | FOLD = None 26 | TRAIN_NUM = None 27 | NUM_SEGMENTED = None 28 | RESULTS_PATH = "./RESULTS" 29 | SPLITS_PATH = None 30 | 31 | VALIDATE = True 32 | VALIDATE_ON_TEST = True 33 | VALIDATION_N_EPOCHS = 5 34 | USE_BEST_MODEL = False 35 | 36 | ON_DEMAND_READ = False 37 | REPRODUCIBLE_RUN = None 38 | MEMORY_FIT = 1 39 | SAVE_IMAGES = True 40 | DILATE = 1 41 | 42 | # Auto filled 43 | INPUT_WIDTH = None 44 | INPUT_HEIGHT = None 45 | INPUT_CHANNELS = None 46 | 47 | BEST_MODEL_TYPE = "dec" 48 | AUGMENTATION = False 49 | USE_NEGATIVES = None 50 | VAL_NEG = None 51 | OPTIMIZER = "sgd" 52 | SCHEDULER = None 53 | HARD_NEG_MINING = None 54 | PXL_DISTANCE = 2 55 | THR_ADJUSTMENT = None 56 | SEG_BLACK = False 57 | BCE_LOSS_W = False 58 
| TRAIN_SPLIT = None 59 | 60 | def init_extra(self): 61 | if self.WEIGHTED_SEG_LOSS and (self.WEIGHTED_SEG_LOSS_P is None or self.WEIGHTED_SEG_LOSS_MAX is None): 62 | raise Exception("You also need to specify p and scaling factor for weighted segmentation loss!") 63 | if self.NUM_SEGMENTED is None: 64 | raise Exception("Missing NUM_SEGMENTED!") 65 | if self.DATASET == 'KSDD': 66 | self.INPUT_WIDTH = 512 67 | self.INPUT_HEIGHT = 1408 68 | self.INPUT_CHANNELS = 1 69 | 70 | if self.TRAIN_NUM is None: 71 | raise Exception("Missing TRAIN_NUM for KSDD dataset!") 72 | if self.NUM_SEGMENTED is None: 73 | raise Exception("Missing NUM_SEGMENTED for KSDD dataset!") 74 | if self.FOLD is None: 75 | raise Exception("Missing FOLD for KSDD dataset!") 76 | 77 | elif self.DATASET == 'DAGM': 78 | self.INPUT_WIDTH = 512 79 | self.INPUT_HEIGHT = 512 80 | self.INPUT_CHANNELS = 1 81 | if self.NUM_SEGMENTED is None: 82 | raise Exception("Missing NUM_SEGMENTED for DAGM dataset!") 83 | if self.FOLD is None: 84 | raise Exception("Missing FOLD for DAGM dataset!") 85 | elif self.DATASET == 'STEEL': 86 | self.INPUT_WIDTH = 1600 87 | self.INPUT_HEIGHT = 256 88 | self.INPUT_CHANNELS = 1 89 | 90 | self.VALIDATE_ON_TEST = False 91 | self.USE_BEST_MODEL = True 92 | print("Will use best model according to validation loss, validation is not performed on test set!") 93 | if not self.ON_DEMAND_READ: 94 | print("Will use ON_DEMAND_READ even though it is set on False!") 95 | self.ON_DEMAND_READ = True 96 | if self.TRAIN_NUM is None: 97 | raise Exception("Missing TRAIN_NUM for STEEL dataset!") 98 | if self.NUM_SEGMENTED is None: 99 | raise Exception("Missing NUM_SEGMENTED for STEEL dataset!") 100 | elif self.DATASET == 'KSDD2': 101 | self.INPUT_WIDTH = 448 102 | self.INPUT_HEIGHT = 448 103 | self.INPUT_CHANNELS = 3 104 | if self.NUM_SEGMENTED is None: 105 | raise Exception("Missing NUM_SEGMENTED for KSDD2 dataset!") 106 | elif self.DATASET in ['crack_segmentation', 'sccdnet']: 107 | self.INPUT_WIDTH = 448 
108 | self.INPUT_HEIGHT = 448 109 | self.INPUT_CHANNELS = 3 110 | if self.NUM_SEGMENTED is None: 111 | raise Exception("Missing NUM_SEGMENTED for crack_segmentation dataset!") 112 | elif self.DATASET == 'CFD': 113 | self.INPUT_WIDTH = 480 114 | self.INPUT_HEIGHT = 320 115 | self.INPUT_CHANNELS = 3 116 | if self.NUM_SEGMENTED is None: 117 | raise Exception("Missing NUM_SEGMENTED for CFD dataset!") 118 | elif self.DATASET == 'CRACK500': 119 | self.INPUT_WIDTH = 640 120 | self.INPUT_HEIGHT = 360 121 | self.INPUT_CHANNELS = 3 122 | if self.NUM_SEGMENTED is None: 123 | raise Exception("Missing NUM_SEGMENTED for CRACK500 dataset!") 124 | elif self.DATASET == 'DeepCrack': 125 | self.INPUT_WIDTH = 544 126 | self.INPUT_HEIGHT = 384 127 | self.INPUT_CHANNELS = 3 128 | if self.NUM_SEGMENTED is None: 129 | raise Exception("Missing NUM_SEGMENTED for DeepCrack dataset!") 130 | else: 131 | raise Exception('Unknown dataset {}'.format(self.DATASET)) 132 | 133 | def merge_from_args(self, args): 134 | self.ARCHITECTURE = args.ARCHITECTURE 135 | self.GPU = args.GPU 136 | self.RUN_NAME = args.RUN_NAME 137 | self.DATASET = args.DATASET 138 | self.DATASET_PATH = args.DATASET_PATH 139 | self.EPOCHS = args.EPOCHS 140 | self.LEARNING_RATE = args.LEARNING_RATE 141 | self.DELTA_CLS_LOSS = args.DELTA_CLS_LOSS 142 | self.BATCH_SIZE = args.BATCH_SIZE 143 | self.WEIGHTED_SEG_LOSS = args.WEIGHTED_SEG_LOSS 144 | self.WEIGHTED_SEG_LOSS_P = args.WEIGHTED_SEG_LOSS_P 145 | self.WEIGHTED_SEG_LOSS_MAX = args.WEIGHTED_SEG_LOSS_MAX 146 | self.DYN_BALANCED_LOSS = args.DYN_BALANCED_LOSS 147 | self.GRADIENT_ADJUSTMENT = args.GRADIENT_ADJUSTMENT 148 | self.FREQUENCY_SAMPLING = args.FREQUENCY_SAMPLING 149 | self.NUM_SEGMENTED = args.NUM_SEGMENTED 150 | 151 | self.BEST_MODEL_TYPE = args.BEST_MODEL_TYPE 152 | self.AUGMENTATION = args.AUGMENTATION 153 | self.USE_NEGATIVES = args.USE_NEGATIVES 154 | self.VAL_NEG = args.VAL_NEG 155 | self.OPTIMIZER = args.OPTIMIZER 156 | self.SCHEDULER = args.SCHEDULER 157 | 
self.HARD_NEG_MINING = args.HARD_NEG_MINING 158 | self.PXL_DISTANCE = args.PXL_DISTANCE 159 | self.THR_ADJUSTMENT = args.THR_ADJUSTMENT 160 | self.SEG_BLACK = args.SEG_BLACK 161 | self.BCE_LOSS_W = args.BCE_LOSS_W 162 | self.TRAIN_SPLIT = args.TRAIN_SPLIT 163 | 164 | if args.FOLD is not None: self.FOLD = args.FOLD 165 | if args.TRAIN_NUM is not None: self.TRAIN_NUM = args.TRAIN_NUM 166 | if args.RESULTS_PATH is not None: self.RESULTS_PATH = args.RESULTS_PATH 167 | if args.VALIDATE is not None: self.VALIDATE = args.VALIDATE 168 | if args.VALIDATE_ON_TEST is not None: self.VALIDATE_ON_TEST = args.VALIDATE_ON_TEST 169 | if args.VALIDATION_N_EPOCHS is not None: self.VALIDATION_N_EPOCHS = args.VALIDATION_N_EPOCHS 170 | if args.USE_BEST_MODEL is not None: self.USE_BEST_MODEL = args.USE_BEST_MODEL 171 | if args.ON_DEMAND_READ is not None: self.ON_DEMAND_READ = args.ON_DEMAND_READ 172 | if args.REPRODUCIBLE_RUN is not None: self.REPRODUCIBLE_RUN = args.REPRODUCIBLE_RUN 173 | if args.MEMORY_FIT is not None: self.MEMORY_FIT = args.MEMORY_FIT 174 | if args.SAVE_IMAGES is not None: self.SAVE_IMAGES = args.SAVE_IMAGES 175 | if args.DILATE is not None: self.DILATE = args.DILATE 176 | 177 | def get_as_dict(self): 178 | params = { 179 | "ARCHITECTURE": self.ARCHITECTURE, 180 | "GPU": self.GPU, 181 | "DATASET": self.DATASET, 182 | "DATASET_PATH": self.DATASET_PATH, 183 | "EPOCHS": self.EPOCHS, 184 | "LEARNING_RATE": self.LEARNING_RATE, 185 | "DELTA_CLS_LOSS": self.DELTA_CLS_LOSS, 186 | "BATCH_SIZE": self.BATCH_SIZE, 187 | "WEIGHTED_SEG_LOSS": self.WEIGHTED_SEG_LOSS, 188 | "WEIGHTED_SEG_LOSS_P": self.WEIGHTED_SEG_LOSS_P, 189 | "WEIGHTED_SEG_LOSS_MAX": self.WEIGHTED_SEG_LOSS_MAX, 190 | "DYN_BALANCED_LOSS": self.DYN_BALANCED_LOSS, 191 | "GRADIENT_ADJUSTMENT": self.GRADIENT_ADJUSTMENT, 192 | "FREQUENCY_SAMPLING": self.FREQUENCY_SAMPLING, 193 | "FOLD": self.FOLD, 194 | "TRAIN_NUM": self.TRAIN_NUM, 195 | "NUM_SEGMENTED": self.NUM_SEGMENTED, 196 | "RESULTS_PATH": self.RESULTS_PATH, 197 | 
"VALIDATE": self.VALIDATE, 198 | "VALIDATE_ON_TEST": self.VALIDATE_ON_TEST, 199 | "VALIDATION_N_EPOCHS": self.VALIDATION_N_EPOCHS, 200 | "USE_BEST_MODEL": self.USE_BEST_MODEL, 201 | "ON_DEMAND_READ": self.ON_DEMAND_READ, 202 | "REPRODUCIBLE_RUN": self.REPRODUCIBLE_RUN, 203 | "MEMORY_FIT": self.MEMORY_FIT, 204 | "INPUT_WIDTH": self.INPUT_WIDTH, 205 | "INPUT_HEIGHT": self.INPUT_HEIGHT, 206 | "INPUT_CHANNELS": self.INPUT_CHANNELS, 207 | "SAVE_IMAGES": self.SAVE_IMAGES, 208 | "DILATE": self.DILATE, 209 | "BEST_MODEL_TYPE": self.BEST_MODEL_TYPE, 210 | "AUGMENTATION": self.AUGMENTATION, 211 | "USE_NEGATIVES": self.USE_NEGATIVES, 212 | "VAL_NEG": self.VAL_NEG, 213 | "OPTIMIZER": self.OPTIMIZER, 214 | "SCHEDULER": self.SCHEDULER, 215 | "HARD_NEG_MINING": self.HARD_NEG_MINING, 216 | "PXL_DISTANCE": self.PXL_DISTANCE, 217 | "THR_ADJUSTMENT": self.THR_ADJUSTMENT, 218 | "SEG_BLACK": self.SEG_BLACK, 219 | "BCE_LOSS_W": self.BCE_LOSS_W, 220 | "TRAIN_SPLIT": self.TRAIN_SPLIT 221 | } 222 | return params 223 | 224 | 225 | def load_from_dict(dictionary): 226 | cfg = Config() 227 | 228 | cfg.ARCHITECTURE = dictionary.get("ARCHITECTURE", None) 229 | cfg.GPU = dictionary.get("GPU", None) 230 | cfg.DATASET = dictionary.get("DATASET", None) 231 | cfg.DATASET_PATH = dictionary.get("DATASET_PATH", None) 232 | cfg.EPOCHS = dictionary.get("EPOCHS", None) 233 | cfg.LEARNING_RATE = dictionary.get("LEARNING_RATE", None) 234 | cfg.DELTA_CLS_LOSS = dictionary.get("DELTA_CLS_LOSS", None) 235 | cfg.BATCH_SIZE = dictionary.get("BATCH_SIZE", None) 236 | cfg.WEIGHTED_SEG_LOSS = dictionary.get("WEIGHTED_SEG_LOSS", None) 237 | cfg.WEIGHTED_SEG_LOSS_P = dictionary.get("WEIGHTED_SEG_LOSS_P", None) 238 | cfg.WEIGHTED_SEG_LOSS_MAX = dictionary.get("WEIGHTED_SEG_LOSS_MAX", None) 239 | cfg.DYN_BALANCED_LOSS = dictionary.get("DYN_BALANCED_LOSS", None) 240 | cfg.GRADIENT_ADJUSTMENT = dictionary.get("GRADIENT_ADJUSTMENT", None) 241 | cfg.FREQUENCY_SAMPLING = dictionary.get("FREQUENCY_SAMPLING", None) 242 | 
cfg.FOLD = dictionary.get("FOLD", None) 243 | cfg.TRAIN_NUM = dictionary.get("TRAIN_NUM", None) 244 | cfg.NUM_SEGMENTED = dictionary.get("NUM_SEGMENTED", None) 245 | cfg.RESULTS_PATH = dictionary.get("RESULTS_PATH", None) 246 | cfg.VALIDATE = dictionary.get("VALIDATE", None) 247 | cfg.VALIDATE_ON_TEST = dictionary.get("VALIDATE_ON_TEST", None) 248 | cfg.VALIDATION_N_EPOCHS = dictionary.get("VALIDATION_N_EPOCHS", None) 249 | cfg.USE_BEST_MODEL = dictionary.get("USE_BEST_MODEL", None) 250 | cfg.ON_DEMAND_READ = dictionary.get("ON_DEMAND_READ", None) 251 | cfg.REPRODUCIBLE_RUN = dictionary.get("REPRODUCIBLE_RUN", None) 252 | cfg.MEMORY_FIT = dictionary.get("MEMORY_FIT", None) 253 | cfg.INPUT_WIDTH = dictionary.get("INPUT_WIDTH", None) 254 | cfg.INPUT_HEIGHT = dictionary.get("INPUT_HEIGHT", None) 255 | cfg.INPUT_CHANNELS = dictionary.get("INPUT_CHANNELS", None) 256 | cfg.SAVE_IMAGES = dictionary.get("SAVE_IMAGES", None) 257 | cfg.DILATE = dictionary.get("DILATE", None) 258 | cfg.BEST_MODEL_TYPE = dictionary.get("BEST_MODEL_TYPE", None) 259 | cfg.AUGMENTATION = dictionary.get("AUGMENTATION", None) 260 | cfg.USE_NEGATIVES = dictionary.get("USE_NEGATIVES", None) 261 | cfg.VAL_NEG = dictionary.get("VAL_NEG", None) 262 | cfg.OPTIMIZER = dictionary.get("OPTIMIZER", None) 263 | cfg.SCHEDULER = dictionary.get("SCHEDULER", None) 264 | cfg.HARD_NEG_MINING = dictionary.get("HARD_NEG_MINING", None) 265 | cfg.PXL_DISTANCE = dictionary.get("PXL_DISTANCE", None) 266 | cfg.THR_ADJUSTMENT = dictionary.get("THR_ADJUSTMENT", None) 267 | cfg.SEG_BLACK = dictionary.get("SEG_BLACK", None) 268 | cfg.BCE_LOSS_W = dictionary.get("BCE_LOSS_W", None) 269 | cfg.TRAIN_SPLIT = dictionary.get("TRAIN_SPLIT", None) 270 | 271 | return cfg 272 | -------------------------------------------------------------------------------- /data/__init__.py: -------------------------------------------------------------------------------- 
# https://raw.githubusercontent.com/vicoslab/segdec-net-plusplus-conbuildmat2023/b0c553bfa2dcb31d703da1f49406790bbda51992/data/__init__.py

# ===================== data/dataset.py =====================
import cv2
import numpy as np
import torch
# scipy.ndimage.morphology is deprecated (and removed in recent SciPy);
# distance_transform_edt's public location is scipy.ndimage.
from scipy.ndimage import distance_transform_edt
from scipy.signal import convolve2d
from config import Config
from torchvision.transforms import functional as F
from torchvision.transforms import transforms as T

from torch.utils.data import Sampler
import pickle
#from datasets import LockableSeedRandomAccess

class Dataset(torch.utils.data.Dataset):
    """Base dataset: image/label loading, resizing, train-time augmentation
    and the positive/negative sampling bookkeeping shared by all datasets."""

    def __init__(self, path: str, cfg: Config, kind: str):
        super(Dataset, self).__init__()
        self.path: str = path
        self.cfg: Config = cfg
        self.kind: str = kind  # 'TRAIN' / 'VAL' / 'TEST'
        self.image_size: (int, int) = (self.cfg.INPUT_WIDTH, self.cfg.INPUT_HEIGHT)
        self.grayscale: bool = self.cfg.INPUT_CHANNELS == 1

        self.num_negatives_per_one_positive: int = 1
        # Frequency-based negative sampling is only meaningful during training.
        self.frequency_sampling: bool = self.cfg.FREQUENCY_SAMPLING and self.kind == 'TRAIN'

        if self.cfg.REPRODUCIBLE_RUN is not None:
            torch.random.manual_seed(self.cfg.REPRODUCIBLE_RUN)

    def init_extra(self):
        # Called by subclasses at the end of read_contents(): sets up the
        # negative-sample permutation and retrieval-frequency counters.
        self.counter = 0
        self.neg_imgs_permutation = np.random.permutation(self.num_neg)

        self.neg_retrieval_freq = np.zeros(shape=self.num_neg)

    def count_pixels(self, pixel_type):
        # Total count of pixels equal to pixel_type over all stored masks
        # (s[1] is the segmentation mask in the sample tuple).
        return sum([(s[1] == pixel_type).sum().item() for s in self.pos_samples]) + sum([(s[1] == pixel_type).sum().item() for s in self.neg_samples])

    def __getitem__(self, index) -> (torch.Tensor, torch.Tensor, bool, str, bool):
        # Returns (image, seg_mask, is_segmented, sample_name, is_pos, index).

        # Once per epoch, re-draw which negatives are paired with positives.
        if self.counter >= self.len:
            self.counter = 0
            if self.frequency_sampling:
                # Prefer negatives that have been retrieved least often so far.
                sample_probability = 1 - (self.neg_retrieval_freq / np.max(self.neg_retrieval_freq))
                sample_probability = sample_probability - np.median(sample_probability) + 1
                sample_probability = sample_probability ** (np.log(len(sample_probability)) * 4)
                sample_probability = sample_probability / np.sum(sample_probability)

                # use replace=False for to get only unique values
                try:
                    self.neg_imgs_permutation = np.random.choice(range(self.num_neg), size=self.num_negatives_per_one_positive * self.num_pos, p=sample_probability, replace=False)
                except ValueError:
                    # np.random.choice raises ValueError when there are fewer
                    # candidates with non-zero probability than requested;
                    # fall back to sampling with replacement.
                    # (was a bare `except:`, which also hid unrelated errors)
                    self.neg_imgs_permutation = np.random.choice(range(self.num_neg), size=self.num_negatives_per_one_positive * self.num_pos, p=sample_probability, replace=True)
            else:
                self.neg_imgs_permutation = np.random.permutation(self.num_neg)

        if self.kind == 'TRAIN':
            # Indices past num_pos map into the current negative permutation.
            if index >= self.num_pos:
                ix = index % self.num_pos
                ix = self.neg_imgs_permutation[ix]
                item = self.neg_samples[ix]
                self.neg_retrieval_freq[ix] = self.neg_retrieval_freq[ix] + 1
            else:
                ix = index
                item = self.pos_samples[ix]
        else:
            # Evaluation order: all negatives first, then positives.
            if index < self.num_neg:
                ix = index
                item = self.neg_samples[ix]
            else:
                ix = index - self.num_neg
                item = self.pos_samples[ix]

        image, seg_mask, is_segmented, image_path, seg_mask_path, sample_name, is_pos = item

        if self.cfg.ON_DEMAND_READ:
            # Samples were stored as paths only; load and binarize now.
            image = self.read_img_resize(image_path, self.grayscale, self.image_size)
            image = self.to_tensor(image)
            seg_mask, _ = self.read_label_resize(seg_mask_path, self.image_size, self.cfg.DILATE)
            seg_mask = np.array((seg_mask > 0.5), dtype=np.float32)
            seg_mask = self.to_tensor(seg_mask)

        self.counter = self.counter + 1

        # Augmentation (train-time, positive samples only)
        if self.cfg.AUGMENTATION and self.kind == 'TRAIN' and is_pos:
            p1 = 0.5
            p2 = 0.5
            if torch.rand(1) < p1:
                # Horizontal flip
                if torch.rand(1) < p2:
                    image = F.hflip(image)
                    seg_mask = F.hflip(seg_mask)
                # Vertical flip
                if torch.rand(1) < p2:
                    image = F.vflip(image)
                    seg_mask = F.vflip(seg_mask)
                # 180 Rotation
                if torch.rand(1) < p2:
                    image = F.rotate(image, 180)
                    seg_mask = F.rotate(seg_mask, 180)
                # Color Jittering (image only; the mask is unaffected)
                if torch.rand(1) < p2:
                    color_jitter = T.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2)
                    image = color_jitter(image)

        return image, seg_mask, is_segmented, sample_name, is_pos, index

    def __len__(self):
        return self.len

    def read_contents(self):
        # Implemented by subclasses: must fill pos_samples/neg_samples,
        # num_pos/num_neg/len and call init_extra().
        pass

    def read_img_resize(self, path, grayscale, resize_dim) -> np.ndarray:
        """Read an image, optionally resize, and scale to [0, 1] float32."""
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_COLOR)
        if resize_dim is not None:
            img = cv2.resize(img, dsize=resize_dim)
        return np.array(img, dtype=np.float32) / 255.0

    def read_label_resize(self, path, resize_dim, dilate=None) -> (np.ndarray, bool):
        """Read a mask, optionally dilate and resize.

        Returns the [0, 1] float32 mask and whether it contains any positives.
        """
        lbl = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        if dilate is not None and dilate > 1:
            lbl = cv2.dilate(lbl, np.ones((dilate, dilate)))
        if resize_dim is not None:
            lbl = cv2.resize(lbl, dsize=resize_dim)
        return np.array((lbl / 255.0), dtype=np.float32), np.max(lbl) > 0

    def to_tensor(self, x) -> torch.Tensor:
        """Convert an HxW or HxWxC numpy image to a CHW float tensor."""
        if x.dtype != np.float32:
            x = (x / 255.0).astype(np.float32)

        if len(x.shape) == 3:
            x = np.transpose(x, axes=(2, 0, 1))  # HWC -> CHW
        else:
            x = np.expand_dims(x, axis=0)  # HW -> 1HW

        x = torch.from_numpy(x)
        return x

    def distance_transform(self, mask: np.ndarray, max_val: float, p: float) -> np.ndarray:
        """Per-component distance transform used to weight the seg. loss.

        Each connected component of `mask` is normalized to [0, 1], raised to
        the power `p` and scaled by `max_val`; background pixels get weight 1.
        """
        h, w = mask.shape[:2]
        dst_trf = np.zeros((h, w))

        num_labels, labels = cv2.connectedComponents((mask * 255.0).astype(np.uint8), connectivity=8)
        for idx in range(1, num_labels):
            mask_roi = np.zeros((h, w))
            k = labels == idx
            mask_roi[k] = 255
            dst_trf_roi = distance_transform_edt(mask_roi)
            if dst_trf_roi.max() > 0:
                dst_trf_roi = (dst_trf_roi / dst_trf_roi.max())
                dst_trf_roi = (dst_trf_roi ** p) * max_val
            dst_trf += dst_trf_roi

        dst_trf[mask == 0] = 1
        return np.array(dst_trf, dtype=np.float32)

    def downsize(self, image: np.ndarray, downsize_factor: int = 8) -> np.ndarray:
        """Smoothly downsample `image` by `downsize_factor` via reflection
        padding + average pooling."""
        img_t = torch.from_numpy(np.expand_dims(image, 0 if len(image.shape) == 3 else (0, 1)).astype(np.float32))
        img_t = torch.nn.ReflectionPad2d(padding=(downsize_factor))(img_t)
        image_np = torch.nn.AvgPool2d(kernel_size=2 * downsize_factor + 1, stride=downsize_factor)(img_t).detach().numpy()
        return image_np[0] if len(image.shape) == 3 else image_np[0, 0]

    def rle_to_mask(self, rle, image_size):
        """Decode a run-length-encoded mask (1-based, column-major order).

        `rle` alternates start position and run length; returns a uint8 HxW mask.
        """
        if len(rle) % 2 != 0:
            raise Exception('Suspicious')

        w, h = image_size
        mask_label = np.zeros(w * h, dtype=np.float32)

        positions = rle[0::2]
        length = rle[1::2]
        for pos, le in zip(positions, length):
            mask_label[pos - 1:pos + le - 1] = 1
        mask = np.reshape(mask_label, (h, w), order='F').astype(np.uint8)
        return mask


class HardExamplesBatchSampler(Sampler):
    """Batch sampler that reserves part of each batch for "hard" samples,
    i.e. samples whose recorded loss was high in the previous epoch."""

    def __init__(self, dataset, default_sampler, batch_size, hard_sample_size, drop_last, hard_samples_selected_min_percent=0.0,
                 device=None, world_size=None, rank=None, is_distributed=False):
        if not isinstance(default_sampler, Sampler):
            raise ValueError("default_sampler should be an instance of "
                             "torch.utils.data.Sampler, but got default_sampler={}"
                             .format(default_sampler))
        if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \
                batch_size <= 0:
            raise ValueError("batch_size should be a positive integer value, "
                             "but got batch_size={}".format(batch_size))
        # hard_sample_size may be None (treated as 0 below). The original check
        # compared None with ints, which raises TypeError on Python 3, so the
        # None case is now excluded before the numeric comparisons.
        if hard_sample_size is not None and (
                not isinstance(hard_sample_size, int)
                or hard_sample_size < 0
                or hard_sample_size >= batch_size):
            raise ValueError("hard_sample_size should be a positive integer value smaller than batch_size, "
                             "but got hard_sample_size={}".format(hard_sample_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got "
                             "drop_last={}".format(drop_last))

        self.is_distributed = is_distributed and world_size > 1
        self.world_size = world_size if self.is_distributed else 1
        self.rank = rank if self.is_distributed else 0
        self.device = device

        self.dataset = dataset
        self.default_sampler = default_sampler
        if self.is_distributed:
            self.hard_sampler = DistributedSubsetRandomSampler(list(range(len(default_sampler))), device=device)
        else:
            self.hard_sampler = torch.utils.data.SubsetRandomSampler(list(range(len(default_sampler))))
        self.hard_sample_size = hard_sample_size if hard_sample_size is not None else 0
        self.hard_samples_selected_min_percent = hard_samples_selected_min_percent if hard_samples_selected_min_percent is not None else 0
        self.batch_size = batch_size
        self.drop_last = drop_last

        # Per-sample loss and extra payload; *_tmp collects the current epoch
        # and is promoted at the start of the next one.
        self.sample_losses = dict()
        self.sample_storage = dict()
        self.sample_storage_tmp = dict()

    def update_sample_loss_batch(self, gt_sample, losses, index_key='index', storage_keys=[]):
        """Record per-sample losses (and any extra per-sample data) for a batch."""
        #assert index_key in gt_sample, "Index key %s is not present in gt_sample" % index_key

        indices = gt_sample[index_key]

        # convert to numpy
        indices = indices.detach().cpu().numpy() if isinstance(indices, torch.Tensor) else indices
        losses = losses.detach().cpu().numpy() if isinstance(losses, torch.Tensor) else losses

        for i, l in enumerate(losses):
            # get id of the sample (i.e. its index key)
            id = indices[i]

            # store its loss value
            self.sample_losses[id] = l
            # store any additional info required to pass along for hard examples
            # (save to temporary array which will be used for next epoch)
            self.sample_storage_tmp[id] = {k: gt_sample[k][i] for k in storage_keys}

    def retrieve_hard_sample_storage_batch(self, ids, key=None):
        # convert to numpy
        ids = ids.detach().cpu().numpy() if isinstance(ids, torch.Tensor) else ids
        # return matching sample_storage value for hard examples (i.e. for first N samples, where N=self.hard_sample_size)
        return [self.sample_storage[id][key] if n < self.hard_sample_size and id in self.sample_storage else None for n, id in enumerate(ids)]

    def _synchronize_dict(self, array):
        return distributed_sync_dict(array, self.world_size, self.rank, self.device)

    def _recompute_hard_samples_list(self):
        # Normalize losses to z-scores and pick the highest-loss samples,
        # relaxing the threshold until at least the requested fraction is kept.
        if self.is_distributed:
            self.sample_losses = self._synchronize_dict(self.sample_losses)
        if len(self.sample_losses) > 0:
            k = np.array(list(self.sample_losses.keys()))
            v = np.array([self.sample_losses[i] for i in k])
            v = (v - v.mean()) / v.std()
            hard_ids = list(k)
            for std_thr in [2, 1, 0.5, 0]:
                new_hard_ids = list(k[v > std_thr])
                if len(new_hard_ids) > len(v) * self.hard_samples_selected_min_percent:
                    hard_ids = new_hard_ids
                    break
            self.hard_sampler.indices = hard_ids if len(hard_ids) > 0 else list(k)
            if self.rank == 0:
                print('Number of hard samples present: %d/%d' % (len(hard_ids), len(self.sample_losses)))

        """
        if isinstance(self.dataset,LockableSeedRandomAccess):
            # lock seeds for hard samples BUT not for the whole dataset i.e. 90% of the whole dataset
            # (otherwise this will fully lock seeds for all samples and prevent new random augmentation of samples)
            self.dataset.lock_samples_seed(self.hard_sampler.indices if len(self.hard_sampler.indices) < len(self.sample_losses)*0.9 else [])
        """

        # update storage for next iteration
        self.sample_storage = self._synchronize_dict(self.sample_storage_tmp) if self.is_distributed else self.sample_storage_tmp
        self.sample_storage_tmp = dict()

    def __iter__(self):
        from itertools import islice
        self._recompute_hard_samples_list()
        max_index = len(self.default_sampler)
        if self.drop_last:
            total_batch_size = self.batch_size * self.world_size
            max_index = (max_index // total_batch_size) * total_batch_size

        batch = []
        hard_iter = iter(self.hard_sampler)
        self.usage_freq = {i: 0 for i in range(len(self.default_sampler))}
        for idx in islice(self.default_sampler, self.rank, max_index, self.world_size):
            batch.append(idx)
            # stop when spaces for normal samples filled
            if len(batch) == self.batch_size - self.hard_sample_size:
                # fill remaining places with hard examples
                # (does not need to be sync for distributed since sampling is random with replacement)
                while len(batch) < self.batch_size:
                    try:
                        batch.insert(0, next(hard_iter))
                    except StopIteration:  # reset iter if no more samples
                        hard_iter = iter(self.hard_sampler)

                for b in batch: self.usage_freq[b] += 1
                yield batch
                batch = []
        if len(batch) > 0 and not self.drop_last:
            for b in batch: self.usage_freq[b] += 1
            yield batch

    def get_avg_sample_loss(self):
        return np.array(list(self.sample_losses.values())).mean()

    def get_sample_losses(self):
        return self.sample_losses.copy()

    def get_sample_frequency_use(self):
        # usage_freq is (re)built in __iter__; valid after one full iteration.
        return self.usage_freq.copy()

    def __len__(self):
        # Number of batches yielded per epoch (per process when distributed).
        size_default = len(self.default_sampler)

        if self.is_distributed:
            size_default = size_default // self.world_size

        actual_batch_size = self.batch_size - self.hard_sample_size
        if self.drop_last:
            return size_default // actual_batch_size
        else:
            return (size_default + actual_batch_size - 1) // actual_batch_size


import torch.distributed as dist

class DistributedRandomSampler(Sampler):
    """Random sampler whose order is broadcast from rank 0 so that all
    processes iterate the dataset in the same order."""

    def __init__(self, data_source, replacement=False, num_samples=None, device=None):
        self.data_source = data_source
        self.replacement = replacement
        self._num_samples = num_samples
        self.device = device

        if not isinstance(self.replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(self.replacement))

        if self._num_samples is not None and not replacement:
            raise ValueError("With replacement=False, num_samples should not be specified, "
                             "since a random permute will be performed.")

        if not isinstance(self.num_samples, int) or self.num_samples <= 0:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(self.num_samples))

    @property
    def num_samples(self):
        # dataset size might change at runtime
        if self._num_samples is None:
            return len(self.data_source)
        return self._num_samples

    def __iter__(self):
        n = len(self.data_source)
        if self.replacement:
            iter_order = torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).to(self.device)
        else:
            iter_order = torch.randperm(n).to(self.device)

        # ensure order is the same for all processes (use iter from rank-0)
        dist.broadcast(iter_order, 0)

        return iter(iter_order.tolist())

    def __len__(self):
        return self.num_samples


class DistributedSubsetRandomSampler(Sampler):
    """SubsetRandomSampler whose permutation is broadcast from rank 0."""

    def __init__(self, indices, device=None):
        self.indices = indices
        self.device = device

    def __iter__(self):
        iter_order = torch.randperm(len(self.indices)).to(self.device)

        # ensure order is the same for all processes (use iter from rank-0)
        dist.broadcast(iter_order, 0)

        return (self.indices[i.item()] for i in iter_order)

    def __len__(self):
        return len(self.indices)


def distributed_sync_dict(array, world_size, rank, device, MAX_LENGTH=10*2**20):  # default MAX_LENGTH = 10MB
    """All-gather a dict across processes and merge into a single dict.

    Each process pickles its dict into a fixed-size uint8 tensor
    (4-byte big-endian length prefix + payload, zero-padded to MAX_LENGTH);
    tensors are exchanged with dist.all_gather and unpickled.
    """
    def _pack_data(_array):
        data = pickle.dumps(_array)
        data_length = int(len(data))
        data = data_length.to_bytes(4, "big") + data
        assert len(data) < MAX_LENGTH
        data += bytes(MAX_LENGTH - len(data))
        data = np.frombuffer(data, dtype=np.uint8)
        assert len(data) == MAX_LENGTH
        return torch.from_numpy(data)
    def _unpack_data(_array):
        data = _array.to(torch.uint8).cpu().numpy().tobytes()
        data_length = int.from_bytes(data[:4], 'big')
        return pickle.loads(data[4:data_length + 4])
    def _unpack_size(_array):
        # debugging helper (not used on the main path)
        print(_array.shape, _array[:4])
        data = _array.to(torch.uint8).cpu().numpy().tobytes()
        data_length = int.from_bytes(data[:4], 'big')
        print(data_length, data[:4])
        return data_length

    # prepare output buffer
    output_tensors = [torch.zeros(MAX_LENGTH, dtype=torch.uint8, device=device) for _ in range(world_size)]
    # pack data using pickle into input/output
    output_tensors[rank][:] = _pack_data(array)

    # sync data
    dist.all_gather(output_tensors, output_tensors[rank])

    # unpack data and merge into single dict
    return {id: val for array_tensor in output_tensors for id, val in _unpack_data(array_tensor).items()}

# ===================== data/dataset_catalog.py =====================
-------------------------------------------------------------------------------- 1 | from .input_ksdd import KSDDDataset 2 | from .input_dagm import DagmDataset 3 | from .input_steel import SteelDataset 4 | from .input_ksdd2 import KSDD2Dataset 5 | from .input_crack_segmentation import CrackSegmentationDataset 6 | from .input_sccdnet_dataset import SccdnetDataset 7 | from config import Config 8 | from torch.utils.data import DataLoader 9 | from typing import Optional 10 | 11 | from torch.utils.data import RandomSampler 12 | from torch.utils.data import SequentialSampler 13 | from data.dataset import HardExamplesBatchSampler 14 | 15 | def get_dataset(kind: str, cfg: Config) -> Optional[DataLoader]: 16 | if kind == "VAL" and not cfg.VALIDATE: 17 | return None 18 | if kind == "VAL" and cfg.VALIDATE_ON_TEST: 19 | kind = "TEST" 20 | if cfg.DATASET == "KSDD": 21 | ds = KSDDDataset(kind, cfg) 22 | elif cfg.DATASET == "DAGM": 23 | ds = DagmDataset(kind, cfg) 24 | elif cfg.DATASET == "STEEL": 25 | ds = SteelDataset(kind, cfg) 26 | elif cfg.DATASET == "KSDD2": 27 | ds = KSDD2Dataset(kind, cfg) 28 | elif cfg.DATASET == "crack_segmentation" or cfg.DATASET == 'CFD' or cfg.DATASET == 'CRACK500' or cfg.DATASET == 'DeepCrack': 29 | ds = CrackSegmentationDataset(kind, cfg) 30 | elif cfg.DATASET == "sccdnet": 31 | ds = SccdnetDataset(kind, cfg) 32 | else: 33 | raise Exception(f"Unknown dataset {cfg.DATASET}") 34 | 35 | shuffle = kind == "TRAIN" 36 | batch_size = cfg.BATCH_SIZE if kind == "TRAIN" else 1 37 | num_workers = 0 38 | drop_last = kind == "TRAIN" 39 | pin_memory = False 40 | 41 | if kind == "TRAIN" and cfg.HARD_NEG_MINING is not None: 42 | 43 | hard_sample_size, hard_samples_selected_min_percent, difficulty_score_type = cfg.HARD_NEG_MINING 44 | 45 | if shuffle: 46 | default_sampler = RandomSampler(ds) 47 | else: 48 | default_sampler = SequentialSampler(ds) 49 | 50 | batch_sampler = HardExamplesBatchSampler(ds, 51 | default_sampler, 52 | batch_size=batch_size, 53 | 
hard_sample_size=int(hard_sample_size), 54 | drop_last=True, 55 | hard_samples_selected_min_percent=hard_samples_selected_min_percent) 56 | 57 | return DataLoader(dataset=ds, batch_sampler=batch_sampler, num_workers=num_workers, pin_memory=pin_memory) 58 | 59 | else: 60 | return DataLoader(dataset=ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, drop_last=drop_last, pin_memory=pin_memory) -------------------------------------------------------------------------------- /data/input_crack_segmentation.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from data.dataset import Dataset 4 | from config import Config 5 | from datetime import datetime 6 | 7 | class CrackSegmentationDataset(Dataset): 8 | def __init__(self, kind: str, cfg: Config): 9 | super(CrackSegmentationDataset, self).__init__(cfg.DATASET_PATH, cfg, kind) 10 | self.read_contents() 11 | 12 | def read_samples(self, path_to_samples, sample_kind, path_to_GTs=None): 13 | samples = [i for i in sorted(os.listdir(path_to_samples)) if 'GT' not in i] 14 | 15 | if path_to_GTs is not None: 16 | gt_file_type = os.listdir(path_to_GTs)[0].split('.')[1] if len(os.listdir(path_to_GTs)) > 0 else None 17 | else: 18 | gt_file_type = [i for i in os.listdir(path_to_samples) if 'GT' in i][0].split('.')[1] if len([i for i in os.listdir(path_to_samples) if 'GT' in i]) > 0 else None 19 | 20 | for sample in samples: 21 | part, _ = sample.split(".") 22 | 23 | image_path = os.path.join(path_to_samples, sample) 24 | if path_to_GTs is not None: 25 | seg_mask_path = os.path.join(path_to_GTs, f"{part}.{gt_file_type}") 26 | else: 27 | seg_mask_path = os.path.join(path_to_samples, f"{part}_GT.{gt_file_type}") 28 | 29 | image = self.read_img_resize(image_path, self.grayscale, self.image_size) 30 | image = self.to_tensor(image) 31 | 32 | seg_mask, positive = self.read_label_resize(seg_mask_path, self.image_size, self.cfg.DILATE) 33 | 34 | 
seg_mask = self.to_tensor(seg_mask) 35 | 36 | if sample_kind == 'pos': 37 | self.pos_samples.append((image, seg_mask, True, image_path, seg_mask_path, part, True)) 38 | else: 39 | self.neg_samples.append((image, seg_mask, True, image_path, seg_mask_path, part, False)) 40 | 41 | def read_contents(self): 42 | 43 | self.pos_samples = list() 44 | self.neg_samples = list() 45 | 46 | if self.cfg.DATASET == 'CRACK500': 47 | if self.kind == 'TEST': 48 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'test'), 'pos') 49 | elif self.kind == 'TRAIN': 50 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'train'), 'pos') 51 | if self.cfg.USE_NEGATIVES is not None: 52 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, self.cfg.USE_NEGATIVES), 'neg') 53 | elif self.kind == 'VAL': 54 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'val'), 'pos') 55 | if self.cfg.VAL_NEG is not None: 56 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, self.cfg.VAL_NEG), 'neg') 57 | elif self.cfg.DATASET == 'DeepCrack': 58 | if self.kind == 'TEST': 59 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'test_img'), 'pos', path_to_GTs=os.path.join(self.cfg.DATASET_PATH, 'test_lab')) 60 | elif self.kind == 'TRAIN': 61 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'train_img'), 'pos', path_to_GTs=os.path.join(self.cfg.DATASET_PATH, 'train_lab')) 62 | else: 63 | if self.kind == 'TEST': 64 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'test_positive'), 'pos') 65 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'test_negative'), 'neg') 66 | elif self.kind == 'TRAIN': 67 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'train_positive'), 'pos') 68 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'train_negative'), 'neg') 69 | elif self.kind == 'VAL': 70 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'val_positive'), 'pos') 71 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'val_negative'), 'neg') 72 | 73 | 
self.num_pos = len(self.pos_samples) 74 | self.num_neg = len(self.neg_samples) 75 | 76 | self.len = self.num_pos + self.num_neg 77 | 78 | time = datetime.now().strftime("%d-%m-%y %H:%M") 79 | 80 | self.pos_weight = None 81 | 82 | if self.kind == 'TRAIN' and self.cfg.BCE_LOSS_W: 83 | neg = self.count_pixels(0) 84 | pos = self.count_pixels(1) 85 | self.pos_weight = neg / pos 86 | print(f"{time} {self.kind}: Number of positives: {self.num_pos}, Number of negatives: {self.num_neg}, Sum: {self.len}, pos_weight: {self.pos_weight}") 87 | else: 88 | print(f"{time} {self.kind}: Number of positives: {self.num_pos}, Number of negatives: {self.num_neg}, Sum: {self.len}") 89 | 90 | self.init_extra() -------------------------------------------------------------------------------- /data/input_dagm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import pickle 4 | from data.dataset import Dataset 5 | 6 | 7 | def read_split(num_segmented: int, fold: int, kind: str): 8 | fn = f"DAGM/split_{num_segmented}.pyb" 9 | with open(f"splits/{fn}", "rb") as f: 10 | train_samples, test_samples = pickle.load(f) 11 | if kind == 'TRAIN': 12 | return train_samples[fold - 1] 13 | elif kind == 'TEST': 14 | return test_samples[fold - 1] 15 | else: 16 | raise Exception('Unknown') 17 | 18 | 19 | class DagmDataset(Dataset): 20 | def __init__(self, kind: str, cfg): 21 | super(DagmDataset, self).__init__(os.path.join(cfg.DATASET_PATH, f"Class{cfg.FOLD}"), cfg, kind) 22 | self.read_contents() 23 | 24 | def read_contents(self): 25 | pos_samples, neg_samples = [], [] 26 | 27 | samples = read_split(self.cfg.NUM_SEGMENTED, self.cfg.FOLD, self.kind) 28 | 29 | sub_dir = self.kind.lower().capitalize() 30 | 31 | for image_name, is_segmented in samples: 32 | image_path = os.path.join(self.path, sub_dir, image_name) 33 | image = self.read_img_resize(image_path, self.grayscale, self.image_size) 34 | img_name_short = image_name[:-4] 35 | 
seg_mask_path = os.path.join(self.path, sub_dir, "Label", f"{img_name_short}_label.PNG") 36 | 37 | if os.path.exists(seg_mask_path): 38 | seg_mask, _ = self.read_label_resize(seg_mask_path, self.image_size, dilate=self.cfg.DILATE) 39 | image = self.to_tensor(image) 40 | seg_loss_mask = self.distance_transform(seg_mask, self.cfg.WEIGHTED_SEG_LOSS_MAX, self.cfg.WEIGHTED_SEG_LOSS_P) 41 | seg_mask = self.to_tensor(self.downsize(seg_mask)) 42 | seg_loss_mask = self.to_tensor(self.downsize(seg_loss_mask)) 43 | pos_samples.append((image, seg_mask, seg_loss_mask, is_segmented, image_path, None, img_name_short)) 44 | 45 | else: 46 | seg_mask = np.zeros_like(image) 47 | image = self.to_tensor(image) 48 | seg_loss_mask = self.to_tensor(self.downsize(np.ones_like(seg_mask))) 49 | seg_mask = self.to_tensor(self.downsize(seg_mask)) 50 | neg_samples.append((image, seg_mask, seg_loss_mask, True, image_path, seg_mask_path, img_name_short)) 51 | 52 | self.pos_samples = pos_samples 53 | self.neg_samples = neg_samples 54 | 55 | self.num_pos = len(pos_samples) 56 | self.num_neg = len(neg_samples) 57 | self.len = 2 * len(pos_samples) if self.kind in ['TRAIN'] else len(pos_samples) + len(neg_samples) 58 | 59 | self.init_extra() 60 | -------------------------------------------------------------------------------- /data/input_ksdd.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pickle 3 | import os 4 | from data.dataset import Dataset 5 | from config import Config 6 | 7 | 8 | def read_split(train_num: int, num_segmented: int, fold: int, kind: str): 9 | fn = f"KSDD/split_{train_num}_{num_segmented}.pyb" 10 | with open(f"splits/{fn}", "rb") as f: 11 | train_samples, test_samples = pickle.load(f) 12 | if kind == 'TRAIN': 13 | return train_samples[fold] 14 | elif kind == 'TEST': 15 | return test_samples[fold] 16 | else: 17 | raise Exception('Unknown') 18 | 19 | 20 | class KSDDDataset(Dataset): 21 | def __init__(self, kind: 
str, cfg: Config): 22 | super(KSDDDataset, self).__init__(cfg.DATASET_PATH, cfg, kind) 23 | self.read_contents() 24 | 25 | def read_contents(self): 26 | pos_samples, neg_samples = [], [] 27 | 28 | folders = read_split(self.cfg.TRAIN_NUM, self.cfg.NUM_SEGMENTED, self.cfg.FOLD, self.kind) 29 | for f, is_segmented in folders: 30 | for sample in sorted(os.listdir(os.path.join(self.path, f))): 31 | if not sample.__contains__('label'): 32 | image_path = self.path + '/' + f + '/' + sample 33 | seg_mask_path = f"{image_path[:-4]}_label.bmp" 34 | image = self.read_img_resize(image_path, self.grayscale, self.image_size) 35 | seg_mask, positive = self.read_label_resize(seg_mask_path, self.image_size, dilate=self.cfg.DILATE) 36 | sample_name = f"{f}_{sample}"[:-4] 37 | if sample_name == 'kos21_Part7': 38 | continue 39 | if positive: 40 | image = self.to_tensor(image) 41 | seg_loss_mask = self.distance_transform(seg_mask, self.cfg.WEIGHTED_SEG_LOSS_MAX, self.cfg.WEIGHTED_SEG_LOSS_P) 42 | seg_loss_mask = self.to_tensor(self.downsize(seg_loss_mask)) 43 | seg_mask = self.to_tensor(self.downsize(seg_mask)) 44 | pos_samples.append((image, seg_mask, seg_loss_mask, is_segmented, image_path, seg_mask_path, sample_name)) 45 | else: 46 | image = self.to_tensor(image) 47 | seg_loss_mask = self.to_tensor(self.downsize(np.ones_like(seg_mask))) 48 | seg_mask = self.to_tensor(self.downsize(seg_mask)) 49 | neg_samples.append((image, seg_mask, seg_loss_mask, True, image_path, seg_mask_path, sample_name)) 50 | 51 | self.pos_samples = pos_samples 52 | self.neg_samples = neg_samples 53 | 54 | self.num_pos = len(pos_samples) 55 | self.num_neg = len(neg_samples) 56 | self.len = 2 * len(pos_samples) if self.kind in ['TRAIN'] else len(pos_samples) + len(neg_samples) 57 | 58 | self.init_extra() 59 | -------------------------------------------------------------------------------- /data/input_ksdd2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 
2 | import pickle 3 | import os 4 | from data.dataset import Dataset 5 | from config import Config 6 | 7 | 8 | def read_split(num_segmented: int, kind: str): 9 | fn = f"KSDD2/split_{num_segmented}.pyb" 10 | with open(f"splits/{fn}", "rb") as f: 11 | train_samples, test_samples = pickle.load(f) 12 | if kind == 'TRAIN': 13 | return train_samples 14 | elif kind == 'TEST': 15 | return test_samples 16 | else: 17 | raise Exception('Unknown') 18 | 19 | 20 | class KSDD2Dataset(Dataset): 21 | def __init__(self, kind: str, cfg: Config): 22 | super(KSDD2Dataset, self).__init__(cfg.DATASET_PATH, cfg, kind) 23 | self.read_contents() 24 | 25 | def read_contents(self): 26 | pos_samples, neg_samples = [], [] 27 | 28 | data_points = read_split(self.cfg.NUM_SEGMENTED, self.kind) 29 | 30 | for part, is_segmented in data_points: 31 | image_path = os.path.join(self.path, self.kind.lower(), f"{part}.png") 32 | seg_mask_path = os.path.join(self.path, self.kind.lower(), f"{part}_GT.png") 33 | 34 | image = self.read_img_resize(image_path, self.grayscale, self.image_size) 35 | seg_mask, positive = self.read_label_resize(seg_mask_path, self.image_size, self.cfg.DILATE) 36 | 37 | if positive: 38 | image = self.to_tensor(image) 39 | seg_loss_mask = self.distance_transform(seg_mask, self.cfg.WEIGHTED_SEG_LOSS_MAX, self.cfg.WEIGHTED_SEG_LOSS_P) 40 | seg_loss_mask = self.to_tensor(self.downsize(seg_loss_mask)) 41 | seg_mask = self.to_tensor(self.downsize(seg_mask)) 42 | pos_samples.append((image, seg_mask, seg_loss_mask, is_segmented, image_path, seg_mask_path, part)) 43 | else: 44 | image = self.to_tensor(image) 45 | seg_loss_mask = self.to_tensor(self.downsize(np.ones_like(seg_mask))) 46 | seg_mask = self.to_tensor(self.downsize(seg_mask)) 47 | neg_samples.append((image, seg_mask, seg_loss_mask, True, image_path, seg_mask_path, part)) 48 | 49 | self.pos_samples = pos_samples 50 | self.neg_samples = neg_samples 51 | 52 | self.num_pos = len(pos_samples) 53 | self.num_neg = len(neg_samples) 54 | 
self.len = 2 * len(pos_samples) if self.kind in ['TRAIN'] else len(pos_samples) + len(neg_samples) 55 | 56 | self.init_extra() 57 | -------------------------------------------------------------------------------- /data/input_sccdnet_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from data.dataset import Dataset 4 | from config import Config 5 | from datetime import datetime 6 | 7 | from tqdm import tqdm 8 | class SccdnetDataset(Dataset): 9 | def __init__(self, kind: str, cfg: Config): 10 | super(SccdnetDataset, self).__init__(cfg.DATASET_PATH, cfg, kind) 11 | self.read_contents() 12 | 13 | def read_samples(self, path_to_samples): 14 | 15 | samples = [i for i in sorted(os.listdir(os.path.join(path_to_samples, 'images')))] 16 | 17 | if self.cfg.TRAIN_SPLIT is not None: 18 | if self.kind == 'TRAIN': 19 | samples = [] 20 | for f in range(1, 6): 21 | if f != self.cfg.TRAIN_SPLIT: 22 | samples += [s.strip() for s in open(os.path.join(self.cfg.DATASET_PATH, "splits", f"split_{f}.txt"), "r").readlines()] 23 | 24 | elif self.kind == 'VAL': 25 | samples = [s.strip() for s in open(os.path.join(self.cfg.DATASET_PATH, "splits", f"split_{self.cfg.TRAIN_SPLIT}.txt"), "r").readlines()] 26 | 27 | for sample in tqdm(samples): 28 | id, file_type = sample.rsplit(".", 1) 29 | 30 | image_path = os.path.join(path_to_samples, 'images', sample) 31 | seg_mask_path = os.path.join(path_to_samples, 'masks', sample) 32 | 33 | seg_mask, _ = self.read_label_resize(seg_mask_path, self.image_size, self.cfg.DILATE) 34 | seg_mask = np.array((seg_mask > 0.5), dtype=np.float32) 35 | positive = seg_mask.max() > 0 36 | 37 | self.pos_pixels += (seg_mask == 1).sum().item() 38 | self.neg_pixels += (seg_mask == 0).sum().item() 39 | 40 | if not self.cfg.ON_DEMAND_READ: 41 | image = self.read_img_resize(image_path, self.grayscale, self.image_size) 42 | image = self.to_tensor(image) 43 | seg_mask = self.to_tensor(seg_mask) 44 
| else: 45 | image = None 46 | seg_mask = None 47 | 48 | if positive: 49 | self.pos_samples.append((image, seg_mask, True, image_path, seg_mask_path, id, True)) 50 | else: 51 | self.neg_samples.append((image, seg_mask, True, image_path, seg_mask_path, id, False)) 52 | 53 | def read_contents(self): 54 | 55 | self.pos_samples = list() 56 | self.neg_samples = list() 57 | 58 | self.neg_pixels = 0 59 | self.pos_pixels = 0 60 | 61 | if self.kind == 'TRAIN' or self.kind == 'VAL': 62 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'train')) 63 | elif self.kind == 'TEST': 64 | self.read_samples(os.path.join(self.cfg.DATASET_PATH, 'test')) 65 | 66 | self.num_pos = len(self.pos_samples) 67 | self.num_neg = len(self.neg_samples) 68 | 69 | self.len = self.num_pos + self.num_neg 70 | 71 | time = datetime.now().strftime("%d-%m-%y %H:%M") 72 | 73 | self.pos_weight_seg = self.neg_pixels / self.pos_pixels if self.pos_pixels else 0 74 | self.pos_weight_dec = self.num_neg / self.num_pos if self.num_pos else 0 75 | 76 | if self.kind == 'TRAIN' and self.cfg.BCE_LOSS_W: 77 | print(f"{time} {self.kind}: Number of positives: {self.num_pos}, Number of negatives: {self.num_neg}, Sum: {self.len}, Seg pos_weight: {round(self.pos_weight_seg, 3)}, Dec pos_weight: {round(self.pos_weight_dec, 3)}") 78 | else: 79 | print(f"{time} {self.kind}: Number of positives: {self.num_pos}, Number of negatives: {self.num_neg}, Sum: {self.len}") 80 | 81 | self.init_extra() -------------------------------------------------------------------------------- /data/input_steel.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from data.dataset import Dataset 4 | import pickle 5 | import pandas as pd 6 | 7 | 8 | def read_split(train_num: int, num_segmented: int, kind: str): 9 | fn = f"STEEL/split_{train_num}_{num_segmented}.pyb" 10 | with open(f"splits/{fn}", "rb") as f: 11 | train_samples, test_samples, validation_samples = 
pickle.load(f) 12 | if kind == 'TRAIN': 13 | return train_samples 14 | elif kind == 'TEST': 15 | return test_samples 16 | elif kind == 'VAL': 17 | return validation_samples 18 | else: 19 | raise Exception('Unknown') 20 | 21 | 22 | def read_annotations(fn): 23 | arr = np.array(pd.read_csv(fn), dtype=np.object) 24 | annotations_dict = {} 25 | for sample, _, rle in arr: 26 | img_name = sample[:-4] 27 | annotations_dict[img_name] = rle 28 | 29 | return annotations_dict 30 | 31 | 32 | class SteelDataset(Dataset): 33 | def __init__(self, kind, cfg): 34 | super(SteelDataset, self).__init__(cfg.DATASET_PATH, cfg, kind) 35 | self.read_contents() 36 | 37 | def read_contents(self): 38 | if not self.cfg.ON_DEMAND_READ: 39 | raise Exception("Need to implement eager loading!") 40 | 41 | pos_samples, neg_samples = [], [] 42 | 43 | fn = os.path.join(self.path, "train.csv") 44 | annotations = read_annotations(fn) 45 | 46 | samples = read_split(self.cfg.TRAIN_NUM, self.cfg.NUM_SEGMENTED, self.kind) 47 | for sample, is_segmented in samples: 48 | img_name = f"{sample}.jpg" 49 | img_path = os.path.join(self.path, "train_images", img_name) 50 | 51 | if sample in annotations: 52 | rle = list(map(int, annotations[sample].split(" "))) 53 | pos_samples.append((None, None, None, is_segmented, img_path, rle, sample)) 54 | else: 55 | neg_samples.append((None, None, None, True, img_path, None, sample)) 56 | 57 | self.pos_samples = pos_samples 58 | self.neg_samples = neg_samples 59 | 60 | self.num_pos = len(pos_samples) 61 | self.num_neg = len(neg_samples) 62 | self.len = 2 * len(pos_samples) if self.kind in ['TRAIN'] else len(pos_samples) + len(neg_samples) 63 | 64 | self.init_extra() 65 | -------------------------------------------------------------------------------- /datasets/README.md: -------------------------------------------------------------------------------- 1 | ### Corrected SCCDNet Datasets 2 | You will need to download the datasets yourself. 
Corrected SCCDNet dataset is available [here.](https://go.vicos.si/sccdnetdbcorrected) 3 | After downloading and extracting the dataset, the file structure should look like this: 4 | 5 | SCCDNet-corrected 6 | ├── train 7 | ├──── images 8 | ├──── masks 9 | ├── test 10 | ├──── images 11 | ├──── masks 12 | 13 | -------------------------------------------------------------------------------- /end2end.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import matplotlib 4 | matplotlib.use("Agg") 5 | import matplotlib.pyplot as plt 6 | from models import SegDecNetPlusPlus, SegDecNetOriginalJIM 7 | import numpy as np 8 | import os 9 | from torch import nn as nn 10 | import torch 11 | import utils 12 | import pandas as pd 13 | from data.dataset_catalog import get_dataset 14 | import random 15 | import cv2 16 | from config import Config 17 | from torch.utils.tensorboard import SummaryWriter 18 | from datetime import datetime 19 | from timeit import default_timer as timer 20 | from datetime import timedelta 21 | from torchinfo import summary 22 | from tqdm import tqdm 23 | LVL_ERROR = 10 24 | LVL_INFO = 5 25 | LVL_DEBUG = 1 26 | 27 | LOG = 1 # Will log all mesages with lvl greater than this 28 | SAVE_LOG = True 29 | 30 | WRITE_TENSORBOARD = False 31 | 32 | 33 | class End2End: 34 | def __init__(self, cfg: Config): 35 | self.cfg: Config = cfg 36 | self.storage_path: str = os.path.join(self.cfg.RESULTS_PATH, self.cfg.DATASET) 37 | 38 | def _log(self, message, lvl=LVL_INFO): 39 | time = datetime.now().strftime("%d-%m-%y %H:%M") 40 | n_msg = f"{time} {self.run_name} {message}" 41 | if lvl >= LOG: 42 | print(n_msg) 43 | 44 | def train(self): 45 | self._set_results_path() 46 | self._create_results_dirs() 47 | self.print_run_params() 48 | self.set_seed() 49 | 50 | device = self._get_device() 51 | model = self._get_model().to(device) 52 | optimizer = self._get_optimizer(model) 53 | scheduler = self._get_scheduler(optimizer) 54 
| 55 | # Save current learning method to model's directory 56 | utils.save_current_learning_method(save_path=self.run_path) 57 | 58 | train_loader = get_dataset("TRAIN", self.cfg) 59 | validation_loader = get_dataset("VAL", self.cfg) 60 | 61 | loss_seg, loss_dec = self._get_loss(is_seg=True, pos_weight=train_loader.dataset.pos_weight_seg), self._get_loss(is_seg=False, pos_weight=train_loader.dataset.pos_weight_dec) 62 | 63 | tensorboard_writer = SummaryWriter(log_dir=self.tensorboard_path) if WRITE_TENSORBOARD else None 64 | 65 | train_start = timer() 66 | losses, validation_data, best_model_metrics, validation_metrics, lrs, difficulty_score_dict = self._train_model(device, model, train_loader, loss_seg, loss_dec, optimizer, scheduler, validation_loader, tensorboard_writer) 67 | end = timer() 68 | self._log(f"Training time: {timedelta(seconds=end-train_start)}") 69 | train_results = (losses, validation_data, validation_metrics, lrs) 70 | self._save_train_results(train_results) 71 | self._save_model(model) 72 | 73 | # Save difficulty_score_dict 74 | np.save(os.path.join(self.run_path, "difficulty_score_dict.npy"), difficulty_score_dict) 75 | 76 | self.eval(model=model, device=device, save_images=self.cfg.SAVE_IMAGES, plot_seg=False, reload_final=False, best_model_metrics=best_model_metrics) 77 | 78 | self._save_params() 79 | 80 | # Print model's trainable parameters # and save model's summary to file 81 | self._log(f"Model's trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}") 82 | print(summary(model, input_size=torch.Size([self.cfg.BATCH_SIZE, self.cfg.INPUT_CHANNELS, self.cfg.INPUT_HEIGHT, self.cfg.INPUT_WIDTH]), verbose=0), file=open(os.path.join(self.run_path, "model_summary.txt"), 'w', encoding="utf-8")) 83 | 84 | def eval(self, model, device, save_images, plot_seg, reload_final, eval_loader=None, best_model_metrics=None): 85 | self.reload_model(model, reload_final) 86 | is_validation = True 87 | if eval_loader is None: 88 | 
eval_loader = get_dataset("TEST", self.cfg) 89 | is_validation = False 90 | eval_start = timer() 91 | self.eval_model(device, model, eval_loader, save_folder=self.outputs_path, save_images=save_images, is_validation=is_validation, plot_seg=plot_seg, thresholds=best_model_metrics) 92 | end = timer() 93 | self._log(f"Evaluation time: {timedelta(seconds=end-eval_start)}") 94 | 95 | def training_iteration(self, data, device, model, criterion_seg, criterion_dec, optimizer, weight_loss_seg, weight_loss_dec, 96 | tensorboard_writer, iter_index): 97 | images, seg_masks, is_segmented, sample_names, is_pos, _ = data 98 | 99 | batch_size = self.cfg.BATCH_SIZE 100 | memory_fit = self.cfg.MEMORY_FIT # Not supported yet for >1 101 | 102 | num_subiters = int(batch_size / memory_fit) 103 | 104 | total_loss = 0 105 | total_correct = 0 106 | 107 | optimizer.zero_grad() 108 | 109 | total_loss_seg = 0 110 | total_loss_dec = 0 111 | 112 | difficulty_score = np.zeros(batch_size) 113 | 114 | for sub_iter in range(num_subiters): 115 | images_ = images[sub_iter * memory_fit:(sub_iter + 1) * memory_fit, :, :, :].to(device) 116 | seg_mask_ = seg_masks[sub_iter * memory_fit:(sub_iter + 1) * memory_fit, :, :, :].to(device) 117 | is_pos_ = seg_mask_.max().reshape((memory_fit, 1)).to(device) 118 | 119 | if tensorboard_writer is not None and iter_index % 100 == 0: 120 | tensorboard_writer.add_image(f"{iter_index}/image", images_[0, :, :, :]) 121 | 122 | decision, seg_mask_predicted = model(images_) 123 | 124 | if is_segmented[sub_iter]: 125 | loss_seg = criterion_seg(seg_mask_predicted, seg_mask_) 126 | loss_dec = criterion_dec(decision, is_pos_) 127 | 128 | if self.cfg.HARD_NEG_MINING is not None: 129 | _, _, difficulty_score_mode = self.cfg.HARD_NEG_MINING 130 | if difficulty_score_mode == 1: 131 | difficulty_score[sub_iter] = loss_seg.item() 132 | elif difficulty_score_mode == 2: 133 | threshold = 0.5 134 | y_true = seg_mask_.detach().cpu().numpy()[0][0].astype(np.uint8) 135 | y_pred = 
(seg_mask_predicted.detach().cpu().numpy()[0][0]>threshold).astype(np.uint8) 136 | 137 | fp = sum(sum((y_true==0)&(y_pred==1))).item() 138 | fn = sum(sum((y_true==1)&(y_pred==0))).item() 139 | 140 | difficulty_score[sub_iter] = loss_seg.item() * ((2 * fp) + fn + 1) 141 | elif difficulty_score_mode == 3: 142 | seg_mask_predicted = nn.Sigmoid()(seg_mask_predicted) 143 | seg_mask_predicted_max = seg_mask_predicted.detach().cpu().numpy()[0][0].max() 144 | classification = nn.Sigmoid()(decision).item() 145 | difficulty_score[sub_iter] = abs(seg_mask_predicted_max - classification) 146 | 147 | 148 | total_loss_seg += loss_seg.item() 149 | total_loss_dec += loss_dec.item() 150 | 151 | total_correct += (decision > 0.0).item() == is_pos_.item() 152 | loss = weight_loss_seg * loss_seg + weight_loss_dec * loss_dec 153 | else: 154 | loss_dec = criterion_dec(decision, is_pos_) 155 | total_loss_dec += loss_dec.item() 156 | 157 | total_correct += (decision > 0.0).item() == is_pos_.item() 158 | loss = weight_loss_dec * loss_dec 159 | 160 | total_loss += loss.item() 161 | 162 | loss.backward() 163 | 164 | # Backward and optimize 165 | optimizer.step() 166 | optimizer.zero_grad() 167 | 168 | return total_loss_seg, total_loss_dec, total_loss, total_correct, difficulty_score 169 | 170 | def _train_model(self, device, model, train_loader, criterion_seg, criterion_dec, optimizer, scheduler, validation_set, tensorboard_writer): 171 | losses = [] 172 | validation_data = [] 173 | validation_metrics = [] 174 | lrs = [] 175 | max_validation = -1 176 | max_f_measure = -1 177 | best_dice = -1 178 | best_f1 = -1 179 | validation_step = self.cfg.VALIDATION_N_EPOCHS 180 | 181 | num_epochs = self.cfg.EPOCHS 182 | samples_per_epoch = len(train_loader) * self.cfg.BATCH_SIZE 183 | 184 | difficulty_score_dict = dict() 185 | 186 | self.set_dec_gradient_multiplier(model, 0.0) 187 | 188 | for epoch in range(num_epochs): 189 | if epoch % 5 == 0: 190 | self._save_model(model, f"ep_{epoch:02}.pth") 191 | 
192 | model.train() 193 | 194 | weight_loss_seg, weight_loss_dec = self.get_loss_weights(epoch) 195 | dec_gradient_multiplier = self.get_dec_gradient_multiplier() 196 | self.set_dec_gradient_multiplier(model, dec_gradient_multiplier) 197 | 198 | epoch_loss_seg, epoch_loss_dec, epoch_loss = 0, 0, 0 199 | epoch_correct = 0 200 | 201 | difficulty_score_dict[epoch] = [] 202 | 203 | from timeit import default_timer as timer 204 | 205 | time_acc = 0 206 | start = timer() 207 | for iter_index, (data) in enumerate(tqdm(train_loader)): 208 | start_1 = timer() 209 | curr_loss_seg, curr_loss_dec, curr_loss, correct, difficulty_score = self.training_iteration(data, device, model, 210 | criterion_seg, 211 | criterion_dec, 212 | optimizer, weight_loss_seg, 213 | weight_loss_dec, 214 | tensorboard_writer, (epoch * samples_per_epoch + iter_index)) 215 | 216 | end_1 = timer() 217 | time_acc = time_acc + (end_1 - start_1) 218 | 219 | epoch_loss_seg += curr_loss_seg 220 | epoch_loss_dec += curr_loss_dec 221 | epoch_loss += curr_loss 222 | 223 | epoch_correct += correct 224 | 225 | if self.cfg.HARD_NEG_MINING is not None: 226 | train_loader.batch_sampler.update_sample_loss_batch(data, difficulty_score, index_key=5) 227 | 228 | difficulty_score_dict[epoch].append({index.item(): round(score, 2) for index, score in zip(data[-1], difficulty_score)}) 229 | 230 | end = timer() 231 | 232 | 233 | epoch_loss_seg = epoch_loss_seg / samples_per_epoch 234 | epoch_loss_dec = epoch_loss_dec / samples_per_epoch 235 | epoch_loss = epoch_loss / samples_per_epoch 236 | losses.append((epoch_loss_seg, epoch_loss_dec, epoch_loss, epoch)) 237 | 238 | self._log(f"Epoch {epoch + 1}/{num_epochs} ==> avg_loss_seg={epoch_loss_seg:.5f}, avg_loss_dec={epoch_loss_dec:.5f}, avg_loss={epoch_loss:.5f}, correct={epoch_correct}/{samples_per_epoch}, in {end - start:.2f}s/epoch (fwd/bck in {time_acc:.2f}s/epoch)") 239 | 240 | if self.cfg.SCHEDULER is not None: 241 | scheduler.step() 242 | last_learning_rate = 
scheduler.get_last_lr()[-1] 243 | self._log(f"Last computing learning rate by scheduler: {last_learning_rate}") 244 | lrs.append((epoch, last_learning_rate)) 245 | else: 246 | lrs.append((epoch, self._get_learning_rate(optimizer=optimizer))) 247 | 248 | self._log(f"Last computing learning rate by optimizer: {self._get_learning_rate(optimizer=optimizer)}") 249 | 250 | if tensorboard_writer is not None: 251 | tensorboard_writer.add_scalar("Loss/Train/segmentation", epoch_loss_seg, epoch) 252 | tensorboard_writer.add_scalar("Loss/Train/classification", epoch_loss_dec, epoch) 253 | tensorboard_writer.add_scalar("Loss/Train/joined", epoch_loss, epoch) 254 | tensorboard_writer.add_scalar("Accuracy/Train/", epoch_correct / samples_per_epoch, epoch) 255 | 256 | if self.cfg.VALIDATE and (epoch % validation_step == 0 or epoch == num_epochs - 1): 257 | validation_ap, validation_accuracy, val_metrics = self.eval_model(device=device, model=model, eval_loader=validation_set, save_folder=None, save_images=False, is_validation=True, plot_seg=False) 258 | validation_data.append((validation_ap, epoch)) 259 | validation_metrics.append((epoch, val_metrics)) 260 | 261 | if val_metrics['Dice'] > best_dice: 262 | best_dice = val_metrics['Dice'] 263 | best_seg_model_metrics = val_metrics 264 | self._save_model(model, "best_seg_dict.pth") 265 | 266 | if val_metrics['best_f_measure'] > max_f_measure: 267 | max_f_measure = val_metrics['best_f_measure'] 268 | best_dec_model_metrics = val_metrics 269 | self._save_model(model, "best_dec_dict.pth") 270 | 271 | if val_metrics['Dice'] >= best_dice and val_metrics['best_f_measure'] >= max_f_measure: 272 | best_dice = val_metrics['Dice'] 273 | max_f_measure = val_metrics['best_f_measure'] 274 | best_model_metrics = val_metrics 275 | self._save_model(model, "best_state_dict.pth") 276 | 277 | model.train() 278 | if tensorboard_writer is not None: 279 | tensorboard_writer.add_scalar("Accuracy/Validation/", validation_accuracy, epoch) 280 | 281 | if 
self.cfg.BEST_MODEL_TYPE == "dec": 282 | best_model_metrics = best_dec_model_metrics 283 | elif self.cfg.BEST_MODEL_TYPE == "seg": 284 | best_model_metrics = best_seg_model_metrics 285 | 286 | return losses, validation_data, best_model_metrics, validation_metrics, lrs, difficulty_score_dict 287 | 288 | def eval_model_speed(self, device, model, eval_loader): 289 | model.eval() 290 | 291 | cuda_time = [] 292 | cuda_mem_usage = [] 293 | cpu_time = [] 294 | from itertools import chain 295 | 296 | iter = list(chain(eval_loader.dataset.neg_samples,eval_loader.dataset.pos_samples)) 297 | 298 | from torchvision.transforms import functional as F 299 | 300 | N = 1000 301 | start = time.time() 302 | for index, data_point in enumerate(tqdm(iter)): 303 | _, _, _, image_path, _, _, _ = data_point 304 | image = cv2.imread(image_path, cv2.IMREAD_COLOR) 305 | image = F.to_tensor(cv2.resize(image, dsize=(self.cfg.INPUT_WIDTH, self.cfg.INPUT_HEIGHT))).unsqueeze(0) 306 | 307 | image = image.to(device) 308 | # ensure all work is done before next loop 309 | torch.cuda.synchronize() 310 | if index % 20 == 0 and False: 311 | from torch.profiler import profile, ProfilerActivity 312 | with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], 313 | profile_memory=False, record_shapes=True, with_flops=True) as prof: 314 | prediction, seg_mask_predicted = model(image) 315 | # ensure all work is done before next loop 316 | torch.cuda.synchronize() 317 | 318 | cuda_events = [n for n in prof.key_averages() if n.device_type == torch.autograd.DeviceType.CUDA] 319 | cuda_time.append(sum([n.self_cuda_time_total for n in cuda_events])) 320 | cpu_time.append(sum([n.self_cpu_time_total for n in prof.key_averages()])) 321 | cuda_mem_usage.append(sum([n.cuda_memory_usage for n in prof.key_averages()])) 322 | print(cuda_time[-1]) 323 | print(cpu_time[-1]) 324 | else: 325 | prediction, seg_mask_predicted = model(image) 326 | 327 | prediction = torch.sigmoid(prediction) 328 | seg_mask_predicted 
= torch.sigmoid(seg_mask_predicted) 329 | 330 | # ensure all work is done before next loop 331 | torch.cuda.synchronize() 332 | 333 | end = time.time() 334 | time_python_total = end-start 335 | time_python_per_img_ms = time_python_total*1000/N 336 | 337 | print(f'total python time: {time_python_total} sec, per image: {time_python_per_img_ms} ms') 338 | print(f'total python FPS: {1000.0/time_python_per_img_ms}') 339 | if len(cuda_time) > 0: 340 | print('avg cuda time: ', np.mean(cuda_time)/1000, ' ms') 341 | if len(cuda_mem_usage) > 0: 342 | print('avg cuda mem usage: ', np.mean(cuda_mem_usage) / 10**6, ' MB') 343 | print("num_params:", sum([p.numel() for p in model.parameters() if p.requires_grad]) / 10 ** 6) 344 | 345 | def eval_model(self, device, model, eval_loader, save_folder, save_images, is_validation, plot_seg, thresholds=None): 346 | model.eval() 347 | 348 | dsize = self.cfg.INPUT_WIDTH, self.cfg.INPUT_HEIGHT 349 | 350 | res = [] 351 | predictions, predictions_truths = [], [] 352 | 353 | predicted_segs, true_segs = [], [] 354 | samples = {"images": list(), "image_names": list()} 355 | 356 | for iii, data_point in enumerate(tqdm(eval_loader)): 357 | image, seg_mask, _, sample_name, is_pos, _ = data_point 358 | image, seg_mask = image.to(device), seg_mask.to(device) 359 | is_pos = is_pos.item() 360 | prediction, seg_mask_predicted = model(image) 361 | 362 | prediction = nn.Sigmoid()(prediction) 363 | seg_mask_predicted = nn.Sigmoid()(seg_mask_predicted) 364 | 365 | prediction = prediction.item() 366 | image = image.detach().cpu().numpy() 367 | seg_mask = seg_mask.detach().cpu().numpy() 368 | seg_mask_predicted = seg_mask_predicted.detach().cpu().numpy() 369 | 370 | image = cv2.resize(np.transpose(image[0, :, :, :], (1, 2, 0)), dsize) 371 | image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) 372 | 373 | predictions.append(prediction) 374 | predictions_truths.append(is_pos) 375 | res.append((prediction, None, None, is_pos, sample_name[0])) 376 | 377 | 
seg_mask_predicted = seg_mask_predicted[0][0] 378 | seg_mask = seg_mask[0][0] 379 | predicted_segs.append(seg_mask_predicted) 380 | samples["image_names"].append(sample_name[0]) 381 | samples["images"].append(image) 382 | true_segs.append(seg_mask) 383 | 384 | if not is_validation and save_images: 385 | utils.plot_sample(sample_name[0], image, seg_mask_predicted, seg_mask, save_folder, decision=prediction, plot_seg=plot_seg) 386 | utils.save_predicted_segmentation(seg_mask_predicted, sample_name[0], self.run_path) 387 | 388 | if is_validation: 389 | val_metrics = dict() 390 | metrics = utils.get_metrics(np.array(predictions_truths), np.array(predictions)) 391 | FP, FN, TP, TN = list(map(sum, [metrics["FP"], metrics["FN"], metrics["TP"], metrics["TN"]])) 392 | self._log(f"VALIDATION on {eval_loader.dataset.kind} set || AUC={metrics['AUC']:f}, and AP={metrics['AP']:f}, with best thr={metrics['best_thr']:f} sat f-measure={metrics['best_f_measure']:.3f} and FP={FP:d}, FN={FN:d}, TOTAL SAMPLES={FP + FN + TP + TN:d}") 393 | 394 | decisions = np.array(predictions) >= metrics['best_thr'] 395 | 396 | if self.cfg.SEG_BLACK: 397 | black_seg_counter = 0 398 | black_seg = np.zeros(predicted_segs[0].shape) 399 | for i, decision in enumerate(decisions): 400 | if decision == False: 401 | predicted_segs[i] = black_seg 402 | black_seg_counter += 1 403 | self._log(f"Black Segmentations: {black_seg_counter}") 404 | 405 | # Dice 406 | step = 0.01 407 | dice = (0,0) 408 | iou = (0, 0) 409 | f1 = (0, 0) 410 | for i in range(len(predicted_segs)): 411 | true_segs[i] = np.array(true_segs[i]).astype(np.uint8) 412 | 413 | for thr in tqdm(np.arange(0.1, 1, step)): 414 | result_dice = [] 415 | result_precision = [] 416 | result_recall = [] 417 | result_iou = [] 418 | 419 | for i in range(len(predicted_segs)): 420 | #y_true = np.array(true_segs[i]).astype(np.uint8) 421 | y_true = true_segs[i] 422 | y_pred = (np.array(predicted_segs[i])>thr).astype(np.uint8) 423 | 424 | result_dice += 
[utils.dice(y_true, y_pred)] 425 | result_precision += [utils.precision(y_true, y_pred)] 426 | result_recall += [utils.recall(y_true, y_pred)] 427 | result_iou += [utils.iou(y_true, y_pred)] 428 | 429 | if np.mean(result_dice) > dice[0]: 430 | dice = (np.mean(result_dice), thr) 431 | 432 | if np.mean(result_iou) > iou[0]: 433 | iou = (np.mean(result_iou), thr) 434 | 435 | f1_tmp = 2 * np.mean(result_precision) * np.mean(result_recall) / (np.mean(result_precision) + np.mean(result_recall)) 436 | 437 | if f1_tmp > f1[0]: 438 | f1 = (f1_tmp, thr) 439 | val_metrics['Pr'] = np.mean(result_precision) 440 | val_metrics['Re'] = np.mean(result_recall) 441 | 442 | self._log(f"Validation best Dice: {dice[0]:.4f} at {dice[1]:.3f}") 443 | self._log(f"Validation best IoU: {iou[0]:.4f} at {iou[1]:.3f}") 444 | self._log(f"Validation best F1: {f1[0]:.4f} at {f1[1]:.3f}") 445 | 446 | val_metrics['dec_threshold'] = metrics['best_thr'] 447 | val_metrics['F1'], val_metrics['f1_threshold'] = f1 448 | val_metrics['Dice'], val_metrics['dice_threshold'] = dice 449 | val_metrics['IoU'], val_metrics['iou_threshold'] = iou 450 | val_metrics['best_f_measure'] = metrics['best_f_measure'] 451 | 452 | return metrics["AP"], metrics["accuracy"], val_metrics 453 | else: 454 | decisions = np.array(predictions) >= thresholds["dec_threshold"] 455 | samples["decisions"] = list(decisions) 456 | FP, FN, TN, TP = utils.calc_confusion_mat(decisions, np.array(predictions_truths)) 457 | 458 | fp = sum(FP).item() 459 | fn = sum(FN).item() 460 | tn = sum(TN).item() 461 | tp = sum(TP).item() 462 | 463 | pr = tp / (tp + fp) if tp else 0 464 | re = tp / (tp + fn) if tp else 0 465 | f1 = (2 * pr * re) / (pr + re) if pr and re else 0 466 | accuracy = (tp + tn) / (tp + tn + fp + fn) 467 | 468 | self._log(f"Decision EVAL on {eval_loader.dataset.kind}. 
Pr: {pr:f}, Re: {re:f}, F1: {f1:f}, Accuracy: {accuracy:f}, Threshold: {thresholds['dec_threshold']}") 469 | self._log(f"TP: {tp}, FP: {fp}, FN: {fn}, TN: {tn}") 470 | 471 | # Max(S) classification 472 | seg_fp = 0 473 | seg_fn = 0 474 | seg_tp = 0 475 | seg_tn = 0 476 | for i in range(len(predictions_truths)): 477 | max_s = (predicted_segs[i] > thresholds["dice_threshold"]).astype(np.uint8).max() 478 | 479 | if max_s == 1: 480 | if predictions_truths[i] == 1: 481 | seg_tp += 1 482 | elif predictions_truths[i] == 0: 483 | seg_fp += 1 484 | elif max_s == 0: 485 | if predictions_truths[i] == 1: 486 | seg_fn += 1 487 | elif predictions_truths[i] == 0: 488 | seg_tn += 1 489 | self._log(f"Max(S) classification Pred crnenjem: TP: {seg_tp}, FP: {seg_fp}, FN: {seg_fn}, TN: {seg_tn}") 490 | 491 | if self.cfg.SEG_BLACK: 492 | black_seg_counter = 0 493 | black_seg = np.zeros(predicted_segs[0].shape) 494 | for i, decision in enumerate(decisions): 495 | if decision == False: 496 | if (predicted_segs[i] > thresholds["dice_threshold"]).astype(np.uint8).max() > 0: 497 | black_seg_counter += 1 498 | self._log(f"Blacked: {samples['image_names'][i]}\t{predictions_truths[i]}\t{true_segs[i].astype(np.uint8).max()}") 499 | predicted_segs[i] = black_seg 500 | self._log(f"Black Segmentations: {black_seg_counter}") 501 | 502 | # Max(S) classification 503 | seg_fp = 0 504 | seg_fn = 0 505 | seg_tp = 0 506 | seg_tn = 0 507 | for i in range(len(predictions_truths)): 508 | max_s = (predicted_segs[i] > thresholds["dice_threshold"]).astype(np.uint8).max() 509 | 510 | if max_s == 1: 511 | if predictions_truths[i] == 1: 512 | seg_tp += 1 513 | elif predictions_truths[i] == 0: 514 | seg_fp += 1 515 | elif max_s == 0: 516 | if predictions_truths[i] == 1: 517 | seg_fn += 1 518 | elif predictions_truths[i] == 0: 519 | seg_tn += 1 520 | self._log(f"Max(S) classification Po crnenjem: TP: {seg_tp}, FP: {seg_fp}, FN: {seg_fn}, TN: {seg_tn}") 521 | 522 | # Dice, IoU in F1 523 | mean_dice, std_dice, 
# --- tail of the evaluation method (started on earlier original lines);
# --- preserved verbatim as comments to keep this edit self-contained:
# mean_iou, std_iou, mean_pr, std_pr, mean_re, std_re, adj_thr_c = utils.dice_iou(predicted_segs, true_segs, thresholds, samples["images"], samples["image_names"], self.run_path, decisions, save_images=self.cfg.SAVE_IMAGES, adjusted_threshold=self.cfg.THR_ADJUSTMENT)
# # Adjusted threshold
# if self.cfg.THR_ADJUSTMENT:
#     self._log(f"Adjusted thresholds: {adj_thr_c}")
# self._log(f"{eval_loader.dataset.kind} set. Precision mean = {mean_pr:f}, std = {std_pr:f}")
# self._log(f"{eval_loader.dataset.kind} set. Recall mean = {mean_re:f}, std = {std_re:f}")
# self._log(f"{eval_loader.dataset.kind} set. F1 mean = {2 * mean_pr * mean_re / (mean_pr + mean_re):f}, std = {2 * std_pr * std_re / (std_pr + std_re):f} at {thresholds['f1_threshold']:f}")
# self._log(f"{eval_loader.dataset.kind} set. Dice mean = {mean_dice:f}, std = {std_dice:f} at {thresholds['dice_threshold']:f}")
# self._log(f"{eval_loader.dataset.kind} set. IoU mean = {mean_iou:f}, std = {std_iou:f} at {thresholds['iou_threshold']:f}")

# NOTE(review): End2End methods reconstructed from a line-mangled dump; the
# enclosing class header lives earlier in the file.

def get_dec_gradient_multiplier(self):
    """Return the gradient multiplier for the decision head: 0 when
    cfg.GRADIENT_ADJUSTMENT is enabled (blocks gradient flow), else 1."""
    grad_m = 0 if self.cfg.GRADIENT_ADJUSTMENT else 1
    self._log(f"Returning dec_gradient_multiplier {grad_m}", LVL_DEBUG)
    return grad_m

def set_dec_gradient_multiplier(self, model, multiplier):
    """Push the decision-head gradient multiplier into the model."""
    model.set_gradient_multipliers(multiplier)

def get_loss_weights(self, epoch):
    """Return (seg_loss_weight, dec_loss_weight) for this epoch.

    With cfg.DYN_BALANCED_LOSS the segmentation weight decays linearly from
    1 to 0 over training while the decision weight grows linearly up to
    cfg.DELTA_CLS_LOSS; otherwise both stay constant.
    """
    total_epochs = float(self.cfg.EPOCHS)
    if self.cfg.DYN_BALANCED_LOSS:
        progress = epoch / total_epochs
        seg_loss_weight = 1 - progress
        dec_loss_weight = self.cfg.DELTA_CLS_LOSS * progress
    else:
        seg_loss_weight = 1
        dec_loss_weight = self.cfg.DELTA_CLS_LOSS
    self._log(f"Returning seg_loss_weight {seg_loss_weight} and dec_loss_weight {dec_loss_weight}", LVL_DEBUG)
    return seg_loss_weight, dec_loss_weight

# --- head of reload_model (continues on the following original lines), preserved:
# def reload_model(self, model, load_final=False):
#     if
self.cfg.USE_BEST_MODEL: 562 | if self.cfg.BEST_MODEL_TYPE == "dec": 563 | path = os.path.join(self.model_path, "best_dec_dict.pth") 564 | elif self.cfg.BEST_MODEL_TYPE == "seg": 565 | path = os.path.join(self.model_path, "best_seg_dict.pth") 566 | else: 567 | path = os.path.join(self.model_path, "best_state_dict.pth") 568 | model.load_state_dict(torch.load(path, map_location=f"cuda:{self.cfg.GPU}")) 569 | self._log(f"Loading model state from {path}") 570 | elif load_final: 571 | path = os.path.join(self.model_path, "final_state_dict.pth") 572 | model.load_state_dict(torch.load(path, map_location=f"cuda:{self.cfg.GPU}")) 573 | self._log(f"Loading model state from {path}") 574 | else: 575 | self._log("Keeping same model state") 576 | 577 | def _save_params(self): 578 | params = self.cfg.get_as_dict() 579 | params_lines = sorted(map(lambda e: e[0] + ":" + str(e[1]) + "\n", params.items())) 580 | fname = os.path.join(self.run_path, "run_params.txt") 581 | with open(fname, "w+") as f: 582 | f.writelines(params_lines) 583 | 584 | def _save_train_results(self, results): 585 | losses, validation_data, validation_metrics, lrs = results 586 | ls, ld, l, le = map(list, zip(*losses)) 587 | plt.plot(le, l, label="Loss", color="red") 588 | plt.plot(le, ls, label="Loss seg") 589 | plt.plot(le, ld, label="Loss dec") 590 | plt.ylim(bottom=0) 591 | plt.grid() 592 | plt.xlabel("Epochs") 593 | if self.cfg.VALIDATE: 594 | v, ve = map(list, zip(*validation_data)) 595 | plt.twinx() 596 | plt.plot(ve, v, label="Validation AP", color="Green") 597 | plt.ylim((0, 1)) 598 | plt.legend() 599 | plt.savefig(os.path.join(self.run_path, "loss_val"), dpi=200) 600 | 601 | df_loss = pd.DataFrame(data={"loss_seg": ls, "loss_dec": ld, "loss": l, "epoch": le}) 602 | df_loss.to_csv(os.path.join(self.run_path, "losses.csv"), index=False) 603 | 604 | if self.cfg.VALIDATE: 605 | df_loss = pd.DataFrame(data={"validation_data": ls, "loss_dec": ld, "loss": l, "epoch": le}) 606 | 
df_loss.to_csv(os.path.join(self.run_path, "losses.csv"), index=False) 607 | 608 | # Dice & IOU plot 609 | if len(validation_metrics) != 0: 610 | epochs, metrics = map(list, zip(*validation_metrics)) 611 | f1 = [i['F1'] for i in metrics] 612 | pr = [i['Pr'] for i in metrics] 613 | re = [i['Re'] for i in metrics] 614 | dice = [i['Dice'] for i in metrics] 615 | iou = [i['IoU'] for i in metrics] 616 | plt.clf() 617 | plt.plot(epochs, f1, label="F1") 618 | plt.plot(epochs, pr, label="Pr") 619 | plt.plot(epochs, re, label="Re") 620 | plt.plot(epochs, dice, label="Dice") 621 | plt.plot(epochs, iou, label="IoU") 622 | plt.xlabel("Epochs") 623 | plt.ylabel("Score") 624 | plt.legend() 625 | plt.savefig(os.path.join(self.run_path, "scores"), dpi=200) 626 | 627 | # Loss plot 628 | # Loss 629 | plt.clf() 630 | plt.plot(le, l) 631 | plt.xlabel("Epochs") 632 | plt.ylabel("Loss") 633 | plt.savefig(os.path.join(self.run_path, "loss"), dpi=200) 634 | 635 | # Loss Segmentation 636 | plt.clf() 637 | plt.plot(le, ls) 638 | plt.xlabel("Epochs") 639 | plt.ylabel("Loss Segmentation") 640 | plt.savefig(os.path.join(self.run_path, "loss_seg"), dpi=200) 641 | 642 | # Loss Dec 643 | plt.clf() 644 | plt.plot(le, ld) 645 | plt.xlabel("Epochs") 646 | plt.ylabel("Loss Dec") 647 | plt.savefig(os.path.join(self.run_path, "loss_dec"), dpi=200) 648 | 649 | # Learning rate plot 650 | epochs, lr = map(list, zip(*lrs)) 651 | plt.clf() 652 | plt.plot(epochs, lr) 653 | plt.xlabel("Epochs") 654 | plt.ylabel("Learning rate") 655 | plt.savefig(os.path.join(self.run_path, "learning_rate"), dpi=200) 656 | 657 | def _save_model(self, model, name="final_state_dict.pth"): 658 | output_name = os.path.join(self.model_path, name) 659 | self._log(f"Saving current model state to {output_name}") 660 | if os.path.exists(output_name): 661 | os.remove(output_name) 662 | 663 | torch.save(model.state_dict(), output_name) 664 | 665 | def _get_optimizer(self, model): 666 | if self.cfg.OPTIMIZER == "sgd": 667 | return 
torch.optim.SGD(model.parameters(), self.cfg.LEARNING_RATE) 668 | elif self.cfg.OPTIMIZER == "adam": 669 | return torch.optim.Adam(model.parameters(), self.cfg.LEARNING_RATE) 670 | 671 | def _get_scheduler(self, optimizer): 672 | if self.cfg.SCHEDULER is None: 673 | return None 674 | else: 675 | self._log(f"Using Learning Rate Scheduler: StepLR, Step size: {int(self.cfg.SCHEDULER[0])}, Gamma: {self.cfg.SCHEDULER[1]}") 676 | return torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(self.cfg.SCHEDULER[0]), gamma=self.cfg.SCHEDULER[1]) 677 | 678 | def _get_learning_rate(self, optimizer): 679 | for p in optimizer.param_groups: 680 | return p["lr"] 681 | 682 | def _get_loss(self, is_seg, pos_weight=None): 683 | reduction = "none" if self.cfg.WEIGHTED_SEG_LOSS and is_seg else "mean" 684 | if self.cfg.BCE_LOSS_W and pos_weight is not None: 685 | return nn.BCEWithLogitsLoss(reduction=reduction, pos_weight=torch.Tensor([pos_weight])).to(self._get_device()) 686 | else: 687 | return nn.BCEWithLogitsLoss(reduction=reduction).to(self._get_device()) 688 | 689 | def _get_device(self): 690 | return f"cuda:{self.cfg.GPU}" 691 | 692 | def _set_results_path(self): 693 | self.run_name = f"{self.cfg.RUN_NAME}_FOLD_{self.cfg.FOLD}" if self.cfg.DATASET in ["KSDD", "DAGM"] else self.cfg.RUN_NAME 694 | 695 | results_path = os.path.join(self.cfg.RESULTS_PATH, self.cfg.DATASET) 696 | self.tensorboard_path = os.path.join(results_path, "tensorboard", self.run_name) 697 | 698 | run_path = os.path.join(results_path, self.cfg.RUN_NAME) 699 | if self.cfg.DATASET in ["KSDD", "DAGM"]: 700 | run_path = os.path.join(run_path, f"FOLD_{self.cfg.FOLD}") 701 | 702 | self._log(f"Executing run with path {run_path}") 703 | 704 | self.run_path = run_path 705 | self.model_path = os.path.join(run_path, "models") 706 | self.outputs_path = os.path.join(run_path, "test_outputs") 707 | 708 | def _create_results_dirs(self): 709 | list(map(utils.create_folder, [self.run_path, self.model_path, 
self.outputs_path, ])) 710 | 711 | def _get_model(self): 712 | if self.cfg.ARCHITECTURE == 'SegDecNetOriginalJIM': 713 | seg_net = SegDecNetOriginalJIM(self._get_device(), self.cfg.INPUT_WIDTH, self.cfg.INPUT_HEIGHT, self.cfg.INPUT_CHANNELS) 714 | elif self.cfg.ARCHITECTURE == 'SegDecNet++': 715 | seg_net = SegDecNetPlusPlus(self._get_device(), self.cfg.INPUT_WIDTH, self.cfg.INPUT_HEIGHT, self.cfg.INPUT_CHANNELS) 716 | else: 717 | raise Exception('Invalid architecture requested') 718 | return seg_net 719 | 720 | def print_run_params(self): 721 | for l in sorted(map(lambda e: e[0] + ":" + str(e[1]) + "\n", self.cfg.get_as_dict().items())): 722 | k, v = l.split(":") 723 | self._log(f"{k:25s} : {str(v.strip())}") 724 | 725 | def set_seed(self): 726 | if self.cfg.REPRODUCIBLE_RUN is not None: 727 | self._log(f"Reproducible run, fixing all seeds to: {self.cfg.REPRODUCIBLE_RUN}", LVL_DEBUG) 728 | np.random.seed(self.cfg.REPRODUCIBLE_RUN) 729 | torch.manual_seed(self.cfg.REPRODUCIBLE_RUN) 730 | random.seed(self.cfg.REPRODUCIBLE_RUN) 731 | torch.cuda.manual_seed(self.cfg.REPRODUCIBLE_RUN) 732 | torch.cuda.manual_seed_all(self.cfg.REPRODUCIBLE_RUN) 733 | torch.backends.cudnn.deterministic = True 734 | torch.backends.cudnn.benchmark = False 735 | 736 | # def seg_val_metrics(self, truth_segmentations, predicted_segmentations, dataset_kind, threshold_step=0.005, pxl_distance=2): 737 | # n_samples = len(truth_segmentations) 738 | # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (1 + pxl_distance * 2, 1 + pxl_distance * 2)) 739 | # thresholds, pr_results, re_results, f1_results = [], [], [], [] 740 | # metrics = dict() 741 | # 742 | # self._log(f"Validation metrics on {dataset_kind} set. {pxl_distance} pixel distance used. 
Threshold step: {threshold_step}") 743 | # 744 | # for threshold in np.arange(0.1, 1, threshold_step): 745 | # results = [] 746 | # for i in range(n_samples): 747 | # y_true = np.array(truth_segmentations[i]).astype(np.uint8) 748 | # y_true_d = cv2.dilate(y_true, kernel) if pxl_distance > 0 else y_true 749 | # y_pred = (np.array(predicted_segmentations[i])>threshold).astype(np.uint8) 750 | # 751 | # tp_d = sum(sum((y_true_d==1)&(y_pred==1))).item() 752 | # fp_d = sum(sum((y_true_d==0)&(y_pred==1))).item() 753 | # fn = sum(sum((y_true==1)&(y_pred==0))).item() 754 | # 755 | # pr = tp_d / (tp_d + fp_d) if tp_d else 0 756 | # re = tp_d / (tp_d + fn) if tp_d else 0 757 | # f1 = (2 * pr * re) / (pr + re) if pr and re else 0 758 | # 759 | # results.append((pr, re, f1)) 760 | # 761 | # thresholds.append(threshold) 762 | # pr_results.append(np.mean(np.array(results)[:, 0])) 763 | # re_results.append(np.mean(np.array(results)[:, 1])) 764 | # f1_results.append(np.mean(np.array(results)[:, 2])) 765 | # 766 | # f1_max_index = f1_results.index(max(f1_results)) 767 | # metrics['Pr'] = pr_results[f1_max_index] 768 | # metrics['Re'] = re_results[f1_max_index] 769 | # metrics['F1'] = max(f1_results) 770 | # metrics['f1_threshold'] = thresholds[f1_max_index] 771 | # 772 | # self._log(f"Best F1: {metrics['F1']:f} at {thresholds[f1_max_index]:f}. 
Pr: {metrics['Pr']:f}, Re: {metrics['Re']:f}") 773 | # 774 | # return metrics -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: segdecnet++ 2 | channels: 3 | - defaults 4 | dependencies: 5 | - _libgcc_mutex=0.1=main 6 | - ca-certificates=2021.1.19=h06a4308_1 7 | - certifi=2020.12.5=py38h06a4308_0 8 | - ld_impl_linux-64=2.33.1=h53a641e_7 9 | - libffi=3.3=he6710b0_2 10 | - libgcc-ng=9.1.0=hdf63c60_0 11 | - libstdcxx-ng=9.1.0=hdf63c60_0 12 | - ncurses=6.2=he6710b0_1 13 | - openssl=1.1.1k=h27cfd23_0 14 | - pip=21.0.1=py38h06a4308_0 15 | - python=3.8.8=hdb3f193_4 16 | - readline=8.1=h27cfd23_0 17 | - setuptools=52.0.0=py38h06a4308_0 18 | - sqlite=3.35.4=hdfb4753_0 19 | - tk=8.6.10=hbc83047_0 20 | - wheel=0.36.2=pyhd3eb1b0_0 21 | - xz=5.2.5=h7b6447c_0 22 | - zlib=1.2.11=h7b6447c_3 23 | - pip: 24 | - absl-py==0.12.0 25 | - cachetools==4.2.1 26 | - chardet==4.0.0 27 | - cycler==0.10.0 28 | - filelock==3.10.0 29 | - future==0.18.2 30 | - google-auth==1.28.0 31 | - google-auth-oauthlib==0.4.4 32 | - grpcio==1.37.0 33 | - huggingface-hub==0.13.2 34 | - idna==2.10 35 | - joblib==1.0.1 36 | - kiwisolver==1.3.1 37 | - markdown==3.3.4 38 | - matplotlib==3.2.2 39 | - numpy==1.18.5 40 | - oauthlib==3.1.0 41 | - opencv-contrib-python==4.5.1.48 42 | - opencv-python==4.5.1.48 43 | - packaging==23.0 44 | - pandas==1.0.5 45 | - pillow==8.2.0 46 | - protobuf==3.15.7 47 | - pyasn1==0.4.8 48 | - pyasn1-modules==0.2.8 49 | - pyparsing==2.4.7 50 | - python-dateutil==2.8.1 51 | - pytz==2021.1 52 | - pyyaml==6.0 53 | - requests==2.25.1 54 | - requests-oauthlib==1.3.0 55 | - rsa==4.7.2 56 | - scikit-learn==0.23.2 57 | - scipy==1.5.4 58 | - six==1.15.0 59 | - tensorboard==2.4.1 60 | - tensorboard-plugin-wit==1.8.0 61 | - threadpoolctl==2.1.0 62 | - timm==0.6.12 63 | - torch==1.8.1 64 | - torchinfo==1.7.1 65 | - torchvision==0.9.1 66 | - tqdm==4.64.1 67 | - 
# --- tail of environment.yml from the original dump, preserved verbatim:
#       - typing-extensions==3.7.4.3
#       - urllib3==1.26.4
#       - werkzeug==1.0.1

# --- eval_speed.py ---------------------------------------------------------
from config import Config
from end2end import End2End
from data.dataset_catalog import get_dataset
from utils import create_folder
import sys, os, torch

def get_config():
    """Rebuild a Config for evaluation from argv and a stored run_params.txt.

    argv: gpu, run_name, dataset, dataset_path, eval_name
          [thr_adjust, seg_black]  (optional pair)
    Returns (configuration, eval_name).
    """
    gpu, run_name, dataset, dataset_path, eval_name = sys.argv[1:6]
    print(gpu, run_name, dataset, dataset_path, eval_name)
    # Configuration
    configuration = Config()

    params = [i.replace('\n', '') for i in open(os.path.join('RESULTS', dataset, run_name, 'run_params.txt'), 'r')]

    for p in params:
        # BUG FIX: maxsplit=1 -- values (e.g. paths) may contain ':'
        # themselves; the original bare split(":") raised ValueError on them.
        p, v = p.split(":", 1)
        try:
            v = int(v)
        except ValueError:
            try:
                v = float(v)
            except ValueError:
                pass

        if v == 'True':
            v = True
        elif v == 'False':
            v = False
        elif v == "None":
            v = None

        setattr(configuration, p, v)

    configuration.RUN_NAME = run_name
    configuration.GPU = gpu
    configuration.DATASET_PATH = dataset_path
    configuration.SAVE_IMAGES = True
    configuration.ON_DEMAND_READ = True

    if 'ARCHITECTURE' in os.environ:
        configuration.ARCHITECTURE = os.environ['ARCHITECTURE']

    if len(sys.argv) >= 8:
        print(sys.argv)
        # BUG FIX: take exactly two extra args -- the original slice [6:9]
        # could yield three values and crash the 2-way unpack.
        thr_adjust, seg_black = sys.argv[6:8]

        configuration.THR_ADJUSTMENT = float(thr_adjust) if thr_adjust != "None" else None
        # BUG FIX: bool("False") is True for any non-empty string; compare
        # against the literal "True" instead.
        configuration.SEG_BLACK = seg_black == "True"
    else:
        configuration.THR_ADJUSTMENT = None  # 0.9
        configuration.SEG_BLACK = False  # True

    configuration.init_extra()

    return configuration, eval_name

# --- head of the __main__ block (continues on the following original lines), preserved:
# if __name__ == "__main__":
#     cfg, eval_name = get_config()
#     # Model
#     end2end = End2End(cfg=cfg)
#     end2end._set_results_path()
#     end2end.print_run_params()
#     end2end.set_seed()
#     device
# --- tail of eval_speed.py's __main__ block, reconstructed (head is on the
# --- preceding original lines):
if __name__ == "__main__":
    cfg, eval_name = get_config()

    # Model
    end2end = End2End(cfg=cfg)
    end2end._set_results_path()
    end2end.print_run_params()
    end2end.set_seed()
    device = end2end._get_device()
    model = end2end._get_model().to(device)
    end2end.set_dec_gradient_multiplier(model, 0.0)

    if len(sys.argv) > 8:
        path = os.path.join(end2end.model_path, str(sys.argv[6]))
        model.load_state_dict(torch.load(path, map_location=f"cuda:{cfg.GPU}"))
        end2end._log(f"Loading model state from {path}")
    else:
        end2end.reload_model(model=model, load_final=False)

    # Make new eval save folder
    end2end.run_path = os.path.join(end2end.cfg.RESULTS_PATH, cfg.DATASET, eval_name)
    end2end.outputs_path = os.path.join(end2end.run_path, "test_outputs")
    create_folder(end2end.run_path)
    create_folder(end2end.outputs_path)

    end2end._log(f"Dataset: {cfg.DATASET}, Path: {cfg.DATASET_PATH}")

    with torch.no_grad():
        validation_loader = get_dataset("VAL", end2end.cfg)
        end2end.eval_model_speed(device=device, model=model, eval_loader=validation_loader)

# --- evaluate_output.py ----------------------------------------------------
import os.path
from pathlib import Path
import argparse
import cv2
import numpy as np

import torch

def FP(y_true, y_pred):
    """Count of false positives: predicted 1 where truth is 0."""
    return ((1 - y_true) * y_pred).sum()

def FN(y_true, y_pred):
    """Count of false negatives: predicted 0 where truth is 1."""
    return (y_true * (1 - y_pred)).sum()

def precision(y_true, y_pred):
    """TP / (TP + FP), smoothed by 1e-15 against empty predictions."""
    one = torch.ones_like(torch.Tensor(y_true))
    true_pos = (y_true * y_pred).sum()
    false_pos = ((one - y_true) * y_pred).sum()
    return (true_pos + 1e-15) / (true_pos + false_pos + 1e-15)

def general_precision(y_true, y_pred):
    """Precision, defined as 1 when both truth and prediction are all-empty
    (and 0 when only the truth is empty)."""
    if y_true.sum() == 0:
        return 1 if y_pred.sum() == 0 else 0
    return precision(y_true, y_pred)

# --- head of recall() (continues on the following original lines), preserved:
# def recall(y_true, y_pred):
#     one = torch.ones_like(torch.Tensor(y_pred))
#     one = one.numpy()
#     TP = (y_true * y_pred).sum()
#     FN = (y_true*(one - y_pred)).sum()
def recall(y_true, y_pred):
    """TP / (TP + FN), smoothed by 1e-15 against empty truths."""
    one = torch.ones_like(torch.Tensor(y_pred))
    one = one.numpy()
    TP = (y_true * y_pred).sum()
    FN = (y_true * (one - y_pred)).sum()
    return (TP + 1e-15) / (TP + FN + 1e-15)

def general_recall(y_true, y_pred):
    """Recall, defined as 1 when truth and prediction are both all-empty."""
    if y_true.sum() == 0:
        if y_pred.sum() == 0:
            return 1
        else:
            return 0
    return recall(y_true, y_pred)

def dice(y_true, y_pred):
    """Dice coefficient 2|A∩B| / (|A|+|B|), smoothed by 1e-15."""
    return (2 * (y_true * y_pred).sum() + 1e-15) / (y_true.sum() + y_pred.sum() + 1e-15)

def general_dice(y_true, y_pred):
    """Dice, defined as 1 when truth and prediction are both all-empty."""
    if y_true.sum() == 0:
        if y_pred.sum() == 0:
            return 1
        else:
            return 0
    return dice(y_true, y_pred)

def jaccard(y_true, y_pred):
    """Jaccard index (IoU) |A∩B| / |A∪B|, smoothed by 1e-15."""
    intersection = (y_true * y_pred).sum()
    union = y_true.sum() + y_pred.sum() - intersection
    return (intersection + 1e-15) / (union + 1e-15)

def general_jaccard(y_true, y_pred):
    """Jaccard, defined as 1 when truth and prediction are both all-empty."""
    if y_true.sum() == 0:
        if y_pred.sum() == 0:
            return 1
        else:
            return 0
    return jaccard(y_true, y_pred)

if __name__ == '__main__':
    torch.set_num_threads(1)

    parser = argparse.ArgumentParser()
    parser.add_argument('-ground_truth_dir', type=str, help='path where ground truth images are located')
    parser.add_argument('-gt_filenames', type=str, default=None, help='txt with filenames for validation ', action='append')
    parser.add_argument('-pred_dir', type=str, default='prediction_output', help='path with predictions')
    parser.add_argument('-threshold', type=float, default=0.2, help='crack threshold detection')
    args = parser.parse_args()

    result_precision = []
    result_recall = []
    result_dice = []
    result_jaccard = []

    cls_pred = []
    cls_gt = []

    if args.gt_filenames:
        img_names = []
        for data_filename in args.gt_filenames:
            assert os.path.exists(data_filename), f'{data_filename} does not exist'
            with open(data_filename) as f:
                img_names += [l.strip() for l in f.readlines()]

        paths = [Path(args.ground_truth_dir, f) for f in sorted(img_names)]
    else:
        paths = [path for path in Path(args.ground_truth_dir).glob('*')]

    for file_name in paths:
        y_true = (cv2.imread(str(file_name), 0) > 128).astype(np.uint8)

        pred_file_name = Path(args.pred_dir) / file_name.name
        if not pred_file_name.exists():
            pred_file_name = Path(args.pred_dir) / (os.path.splitext(file_name.name)[0] + ".png")
            if not pred_file_name.exists():
                print(f'missing prediction for file {file_name.name} (.jpg or .png not found)')
                continue

        # BUG FIX: append the ground-truth label only once a matching
        # prediction exists; the original appended before the `continue`
        # above, so any missing prediction left cls_gt longer than cls_pred
        # and misaligned every subsequent AP/AUC pair.
        cls_gt.append(y_true.any())

        pred_image = cv2.imread(str(pred_file_name), 0)
        y_pred = (pred_image > 255 * args.threshold).astype(np.uint8)
        cls_pred.append(np.max(pred_image) / 255.0)

        result_precision += [precision(y_true, y_pred).item()]
        if y_true.any():  # ignore images without cracks for recall
            result_recall += [recall(y_true, y_pred).item()]
        result_dice += [dice(y_true, y_pred).item()]
        result_jaccard += [jaccard(y_true, y_pred).item()]

    print('Precision = ', np.mean(result_precision), np.std(result_precision))
    print('recall = ', np.mean(result_recall), np.std(result_recall))
    print('f1 = ', 2*np.mean(result_precision)*np.mean(result_recall)/(np.mean(result_precision)+np.mean(result_recall)), 2*np.std(result_precision)*np.std(result_recall)/(np.std(result_precision)+np.std(result_recall)))
    print('Dice = ', np.mean(result_dice), np.std(result_dice))
    print('Jaccard = ', np.mean(result_jaccard), np.std(result_jaccard))

    if len(np.unique(cls_gt)) > 1:
        cls_gt = np.array(cls_gt)
        cls_pred = np.array(cls_pred)

        # run AP and AUC on scores
        from sklearn.metrics import average_precision_score, roc_auc_score
        print('CLASS ONLY: AP = ', average_precision_score(cls_gt, cls_pred))
        print('CLASS ONLY: AUC = ', roc_auc_score(cls_gt, cls_pred))

        # and run other metrics on binary output
        cls_pred = (cls_pred > args.threshold).astype(np.uint8)

        cls_precision = precision(cls_gt, cls_pred).item()
        cls_recall = recall(cls_gt, cls_pred).item()

        print('CLASS ONLY: Precision = ', cls_precision, 'FP =', FP(cls_gt, cls_pred))
        print('CLASS ONLY: recall = ', cls_recall, 'FN =', FN(cls_gt, cls_pred))
        print('CLASS ONLY: F1 = ', 2*cls_precision*cls_recall/(cls_precision+cls_recall))

# --- models.py -------------------------------------------------------------
import math
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F

BATCHNORM_TRACK_RUNNING_STATS = False
BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997


class BNorm_init(nn.BatchNorm2d):
    """BatchNorm2d whose affine params start at weight ~ U(0, 1), bias = 0."""
    def reset_parameters(self):
        init.uniform_(self.weight, 0, 1)
        init.zeros_(self.bias)


class Conv2d_init(nn.Conv2d):
    """Conv2d with Xavier-normal weight init and fan-in-bounded uniform bias."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode="zeros"):
        super(Conv2d_init, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)

    def reset_parameters(self):
        init.xavier_normal_(self.weight)
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)


def _conv_block(in_chanels, out_chanels, kernel_size, padding):
    # conv -> FeatureNorm -> ReLU ('chanels' spelling kept; callers use it)
    return nn.Sequential(Conv2d_init(in_channels=in_chanels, out_channels=out_chanels,
                                     kernel_size=kernel_size, padding=padding, bias=False),
                         FeatureNorm(num_features=out_chanels, eps=0.001),
                         nn.ReLU())

# --- head of FeatureNorm (continues on the following original lines), preserved:
# class FeatureNorm(nn.Module):
#     def __init__(self, num_features, feature_index=1, rank=4, reduce_dims=(2, 3), eps=0.001, include_bias=True):
class FeatureNorm(nn.Module):
    """Per-feature normalization with a learnable scale and (optionally
    trainable) bias.

    Normalizes over `reduce_dims` as (x - mean) / sqrt(std + eps), then
    applies the per-channel affine parameters.
    """

    def __init__(self, num_features, feature_index=1, rank=4, reduce_dims=(2, 3), eps=0.001, include_bias=True):
        super(FeatureNorm, self).__init__()
        param_shape = [1] * rank
        param_shape[feature_index] = num_features
        self.shape = param_shape
        self.reduce_dims = reduce_dims

        self.scale = nn.Parameter(torch.ones(self.shape, requires_grad=True, dtype=torch.float))
        if include_bias:
            self.bias = nn.Parameter(torch.zeros(self.shape, requires_grad=True, dtype=torch.float))
        else:
            self.bias = nn.Parameter(torch.zeros(self.shape, requires_grad=False, dtype=torch.float))

        self.eps = eps

    def forward(self, features):
        # NOTE: sqrt of the *standard deviation* (not variance) -- kept
        # exactly as in the original implementation.
        centered = features - torch.mean(features, dim=self.reduce_dims, keepdim=True)
        denom = (torch.std(features, dim=self.reduce_dims, keepdim=True) + self.eps).sqrt()
        return self.scale * (centered / denom) + self.bias

class UpSampling(nn.Module):
    """UpSampling block: transposed conv (2x resolution) followed by conv blocks."""
    def __init__(self, n_conv_blocks, in_channels, out_channels, n_channels_connected, kernel_size, padding, stride=2):
        super(UpSampling, self).__init__()
        self.upsample = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride)
        self.conv = nn.Sequential()
        for idx in range(n_conv_blocks):
            # first block also takes the skip-connection channels
            in_ch = n_channels_connected + out_channels if idx == 0 else out_channels
            self.conv.add_module(f'conv_block_{idx+1}', _conv_block(in_ch, out_channels, kernel_size, padding))

    def forward(self, x1, x2):
        up = self.upsample(x1)
        # pad the upsampled map so it matches the skip connection's H x W
        pad_h = x2.size()[2] - up.size()[2]
        pad_w = x2.size()[3] - up.size()[3]
        up = F.pad(up, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
        return self.conv(torch.cat([x2, up], dim=1))

# --- head of Se_module_diff (continues on the following original lines), preserved:
# class Se_module_diff(nn.Module):
#     def __init__(self, inp, oup, Avg_size = 1, se_ratio = 1):
#         super().__init__()
#         self.avg = nn.AdaptiveAvgPool2d((Avg_size, Avg_size))
class Se_module_diff(nn.Module):
    """Squeeze-and-excitation gate computed from x and applied to z.

    x and z come from different conv layers (z has passed through more
    convs); z is re-weighted by sigmoid(SE(x)).
    """

    def __init__(self, inp, oup, Avg_size=1, se_ratio=1):
        super().__init__()
        self.avg = nn.AdaptiveAvgPool2d((Avg_size, Avg_size))
        num_squeezed_channels = max(1, int(inp / se_ratio))
        self._se_reduce = nn.Conv2d(in_channels=inp, out_channels=num_squeezed_channels, kernel_size=1)
        self._se_expand = nn.Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
        self.Avg_size = Avg_size
        self.reset_parameters()

    def reset_parameters(self):
        """He-style init for convs, constant init for norm/linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # FIX: use kernel height * width; the original multiplied
                # kernel_size[0] by itself (identical for the square 1x1
                # kernels used here, correct for rectangular ones).
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, x, z):
        target_size = z.size()
        y = self.avg(x)
        y = self._se_reduce(y)
        y = y * torch.sigmoid(y)  # swish gating on the squeezed features
        y = self._se_expand(y)
        if self.Avg_size != 1:
            # FIX: F.upsample_bilinear is deprecated; interpolate with
            # align_corners=True is its documented equivalent.
            y = F.interpolate(y, size=[target_size[2], target_size[3]], mode='bilinear', align_corners=True)
        return torch.sigmoid(y) * z

class DownSampling(nn.Module):
    """DownSampling block: optional 2x2 max-pool followed by conv blocks."""
    def __init__(self, pooling, n_conv_blocks, in_channels, out_channels, kernel_size, padding):
        super(DownSampling, self).__init__()
        self.downsample = nn.Sequential()

        if pooling:
            self.downsample.add_module('max_pooling', nn.MaxPool2d(2))

        for i in range(n_conv_blocks):
            self.downsample.add_module(f'conv_block_{i+1}', _conv_block(in_channels, out_channels, kernel_size, padding))
            in_channels = out_channels

    def forward(self, x):
        return self.downsample(x)

# --- head of SegDecNetPlusPlus (continues on the following original lines), preserved:
# class SegDecNetPlusPlus(nn.Module):
#     def __init__(self, device, input_width, input_height, input_channels):
class SegDecNetPlusPlus(nn.Module):
    """U-Net-style segmentation network with SSE skip connections plus a
    decision (classification) head fed by the bottleneck and the seg mask.

    forward() returns (decision_logit, seg_mask_logits).
    """

    def __init__(self, device, input_width, input_height, input_channels):
        super(SegDecNetPlusPlus, self).__init__()
        if input_width % 8 != 0 or input_height % 8 != 0:
            raise Exception(f"Input size must be divisible by 8! width={input_width}, height={input_height}")
        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels

        # Encoder
        self.volume1 = DownSampling(pooling=False, n_conv_blocks=1, in_channels=self.input_channels, out_channels=32, kernel_size=5, padding=2)
        self.volume2 = DownSampling(pooling=True, n_conv_blocks=3, in_channels=32, out_channels=64, kernel_size=5, padding=2)
        self.volume3 = DownSampling(pooling=True, n_conv_blocks=4, in_channels=64, out_channels=64, kernel_size=5, padding=2)
        self.volume4 = DownSampling(pooling=True, n_conv_blocks=1, in_channels=64, out_channels=1024, kernel_size=15, padding=7)

        # Decision-head feature extractor (input: bottleneck + downsampled seg mask)
        self.extractor = nn.Sequential(nn.MaxPool2d(kernel_size=2),
                                       _conv_block(in_chanels=1025, out_chanels=8, kernel_size=5, padding=2),
                                       nn.MaxPool2d(kernel_size=2),
                                       _conv_block(in_chanels=8, out_chanels=16, kernel_size=5, padding=2),
                                       nn.MaxPool2d(kernel_size=2),
                                       _conv_block(in_chanels=16, out_chanels=32, kernel_size=5, padding=2))

        # NOTE: these pool modules are not used by forward() (global pooling
        # is done there with torch.max/torch.mean); kept for compatibility.
        # FIX: integer division -- pooling kernel sizes must be ints.
        self.global_max_pool_feat = nn.MaxPool2d(kernel_size=32)
        self.global_avg_pool_feat = nn.AvgPool2d(kernel_size=32)
        self.global_max_pool_seg = nn.MaxPool2d(kernel_size=(self.input_height // 8, self.input_width // 8))
        self.global_avg_pool_seg = nn.AvgPool2d(kernel_size=(self.input_height // 8, self.input_width // 8))

        # 32 max + 32 avg extractor features + max + avg of the seg mask = 66
        self.fc = nn.Linear(in_features=66, out_features=1)

        # Custom autograd functions -- gradient multipliers
        self.volume_lr_multiplier_layer = GradientMultiplyLayer().apply
        self.glob_max_lr_multiplier_layer = GradientMultiplyLayer().apply
        self.glob_avg_lr_multiplier_layer = GradientMultiplyLayer().apply

        self.device = device

        # Decoder
        self.upsampling1 = UpSampling(n_conv_blocks=1, in_channels=1024, out_channels=16, n_channels_connected=64, kernel_size=5, padding=2)
        self.upsampling2 = UpSampling(n_conv_blocks=4, in_channels=16, out_channels=16, n_channels_connected=64, kernel_size=5, padding=2)
        self.upsampling3 = UpSampling(n_conv_blocks=3, in_channels=16, out_channels=8, n_channels_connected=32, kernel_size=5, padding=2)
        self.upsampling4 = nn.Sequential(Conv2d_init(in_channels=8, out_channels=1, kernel_size=5, padding=2, bias=False), FeatureNorm(num_features=1, eps=0.001, include_bias=False))

        # Downsampling of the full-resolution seg mask for the decision head
        self.downsampling = nn.AvgPool2d(8)

        # SSE modules, one per encoder level
        self.conv1_s = nn.Conv2d(32, 32, kernel_size=1, stride=1, bias=False)
        self.conv2_s = nn.Conv2d(64, 64, kernel_size=1, stride=1, bias=False)
        self.conv3_s = nn.Conv2d(64, 64, kernel_size=1, stride=1, bias=False)

        self.se_module_diff1 = Se_module_diff(inp=32, oup=32)
        self.se_module_diff2 = Se_module_diff(inp=64, oup=64)
        # BUG FIX: the original assigned se_module_diff2 twice (copy-paste)
        # and reused it for level 3 in forward(), silently sharing SE weights
        # between levels 2 and 3. Level 3 now has its own module.
        # NOTE(review): this adds new state-dict keys -- checkpoints trained
        # with the old code need key remapping / strict=False to load.
        self.se_module_diff3 = Se_module_diff(inp=64, oup=64)

    def set_gradient_multipliers(self, multiplier):
        """Set the masks of the gradient-multiplier layers (0 blocks the
        decision-head gradient flow into the shared features)."""
        self.volume_lr_multiplier_mask = (torch.ones((1,)) * multiplier).to(self.device)
        self.glob_max_lr_multiplier_mask = (torch.ones((1,)) * multiplier).to(self.device)
        self.glob_avg_lr_multiplier_mask = (torch.ones((1,)) * multiplier).to(self.device)

    def forward(self, input):
        """Return (decision_logit, seg_mask_logits)."""
        v1 = self.volume1(input)
        v2 = self.volume2(v1)
        v3 = self.volume3(v2)
        v4 = self.volume4(v3)

        conv1_s = self.conv1_s(v1)
        conv1_sse = self.se_module_diff1(v1, conv1_s)

        conv2_s = self.conv2_s(v2)
        conv2_sse = self.se_module_diff2(v2, conv2_s)

        conv3_s = self.conv3_s(v3)
        # BUG FIX: was self.se_module_diff2 (see __init__ note)
        conv3_sse = self.se_module_diff3(v3, conv3_s)

        seg_mask_upsampled = self.upsampling1(v4, conv3_sse)
        seg_mask_upsampled = self.upsampling2(seg_mask_upsampled, conv2_sse)
        seg_mask_upsampled = self.upsampling3(seg_mask_upsampled, conv1_sse)
        seg_mask_upsampled = self.upsampling4(seg_mask_upsampled)

        seg_mask_downsampled = self.downsampling(seg_mask_upsampled)

        cat = torch.cat([v4, seg_mask_downsampled], dim=1)

        cat = self.volume_lr_multiplier_layer(cat, self.volume_lr_multiplier_mask)

        features = self.extractor(cat)
        global_max_feat = torch.max(torch.max(features, dim=-1, keepdim=True)[0], dim=-2, keepdim=True)[0]
        global_avg_feat = torch.mean(features, dim=(-1, -2), keepdim=True)
        global_max_seg = torch.max(torch.max(seg_mask_upsampled, dim=-1, keepdim=True)[0], dim=-2, keepdim=True)[0]
        global_avg_seg = torch.mean(seg_mask_upsampled, dim=(-1, -2), keepdim=True)

        global_max_feat = global_max_feat.reshape(global_max_feat.size(0), -1)
        global_avg_feat = global_avg_feat.reshape(global_avg_feat.size(0), -1)

        global_max_seg = global_max_seg.reshape(global_max_seg.size(0), -1)
        global_max_seg = self.glob_max_lr_multiplier_layer(global_max_seg, self.glob_max_lr_multiplier_mask)
        global_avg_seg = global_avg_seg.reshape(global_avg_seg.size(0), -1)
        global_avg_seg = self.glob_avg_lr_multiplier_layer(global_avg_seg, self.glob_avg_lr_multiplier_mask)

        fc_in = torch.cat([global_max_feat, global_avg_feat, global_max_seg, global_avg_seg], dim=1)
        fc_in = fc_in.reshape(fc_in.size(0), -1)
        prediction = self.fc(fc_in)
        return prediction, seg_mask_upsampled

# --- head of SegDecNetOriginalJIM (continues on the following original lines), preserved:
# class SegDecNetOriginalJIM(nn.Module):
#     def __init__(self, device, input_width, input_height, input_channels):
input_height, input_channels): 241 | super(SegDecNetOriginalJIM, self).__init__() 242 | if input_width % 8 != 0 or input_height % 8 != 0: 243 | raise Exception(f"Input size must be divisible by 8! width={input_width}, height={input_height}") 244 | self.input_width = input_width 245 | self.input_height = input_height 246 | self.input_channels = input_channels 247 | 248 | self.volume1 = DownSampling(pooling=False, n_conv_blocks=1, in_channels=self.input_channels, out_channels=32, 249 | kernel_size=5, padding=2) 250 | self.volume2 = DownSampling(pooling=True, n_conv_blocks=3, in_channels=32, out_channels=64, kernel_size=5, 251 | padding=2) 252 | self.volume3 = DownSampling(pooling=True, n_conv_blocks=4, in_channels=64, out_channels=64, kernel_size=5, 253 | padding=2) 254 | self.volume4 = DownSampling(pooling=True, n_conv_blocks=1, in_channels=64, out_channels=1024, kernel_size=15, 255 | padding=7) 256 | 257 | self.extractor = nn.Sequential(nn.MaxPool2d(kernel_size=2), 258 | _conv_block(in_chanels=1025, out_chanels=8, kernel_size=5, padding=2), 259 | nn.MaxPool2d(kernel_size=2), 260 | _conv_block(in_chanels=8, out_chanels=16, kernel_size=5, padding=2), 261 | nn.MaxPool2d(kernel_size=2), 262 | _conv_block(in_chanels=16, out_chanels=32, kernel_size=5, padding=2)) 263 | 264 | self.global_max_pool_feat = nn.MaxPool2d(kernel_size=32) 265 | self.global_avg_pool_feat = nn.AvgPool2d(kernel_size=32) 266 | self.global_max_pool_seg = nn.MaxPool2d(kernel_size=(self.input_height / 8, self.input_width / 8)) 267 | self.global_avg_pool_seg = nn.AvgPool2d(kernel_size=(self.input_height / 8, self.input_width / 8)) 268 | 269 | self.fc = nn.Linear(in_features=66, out_features=1) 270 | 271 | # Custom autgrad funkcije - Gradient multiplyers 272 | self.volume_lr_multiplier_layer = GradientMultiplyLayer().apply 273 | self.glob_max_lr_multiplier_layer = GradientMultiplyLayer().apply 274 | self.glob_avg_lr_multiplier_layer = GradientMultiplyLayer().apply 275 | 276 | self.device = device 277 | 
278 | self.seg_mask = nn.Sequential( 279 | Conv2d_init(in_channels=1024, out_channels=1, kernel_size=1, padding=0, bias=False), 280 | FeatureNorm(num_features=1, eps=0.001, include_bias=False)) 281 | 282 | def set_gradient_multipliers(self, multiplier): 283 | self.volume_lr_multiplier_mask = (torch.ones((1,)) * multiplier).to(self.device) 284 | self.glob_max_lr_multiplier_mask = (torch.ones((1,)) * multiplier).to(self.device) 285 | self.glob_avg_lr_multiplier_mask = (torch.ones((1,)) * multiplier).to(self.device) 286 | 287 | def forward(self, input): 288 | v1 = self.volume1(input) 289 | v2 = self.volume2(v1) 290 | v3 = self.volume3(v2) 291 | v4 = self.volume4(v3) 292 | 293 | seg_mask_downsampled = self.seg_mask(v4) 294 | seg_mask_upsampled = seg_mask_downsampled 295 | 296 | cat = torch.cat([v4, seg_mask_downsampled], dim=1) 297 | 298 | cat = self.volume_lr_multiplier_layer(cat, self.volume_lr_multiplier_mask) 299 | 300 | features = self.extractor(cat) 301 | global_max_feat = torch.max(torch.max(features, dim=-1, keepdim=True)[0], dim=-2, keepdim=True)[0] 302 | global_avg_feat = torch.mean(features, dim=(-1, -2), keepdim=True) 303 | global_max_seg = torch.max(torch.max(seg_mask_upsampled, dim=-1, keepdim=True)[0], dim=-2, keepdim=True)[0] 304 | global_avg_seg = torch.mean(seg_mask_upsampled, dim=(-1, -2), keepdim=True) 305 | 306 | global_max_feat = global_max_feat.reshape(global_max_feat.size(0), -1) 307 | global_avg_feat = global_avg_feat.reshape(global_avg_feat.size(0), -1) 308 | 309 | global_max_seg = global_max_seg.reshape(global_max_seg.size(0), -1) 310 | global_max_seg = self.glob_max_lr_multiplier_layer(global_max_seg, self.glob_max_lr_multiplier_mask) 311 | global_avg_seg = global_avg_seg.reshape(global_avg_seg.size(0), -1) 312 | global_avg_seg = self.glob_avg_lr_multiplier_layer(global_avg_seg, self.glob_avg_lr_multiplier_mask) 313 | 314 | fc_in = torch.cat([global_max_feat, global_avg_feat, global_max_seg, global_avg_seg], dim=1) 315 | fc_in = 
fc_in.reshape(fc_in.size(0), -1) 316 | prediction = self.fc(fc_in) 317 | return prediction, F.interpolate(seg_mask_downsampled, scale_factor=8, mode="nearest") 318 | 319 | 320 | class GradientMultiplyLayer(torch.autograd.Function): 321 | @staticmethod 322 | def forward(ctx, input, mask_bw): 323 | ctx.save_for_backward(mask_bw) 324 | return input 325 | 326 | @staticmethod 327 | def backward(ctx, grad_output): 328 | mask_bw, = ctx.saved_tensors 329 | return grad_output.mul(mask_bw), None -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python 2 | numpy~=1.18.5 3 | pandas~=1.0.5 4 | scipy~=1.5.2 5 | scikit-learn~=0.23.2 6 | torchvision 7 | matplotlib~=3.2.2 8 | torch>=1.6 9 | tensorboard -------------------------------------------------------------------------------- /res/intro_wide.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vicoslab/segdec-net-plusplus-conbuildmat2023/b0c553bfa2dcb31d703da1f49406790bbda51992/res/intro_wide.png -------------------------------------------------------------------------------- /res/segmentation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vicoslab/segdec-net-plusplus-conbuildmat2023/b0c553bfa2dcb31d703da1f49406790bbda51992/res/segmentation.png -------------------------------------------------------------------------------- /splits/SCCDNet/test_CFD.txt: -------------------------------------------------------------------------------- 1 | CFD_001.jpg 2 | CFD_007.jpg 3 | CFD_011.jpg 4 | CFD_013.jpg 5 | CFD_014.jpg 6 | CFD_019.jpg 7 | CFD_037.jpg 8 | CFD_040.jpg 9 | CFD_047.jpg 10 | CFD_070.jpg 11 | CFD_080.jpg 12 | CFD_082.jpg 13 | CFD_084.jpg 14 | CFD_088.jpg 15 | CFD_108.jpg 16 | CFD_109.jpg 17 | CFD_111.jpg 18 | CFD_118.jpg 19 | 
-------------------------------------------------------------------------------- /splits/SCCDNet/test_DeepCrack.txt: -------------------------------------------------------------------------------- 1 | DeepCrack_11113.jpg 2 | DeepCrack_11114.jpg 3 | DeepCrack_11116-2.jpg 4 | DeepCrack_11116-3.jpg 5 | DeepCrack_11118.jpg 6 | DeepCrack_11123.jpg 7 | DeepCrack_11125-1.jpg 8 | DeepCrack_11134.jpg 9 | DeepCrack_11134-3.jpg 10 | DeepCrack_11134-4.jpg 11 | DeepCrack_11151-2.jpg 12 | DeepCrack_11153.jpg 13 | DeepCrack_11155.jpg 14 | DeepCrack_11155-1.jpg 15 | DeepCrack_11156-1.jpg 16 | DeepCrack_11163-1.jpg 17 | DeepCrack_11164-2.jpg 18 | DeepCrack_11164-4.jpg 19 | DeepCrack_11165.jpg 20 | DeepCrack_11165-2.jpg 21 | DeepCrack_11178-3.jpg 22 | DeepCrack_11184-1.jpg 23 | DeepCrack_11188.jpg 24 | DeepCrack_11188-2.jpg 25 | DeepCrack_11197-2.jpg 26 | DeepCrack_11197-4.jpg 27 | DeepCrack_11204.jpg 28 | DeepCrack_11215-15.jpg 29 | DeepCrack_11215-3.jpg 30 | DeepCrack_11218-3.jpg 31 | DeepCrack_11231-3.jpg 32 | DeepCrack_11231-4.jpg 33 | DeepCrack_11236-1.jpg 34 | DeepCrack_11236-4.jpg 35 | DeepCrack_11240-6.jpg 36 | DeepCrack_11240-8.jpg 37 | DeepCrack_11247-12.jpg 38 | DeepCrack_11249-4.jpg 39 | DeepCrack_11249-7.jpg 40 | DeepCrack_11266-3.jpg 41 | DeepCrack_11269-1.jpg 42 | DeepCrack_11270-2.jpg 43 | DeepCrack_11271-2.jpg 44 | DeepCrack_11271-5.jpg 45 | DeepCrack_11286-2.jpg 46 | DeepCrack_11289-9.jpg 47 | DeepCrack_11294.jpg 48 | DeepCrack_11296-18.jpg 49 | DeepCrack_11296-19.jpg 50 | DeepCrack_11296-4.jpg 51 | DeepCrack_11296-7.jpg 52 | DeepCrack_11301-6.jpg 53 | DeepCrack_11304.jpg 54 | DeepCrack_7Q3A9060-3.jpg 55 | DeepCrack_7Q3A9060-9.jpg 56 | DeepCrack_7Q3A9064-12.jpg 57 | DeepCrack_7Q3A9064-17.jpg 58 | DeepCrack_7Q3A9064-19.jpg 59 | DeepCrack_7Q3A9064-20.jpg 60 | DeepCrack_IMG_6469-2.jpg 61 | DeepCrack_IMG_6472-1.jpg 62 | DeepCrack_IMG_6514-3.jpg 63 | DeepCrack_IMG_6516-1.jpg 64 | DeepCrack_IMG_6522-1.jpg 65 | DeepCrack_IMG_6537-1.jpg 66 | DeepCrack_IMG11-6.jpg 67 | 
DeepCrack_IMG14-1.jpg 68 | DeepCrack_IMG14-3.jpg 69 | DeepCrack_IMG25-6.jpg 70 | DeepCrack_IMG27-3.jpg 71 | DeepCrack_IMG27-7.jpg 72 | DeepCrack_IMG27-8.jpg 73 | DeepCrack_IMG27-9.jpg 74 | DeepCrack_IMG33-12.jpg 75 | DeepCrack_IMG33-16.jpg 76 | DeepCrack_IMG33-9.jpg 77 | DeepCrack_IMG36-2.jpg -------------------------------------------------------------------------------- /splits/SCCDNet/test_GAPS.txt: -------------------------------------------------------------------------------- 1 | GAPS384_test_0005_541_641.jpg 2 | GAPS384_test_0016_541_1.jpg 3 | GAPS384_test_0016_541_641.jpg 4 | GAPS384_test_0040_541_1.jpg 5 | GAPS384_test_0042_541_1.jpg 6 | GAPS384_test_0058_1_1.jpg 7 | GAPS384_test_0203_1_1.jpg 8 | GAPS384_test_0203_541_1.jpg 9 | GAPS384_test_0274_541_1.jpg 10 | GAPS384_train_0423_1_1.jpg 11 | GAPS384_train_0449_1_1.jpg 12 | GAPS384_train_0455_541_1.jpg 13 | GAPS384_train_0503_541_1.jpg 14 | GAPS384_train_0514_541_1.jpg 15 | GAPS384_train_0516_541_641.jpg 16 | GAPS384_train_0533_541_1.jpg 17 | GAPS384_train_0538_541_1.jpg 18 | GAPS384_train_0542_1_641.jpg 19 | GAPS384_train_0544_541_1.jpg 20 | GAPS384_train_0552_1_641.jpg 21 | GAPS384_train_0559_541_641.jpg 22 | GAPS384_train_0597_1_1.jpg 23 | GAPS384_train_0599_1_1.jpg 24 | GAPS384_train_0607_541_641.jpg 25 | GAPS384_train_0608_1_1.jpg 26 | GAPS384_train_0609_541_641.jpg 27 | GAPS384_train_0641_1_1.jpg 28 | GAPS384_train_0641_541_641.jpg 29 | GAPS384_train_0649_1_641.jpg 30 | GAPS384_train_0664_1_1.jpg 31 | GAPS384_train_0664_541_641.jpg 32 | GAPS384_train_0680_541_641.jpg 33 | GAPS384_train_0681_541_641.jpg 34 | GAPS384_train_0690_541_1.jpg 35 | GAPS384_train_0695_541_1.jpg 36 | GAPS384_train_0698_1_641.jpg 37 | GAPS384_train_0700_1_1.jpg 38 | GAPS384_train_0799_541_1.jpg 39 | GAPS384_train_0809_541_1.jpg 40 | GAPS384_train_0918_1_641.jpg 41 | GAPS384_train_0922_1_641.jpg 42 | GAPS384_train_0942_541_1.jpg 43 | GAPS384_train_0959_1_641.jpg 44 | GAPS384_train_0961_1_1.jpg 45 | GAPS384_train_0996_1_641.jpg 46 
| GAPS384_train_1010_541_641.jpg 47 | GAPS384_train_1015_1_641.jpg 48 | GAPS384_train_1028_1_1.jpg 49 | GAPS384_train_1048_1_1.jpg 50 | GAPS384_train_1048_541_1.jpg 51 | GAPS384_train_1085_541_1.jpg 52 | GAPS384_train_1117_1_1.jpg 53 | GAPS384_train_1120_541_641.jpg 54 | GAPS384_train_1124_1_641.jpg 55 | GAPS384_train_1126_541_1.jpg 56 | GAPS384_train_1128_541_1.jpg 57 | GAPS384_train_1171_541_641.jpg 58 | GAPS384_train_1172_541_641.jpg 59 | GAPS384_train_1173_541_1.jpg 60 | GAPS384_train_1173_541_641.jpg 61 | GAPS384_train_1183_541_1.jpg 62 | GAPS384_train_1187_1_1.jpg 63 | GAPS384_train_1218_541_1.jpg 64 | GAPS384_train_1238_1_1.jpg 65 | GAPS384_train_1247_541_1.jpg 66 | GAPS384_train_1281_541_1.jpg 67 | GAPS384_train_1284_541_641.jpg 68 | GAPS384_train_1347_541_1.jpg 69 | GAPS384_train_1347_541_641.jpg 70 | GAPS384_train_1361_541_641.jpg 71 | GAPS384_train_1366_541_1.jpg 72 | GAPS384_train_1370_541_641.jpg 73 | GAPS384_train_1374_541_1.jpg 74 | GAPS384_train_1375_541_641.jpg 75 | GAPS384_train_1381_541_1.jpg 76 | GAPS384_valid_0016_541_1.jpg -------------------------------------------------------------------------------- /splits/SCCDNet/test_cracktree200.txt: -------------------------------------------------------------------------------- 1 | cracktree200_6201.jpg 2 | cracktree200_6203.jpg 3 | cracktree200_6234.jpg 4 | cracktree200_6243.jpg 5 | cracktree200_6247.jpg 6 | cracktree200_6258.jpg 7 | cracktree200_6259.jpg 8 | cracktree200_6261.jpg 9 | cracktree200_6266.jpg 10 | cracktree200_6268.jpg 11 | cracktree200_6278.jpg 12 | cracktree200_6281.jpg 13 | cracktree200_6297.jpg 14 | cracktree200_6298.jpg 15 | cracktree200_6301.jpg 16 | cracktree200_6310.jpg 17 | cracktree200_6321.jpg 18 | cracktree200_6327.jpg 19 | cracktree200_6328.jpg 20 | cracktree200_6329.jpg 21 | cracktree200_6330.jpg 22 | cracktree200_6334.jpg 23 | cracktree200_6340.jpg 24 | cracktree200_6351.jpg 25 | cracktree200_6623.jpg 26 | cracktree200_6635.jpg 27 | cracktree200_6645.jpg 28 | 
cracktree200_6647.jpg 29 | cracktree200_6658.jpg 30 | cracktree200_6710.jpg 31 | cracktree200_6774.jpg -------------------------------------------------------------------------------- /splits/SCCDNet/test_forest.txt: -------------------------------------------------------------------------------- 1 | forest_003.jpg 2 | forest_006.jpg 3 | forest_018.jpg 4 | forest_022.jpg 5 | forest_038.jpg 6 | forest_048.jpg 7 | forest_054.jpg 8 | forest_069.jpg 9 | forest_076.jpg 10 | forest_082.jpg 11 | forest_083.jpg 12 | forest_087.jpg 13 | forest_091.jpg 14 | forest_094.jpg 15 | forest_098.jpg 16 | forest_104.jpg 17 | forest_109.jpg 18 | forest_117.jpg -------------------------------------------------------------------------------- /splits/SCCDNet/test_noncrack.txt: -------------------------------------------------------------------------------- 1 | noncrack_noncrack_concrete_wall_0_0.jpg.jpg 2 | noncrack_noncrack_concrete_wall_0_3.jpg.jpg 3 | noncrack_noncrack_concrete_wall_100_4.jpg.jpg 4 | noncrack_noncrack_concrete_wall_101_2.jpg.jpg 5 | noncrack_noncrack_concrete_wall_101_8.jpg.jpg 6 | noncrack_noncrack_concrete_wall_101_9.jpg.jpg 7 | noncrack_noncrack_concrete_wall_102_5.jpg.jpg 8 | noncrack_noncrack_concrete_wall_103_0.jpg.jpg 9 | noncrack_noncrack_concrete_wall_103_1.jpg.jpg 10 | noncrack_noncrack_concrete_wall_11_4.jpg.jpg 11 | noncrack_noncrack_concrete_wall_12_3.jpg.jpg 12 | noncrack_noncrack_concrete_wall_12_9.jpg.jpg 13 | noncrack_noncrack_concrete_wall_13_2.jpg.jpg 14 | noncrack_noncrack_concrete_wall_13_4.jpg.jpg 15 | noncrack_noncrack_concrete_wall_13_6.jpg.jpg 16 | noncrack_noncrack_concrete_wall_14_11.jpg.jpg 17 | noncrack_noncrack_concrete_wall_14_25.jpg.jpg 18 | noncrack_noncrack_concrete_wall_15_5.jpg.jpg 19 | noncrack_noncrack_concrete_wall_15_7.jpg.jpg 20 | noncrack_noncrack_concrete_wall_16_1.jpg.jpg 21 | noncrack_noncrack_concrete_wall_16_5.jpg.jpg 22 | noncrack_noncrack_concrete_wall_18_1.jpg.jpg 23 | noncrack_noncrack_concrete_wall_18_11.jpg.jpg 24 | 
noncrack_noncrack_concrete_wall_18_12.jpg.jpg 25 | noncrack_noncrack_concrete_wall_18_18.jpg.jpg 26 | noncrack_noncrack_concrete_wall_18_19.jpg.jpg 27 | noncrack_noncrack_concrete_wall_18_3.jpg.jpg 28 | noncrack_noncrack_concrete_wall_18_9.jpg.jpg 29 | noncrack_noncrack_concrete_wall_19_1.jpg.jpg 30 | noncrack_noncrack_concrete_wall_19_3.jpg.jpg 31 | noncrack_noncrack_concrete_wall_19_6.jpg.jpg 32 | noncrack_noncrack_concrete_wall_19_7.jpg.jpg 33 | noncrack_noncrack_concrete_wall_19_8.jpg.jpg 34 | noncrack_noncrack_concrete_wall_20_1.jpg.jpg 35 | noncrack_noncrack_concrete_wall_20_15.jpg.jpg 36 | noncrack_noncrack_concrete_wall_20_32.jpg.jpg 37 | noncrack_noncrack_concrete_wall_20_52.jpg.jpg 38 | noncrack_noncrack_concrete_wall_20_54.jpg.jpg 39 | noncrack_noncrack_concrete_wall_20_59.jpg.jpg 40 | noncrack_noncrack_concrete_wall_20_62.jpg.jpg 41 | noncrack_noncrack_concrete_wall_20_68.jpg.jpg 42 | noncrack_noncrack_concrete_wall_20_70.jpg.jpg 43 | noncrack_noncrack_concrete_wall_21_0.jpg.jpg 44 | noncrack_noncrack_concrete_wall_23_1.jpg.jpg 45 | noncrack_noncrack_concrete_wall_23_4.jpg.jpg 46 | noncrack_noncrack_concrete_wall_23_5.jpg.jpg 47 | noncrack_noncrack_concrete_wall_23_7.jpg.jpg 48 | noncrack_noncrack_concrete_wall_24_19.jpg.jpg 49 | noncrack_noncrack_concrete_wall_24_31.jpg.jpg 50 | noncrack_noncrack_concrete_wall_24_35.jpg.jpg 51 | noncrack_noncrack_concrete_wall_24_38.jpg.jpg 52 | noncrack_noncrack_concrete_wall_24_4.jpg.jpg 53 | noncrack_noncrack_concrete_wall_24_8.jpg.jpg 54 | noncrack_noncrack_concrete_wall_26_0.jpg.jpg 55 | noncrack_noncrack_concrete_wall_27_5.jpg.jpg 56 | noncrack_noncrack_concrete_wall_28_0.jpg.jpg 57 | noncrack_noncrack_concrete_wall_29_6.jpg.jpg 58 | noncrack_noncrack_concrete_wall_3_2.jpg.jpg 59 | noncrack_noncrack_concrete_wall_30_2.jpg.jpg 60 | noncrack_noncrack_concrete_wall_31_0.jpg.jpg 61 | noncrack_noncrack_concrete_wall_31_4.jpg.jpg 62 | noncrack_noncrack_concrete_wall_33_4.jpg.jpg 63 | 
noncrack_noncrack_concrete_wall_34_0.jpg.jpg 64 | noncrack_noncrack_concrete_wall_35_4.jpg.jpg 65 | noncrack_noncrack_concrete_wall_37_1.jpg.jpg 66 | noncrack_noncrack_concrete_wall_37_4.jpg.jpg 67 | noncrack_noncrack_concrete_wall_38_4.jpg.jpg 68 | noncrack_noncrack_concrete_wall_38_5.jpg.jpg 69 | noncrack_noncrack_concrete_wall_38_6.jpg.jpg 70 | noncrack_noncrack_concrete_wall_39_9.jpg.jpg 71 | noncrack_noncrack_concrete_wall_40_0.jpg.jpg 72 | noncrack_noncrack_concrete_wall_40_5.jpg.jpg 73 | noncrack_noncrack_concrete_wall_41_11.jpg.jpg 74 | noncrack_noncrack_concrete_wall_41_12.jpg.jpg 75 | noncrack_noncrack_concrete_wall_41_2.jpg.jpg 76 | noncrack_noncrack_concrete_wall_41_35.jpg.jpg 77 | noncrack_noncrack_concrete_wall_41_37.jpg.jpg 78 | noncrack_noncrack_concrete_wall_41_38.jpg.jpg 79 | noncrack_noncrack_concrete_wall_41_4.jpg.jpg 80 | noncrack_noncrack_concrete_wall_41_43.jpg.jpg 81 | noncrack_noncrack_concrete_wall_41_48.jpg.jpg 82 | noncrack_noncrack_concrete_wall_41_58.jpg.jpg 83 | noncrack_noncrack_concrete_wall_41_63.jpg.jpg 84 | noncrack_noncrack_concrete_wall_41_65.jpg.jpg 85 | noncrack_noncrack_concrete_wall_42_6.jpg.jpg 86 | noncrack_noncrack_concrete_wall_43_14.jpg.jpg 87 | noncrack_noncrack_concrete_wall_43_15.jpg.jpg 88 | noncrack_noncrack_concrete_wall_43_18.jpg.jpg 89 | noncrack_noncrack_concrete_wall_43_2.jpg.jpg 90 | noncrack_noncrack_concrete_wall_43_25.jpg.jpg 91 | noncrack_noncrack_concrete_wall_43_34.jpg.jpg 92 | noncrack_noncrack_concrete_wall_43_42.jpg.jpg 93 | noncrack_noncrack_concrete_wall_43_44.jpg.jpg 94 | noncrack_noncrack_concrete_wall_43_48.jpg.jpg 95 | noncrack_noncrack_concrete_wall_43_50.jpg.jpg 96 | noncrack_noncrack_concrete_wall_44_0.jpg.jpg 97 | noncrack_noncrack_concrete_wall_44_4.jpg.jpg 98 | noncrack_noncrack_concrete_wall_45_13.jpg.jpg 99 | noncrack_noncrack_concrete_wall_45_4.jpg.jpg 100 | noncrack_noncrack_concrete_wall_45_5.jpg.jpg 101 | noncrack_noncrack_concrete_wall_46_8.jpg.jpg 102 | 
noncrack_noncrack_concrete_wall_47_4.jpg.jpg 103 | noncrack_noncrack_concrete_wall_47_6.jpg.jpg 104 | noncrack_noncrack_concrete_wall_48_0.jpg.jpg 105 | noncrack_noncrack_concrete_wall_48_1.jpg.jpg 106 | noncrack_noncrack_concrete_wall_48_5.jpg.jpg 107 | noncrack_noncrack_concrete_wall_49_4.jpg.jpg 108 | noncrack_noncrack_concrete_wall_5_10.jpg.jpg 109 | noncrack_noncrack_concrete_wall_50_2.jpg.jpg 110 | noncrack_noncrack_concrete_wall_53_3.jpg.jpg 111 | noncrack_noncrack_concrete_wall_54_24.jpg.jpg 112 | noncrack_noncrack_concrete_wall_54_27.jpg.jpg 113 | noncrack_noncrack_concrete_wall_54_28.jpg.jpg 114 | noncrack_noncrack_concrete_wall_54_31.jpg.jpg 115 | noncrack_noncrack_concrete_wall_54_33.jpg.jpg 116 | noncrack_noncrack_concrete_wall_54_36.jpg.jpg 117 | noncrack_noncrack_concrete_wall_54_4.jpg.jpg 118 | noncrack_noncrack_concrete_wall_54_40.jpg.jpg 119 | noncrack_noncrack_concrete_wall_54_44.jpg.jpg 120 | noncrack_noncrack_concrete_wall_54_48.jpg.jpg 121 | noncrack_noncrack_concrete_wall_54_53.jpg.jpg 122 | noncrack_noncrack_concrete_wall_54_54.jpg.jpg 123 | noncrack_noncrack_concrete_wall_54_57.jpg.jpg 124 | noncrack_noncrack_concrete_wall_54_72.jpg.jpg 125 | noncrack_noncrack_concrete_wall_55_10.jpg.jpg 126 | noncrack_noncrack_concrete_wall_55_24.jpg.jpg 127 | noncrack_noncrack_concrete_wall_55_29.jpg.jpg 128 | noncrack_noncrack_concrete_wall_55_32.jpg.jpg 129 | noncrack_noncrack_concrete_wall_55_38.jpg.jpg 130 | noncrack_noncrack_concrete_wall_55_4.jpg.jpg 131 | noncrack_noncrack_concrete_wall_55_41.jpg.jpg 132 | noncrack_noncrack_concrete_wall_55_49.jpg.jpg 133 | noncrack_noncrack_concrete_wall_55_6.jpg.jpg 134 | noncrack_noncrack_concrete_wall_57_11.jpg.jpg 135 | noncrack_noncrack_concrete_wall_57_6.jpg.jpg 136 | noncrack_noncrack_concrete_wall_58_0.jpg.jpg 137 | noncrack_noncrack_concrete_wall_58_11.jpg.jpg 138 | noncrack_noncrack_concrete_wall_58_12.jpg.jpg 139 | noncrack_noncrack_concrete_wall_58_13.jpg.jpg 140 | 
noncrack_noncrack_concrete_wall_58_6.jpg.jpg 141 | noncrack_noncrack_concrete_wall_58_8.jpg.jpg 142 | noncrack_noncrack_concrete_wall_59_0.jpg.jpg 143 | noncrack_noncrack_concrete_wall_59_3.jpg.jpg 144 | noncrack_noncrack_concrete_wall_6_0.jpg.jpg 145 | noncrack_noncrack_concrete_wall_60_13.jpg.jpg 146 | noncrack_noncrack_concrete_wall_60_6.jpg.jpg 147 | noncrack_noncrack_concrete_wall_60_8.jpg.jpg 148 | noncrack_noncrack_concrete_wall_63_1.jpg.jpg 149 | noncrack_noncrack_concrete_wall_65_0.jpg.jpg 150 | noncrack_noncrack_concrete_wall_65_10.jpg.jpg 151 | noncrack_noncrack_concrete_wall_66_1.jpg.jpg 152 | noncrack_noncrack_concrete_wall_66_10.jpg.jpg 153 | noncrack_noncrack_concrete_wall_66_21.jpg.jpg 154 | noncrack_noncrack_concrete_wall_66_8.jpg.jpg 155 | noncrack_noncrack_concrete_wall_67_1.jpg.jpg 156 | noncrack_noncrack_concrete_wall_67_2.jpg.jpg 157 | noncrack_noncrack_concrete_wall_68_0.jpg.jpg 158 | noncrack_noncrack_concrete_wall_68_1.jpg.jpg 159 | noncrack_noncrack_concrete_wall_69_6.jpg.jpg 160 | noncrack_noncrack_concrete_wall_7_0.jpg.jpg 161 | noncrack_noncrack_concrete_wall_7_1.jpg.jpg 162 | noncrack_noncrack_concrete_wall_70_3.jpg.jpg 163 | noncrack_noncrack_concrete_wall_72_0.jpg.jpg 164 | noncrack_noncrack_concrete_wall_73_1.jpg.jpg 165 | noncrack_noncrack_concrete_wall_73_2.jpg.jpg 166 | noncrack_noncrack_concrete_wall_73_4.jpg.jpg 167 | noncrack_noncrack_concrete_wall_74_10.jpg.jpg 168 | noncrack_noncrack_concrete_wall_75_0.jpg.jpg 169 | noncrack_noncrack_concrete_wall_75_18.jpg.jpg 170 | noncrack_noncrack_concrete_wall_75_24.jpg.jpg 171 | noncrack_noncrack_concrete_wall_76_1.jpg.jpg 172 | noncrack_noncrack_concrete_wall_76_22.jpg.jpg 173 | noncrack_noncrack_concrete_wall_76_30.jpg.jpg 174 | noncrack_noncrack_concrete_wall_76_36.jpg.jpg 175 | noncrack_noncrack_concrete_wall_76_38.jpg.jpg 176 | noncrack_noncrack_concrete_wall_76_49.jpg.jpg 177 | noncrack_noncrack_concrete_wall_76_5.jpg.jpg 178 | noncrack_noncrack_concrete_wall_76_62.jpg.jpg 179 | 
noncrack_noncrack_concrete_wall_77_1.jpg.jpg 180 | noncrack_noncrack_concrete_wall_77_5.jpg.jpg 181 | noncrack_noncrack_concrete_wall_79_2.jpg.jpg 182 | noncrack_noncrack_concrete_wall_79_6.jpg.jpg 183 | noncrack_noncrack_concrete_wall_81_4.jpg.jpg 184 | noncrack_noncrack_concrete_wall_82_14.jpg.jpg 185 | noncrack_noncrack_concrete_wall_82_23.jpg.jpg 186 | noncrack_noncrack_concrete_wall_82_24.jpg.jpg 187 | noncrack_noncrack_concrete_wall_82_27.jpg.jpg 188 | noncrack_noncrack_concrete_wall_82_32.jpg.jpg 189 | noncrack_noncrack_concrete_wall_82_4.jpg.jpg 190 | noncrack_noncrack_concrete_wall_82_5.jpg.jpg 191 | noncrack_noncrack_concrete_wall_83_10.jpg.jpg 192 | noncrack_noncrack_concrete_wall_83_4.jpg.jpg 193 | noncrack_noncrack_concrete_wall_83_9.jpg.jpg 194 | noncrack_noncrack_concrete_wall_86_1.jpg.jpg 195 | noncrack_noncrack_concrete_wall_86_2.jpg.jpg 196 | noncrack_noncrack_concrete_wall_88_3.jpg.jpg 197 | noncrack_noncrack_concrete_wall_89_0.jpg.jpg 198 | noncrack_noncrack_concrete_wall_89_1.jpg.jpg 199 | noncrack_noncrack_concrete_wall_9_14.jpg.jpg 200 | noncrack_noncrack_concrete_wall_9_22.jpg.jpg 201 | noncrack_noncrack_concrete_wall_9_29.jpg.jpg 202 | noncrack_noncrack_concrete_wall_9_32.jpg.jpg 203 | noncrack_noncrack_concrete_wall_9_33.jpg.jpg 204 | noncrack_noncrack_concrete_wall_91_1.jpg.jpg 205 | noncrack_noncrack_concrete_wall_92_7.jpg.jpg 206 | noncrack_noncrack_concrete_wall_93_5.jpg.jpg 207 | noncrack_noncrack_concrete_wall_94_4.jpg.jpg 208 | noncrack_noncrack_concrete_wall_96_1.jpg.jpg 209 | noncrack_noncrack_concrete_wall_96_12.jpg.jpg 210 | noncrack_noncrack_concrete_wall_96_2.jpg.jpg 211 | noncrack_noncrack_concrete_wall_96_27.jpg.jpg 212 | noncrack_noncrack_concrete_wall_99_2.jpg.jpg -------------------------------------------------------------------------------- /splits/SCCDNet/test_rissbilder.txt: -------------------------------------------------------------------------------- 1 | Rissbilder_for_Florian_9S6A2782_0_0_3840_5760.jpg 2 | 
Rissbilder_for_Florian_9S6A2783_485_963_3019_3865.jpg 3 | Rissbilder_for_Florian_9S6A2783_510_849_2889_2880.jpg 4 | Rissbilder_for_Florian_9S6A2783_555_1445_2892_3793.jpg 5 | Rissbilder_for_Florian_9S6A2783_96_1214_3516_4173.jpg 6 | Rissbilder_for_Florian_9S6A2784_100_1539_3455_3308.jpg 7 | Rissbilder_for_Florian_9S6A2784_1029_155_2671_3164.jpg 8 | Rissbilder_for_Florian_9S6A2784_241_1425_3582_3991.jpg 9 | Rissbilder_for_Florian_9S6A2785_118_523_3535_4238.jpg 10 | Rissbilder_for_Florian_9S6A2785_197_1269_2404_2359.jpg 11 | Rissbilder_for_Florian_9S6A2785_239_136_3078_2666.jpg 12 | Rissbilder_for_Florian_9S6A2785_493_901_3299_3768.jpg 13 | Rissbilder_for_Florian_9S6A2785_51_1717_2854_3381.jpg 14 | Rissbilder_for_Florian_9S6A2786_1709_1270_2041_2349.jpg 15 | Rissbilder_for_Florian_9S6A2786_641_1214_2289_2280.jpg 16 | Rissbilder_for_Florian_9S6A2787_1034_2113_2702_3383.jpg 17 | Rissbilder_for_Florian_9S6A2787_114_2564_2503_2920.jpg 18 | Rissbilder_for_Florian_9S6A2787_1217_749_2520_2686.jpg 19 | Rissbilder_for_Florian_9S6A2787_1287_2060_2196_2885.jpg 20 | Rissbilder_for_Florian_9S6A2787_38_553_3789_4026.jpg 21 | Rissbilder_for_Florian_9S6A2787_469_792_2677_2506.jpg 22 | Rissbilder_for_Florian_9S6A2788_0_0_3840_5760.jpg 23 | Rissbilder_for_Florian_9S6A2788_827_2151_2719_2314.jpg 24 | Rissbilder_for_Florian_9S6A2789_29_2188_3523_3363.jpg 25 | Rissbilder_for_Florian_9S6A2789_79_2844_3130_2630.jpg 26 | Rissbilder_for_Florian_9S6A2790_213_1721_2871_3688.jpg 27 | Rissbilder_for_Florian_9S6A2790_415_1305_3121_2371.jpg 28 | Rissbilder_for_Florian_9S6A2790_750_1920_2101_2560.jpg 29 | Rissbilder_for_Florian_9S6A2791_180_347_3514_4469.jpg 30 | Rissbilder_for_Florian_9S6A2791_268_477_2530_3149.jpg 31 | Rissbilder_for_Florian_9S6A2791_454_1477_3234_3143.jpg 32 | Rissbilder_for_Florian_9S6A2792_0_0_5760_3840.jpg 33 | Rissbilder_for_Florian_9S6A2792_1773_105_2717_3479.jpg 34 | Rissbilder_for_Florian_9S6A2792_1778_280_3876_3455.jpg 35 | 
Rissbilder_for_Florian_9S6A2792_2643_751_3074_3028.jpg 36 | Rissbilder_for_Florian_9S6A2792_753_1245_2029_2585.jpg 37 | Rissbilder_for_Florian_9S6A2793_377_1016_2309_2713.jpg 38 | Rissbilder_for_Florian_9S6A2793_811_444_2800_3267.jpg 39 | Rissbilder_for_Florian_9S6A2795_244_2755_3348_2948.jpg 40 | Rissbilder_for_Florian_9S6A2795_345_1636_2968_2382.jpg 41 | Rissbilder_for_Florian_9S6A2795_726_782_2478_3159.jpg 42 | Rissbilder_for_Florian_9S6A2796_352_1675_3345_3505.jpg 43 | Rissbilder_for_Florian_9S6A2796_360_1209_3312_4152.jpg 44 | Rissbilder_for_Florian_9S6A2796_365_740_3391_4131.jpg 45 | Rissbilder_for_Florian_9S6A2797_0_0_3840_5760.jpg 46 | Rissbilder_for_Florian_9S6A2797_211_1594_3203_4094.jpg 47 | Rissbilder_for_Florian_9S6A2797_27_2177_3736_3368.jpg 48 | Rissbilder_for_Florian_9S6A2797_331_75_3433_4182.jpg 49 | Rissbilder_for_Florian_9S6A2798_499_1239_3234_3324.jpg 50 | Rissbilder_for_Florian_9S6A2798_614_1857_2201_2501.jpg 51 | Rissbilder_for_Florian_9S6A2798_697_1777_2994_3062.jpg 52 | Rissbilder_for_Florian_9S6A2799_133_2058_3555_3079.jpg 53 | Rissbilder_for_Florian_9S6A2799_275_471_3176_4152.jpg 54 | Rissbilder_for_Florian_9S6A2799_288_786_3196_3304.jpg 55 | Rissbilder_for_Florian_9S6A2800_191_3241_2390_2435.jpg 56 | Rissbilder_for_Florian_9S6A2800_195_2531_3219_2684.jpg 57 | Rissbilder_for_Florian_9S6A2800_382_814_2795_2670.jpg 58 | Rissbilder_for_Florian_9S6A2800_48_573_3788_4425.jpg 59 | Rissbilder_for_Florian_9S6A2800_552_1819_3171_3588.jpg 60 | Rissbilder_for_Florian_9S6A2800_817_399_2611_2043.jpg 61 | Rissbilder_for_Florian_9S6A2802_251_84_3754_3687.jpg 62 | Rissbilder_for_Florian_9S6A2802_2931_279_2589_2739.jpg 63 | Rissbilder_for_Florian_9S6A2802_631_103_2560_2012.jpg 64 | Rissbilder_for_Florian_9S6A2803_209_1110_2922_3685.jpg 65 | Rissbilder_for_Florian_9S6A2803_247_1248_3459_4462.jpg 66 | Rissbilder_for_Florian_9S6A2803_272_142_3359_3107.jpg 67 | Rissbilder_for_Florian_9S6A2803_612_408_2717_3030.jpg 68 | 
Rissbilder_for_Florian_9S6A2804_118_1818_2606_2570.jpg 69 | Rissbilder_for_Florian_9S6A2805_1632_240_3224_2475.jpg 70 | Rissbilder_for_Florian_9S6A2805_3150_203_1998_2621.jpg 71 | Rissbilder_for_Florian_9S6A2805_565_1400_2938_2247.jpg 72 | Rissbilder_for_Florian_9S6A2806_1082_397_4233_3223.jpg 73 | Rissbilder_for_Florian_9S6A2806_3363_1303_2009_2459.jpg 74 | Rissbilder_for_Florian_9S6A2806_368_348_2093_2759.jpg 75 | Rissbilder_for_Florian_9S6A2807_112_2167_3474_2842.jpg 76 | Rissbilder_for_Florian_9S6A2807_149_637_3674_3362.jpg 77 | Rissbilder_for_Florian_9S6A2807_224_1187_3581_3340.jpg 78 | Rissbilder_for_Florian_9S6A2807_577_1818_2154_2452.jpg 79 | Rissbilder_for_Florian_9S6A2808_19_37_3435_3257.jpg 80 | Rissbilder_for_Florian_9S6A2808_222_2951_3443_2623.jpg 81 | Rissbilder_for_Florian_9S6A2808_30_709_3501_4305.jpg 82 | Rissbilder_for_Florian_9S6A2810_309_181_3327_4094.jpg 83 | Rissbilder_for_Florian_9S6A2810_43_528_3225_4185.jpg 84 | Rissbilder_for_Florian_9S6A2810_65_655_2748_2720.jpg 85 | Rissbilder_for_Florian_9S6A2811_119_80_3414_3691.jpg 86 | Rissbilder_for_Florian_9S6A2811_1873_211_2406_2890.jpg 87 | Rissbilder_for_Florian_9S6A2812_1545_131_2276_2406.jpg 88 | Rissbilder_for_Florian_9S6A2812_372_1081_3294_3264.jpg 89 | Rissbilder_for_Florian_9S6A2812_6_41_3627_3234.jpg 90 | Rissbilder_for_Florian_9S6A2813_90_1297_3435_4044.jpg 91 | Rissbilder_for_Florian_9S6A2815_1237_159_3590_2891.jpg 92 | Rissbilder_for_Florian_9S6A2816_0_0_3840_5760.jpg 93 | Rissbilder_for_Florian_9S6A2816_113_51_3722_4201.jpg 94 | Rissbilder_for_Florian_9S6A2816_1644_2004_1934_2558.jpg 95 | Rissbilder_for_Florian_9S6A2816_255_1654_2762_3248.jpg 96 | Rissbilder_for_Florian_9S6A2816_375_1185_3365_3750.jpg 97 | Rissbilder_for_Florian_9S6A2816_48_1105_3458_4373.jpg 98 | Rissbilder_for_Florian_9S6A2817_266_1535_2877_3827.jpg 99 | Rissbilder_for_Florian_9S6A2817_362_2779_2835_2376.jpg 100 | Rissbilder_for_Florian_9S6A2817_378_2811_2792_2576.jpg 101 | 
Rissbilder_for_Florian_9S6A2817_515_925_2769_3395.jpg 102 | Rissbilder_for_Florian_9S6A2817_67_221_2558_2411.jpg 103 | Rissbilder_for_Florian_9S6A2818_205_138_2610_2193.jpg 104 | Rissbilder_for_Florian_9S6A2818_208_545_3096_3152.jpg 105 | Rissbilder_for_Florian_9S6A2818_22_73_3648_4328.jpg 106 | Rissbilder_for_Florian_9S6A2818_287_1431_3198_4185.jpg 107 | Rissbilder_for_Florian_9S6A2818_819_1604_2582_2762.jpg 108 | Rissbilder_for_Florian_9S6A2818_952_1532_2829_2875.jpg 109 | Rissbilder_for_Florian_9S6A2820_304_486_3141_3670.jpg 110 | Rissbilder_for_Florian_9S6A2820_366_660_2637_3456.jpg 111 | Rissbilder_for_Florian_9S6A2821_1009_504_3415_2649.jpg 112 | Rissbilder_for_Florian_9S6A2821_1089_894_2343_2903.jpg 113 | Rissbilder_for_Florian_9S6A2821_2779_46_2936_3794.jpg 114 | Rissbilder_for_Florian_9S6A2822_1030_155_3516_3590.jpg 115 | Rissbilder_for_Florian_9S6A2822_1137_465_2983_2428.jpg 116 | Rissbilder_for_Florian_9S6A2822_2401_400_2963_3258.jpg 117 | Rissbilder_for_Florian_9S6A2823_106_892_3634_4471.jpg 118 | Rissbilder_for_Florian_9S6A2825_325_1707_3330_2732.jpg 119 | Rissbilder_for_Florian_9S6A2825_340_882_2590_2354.jpg 120 | Rissbilder_for_Florian_9S6A2825_565_2563_2166_2123.jpg 121 | Rissbilder_for_Florian_9S6A2825_65_315_3693_4161.jpg 122 | Rissbilder_for_Florian_9S6A2825_71_1920_3644_3801.jpg 123 | Rissbilder_for_Florian_9S6A2825_711_1408_2863_2522.jpg 124 | Rissbilder_for_Florian_9S6A2826_0_0_3840_5760.jpg 125 | Rissbilder_for_Florian_9S6A2826_418_1200_2774_3626.jpg 126 | Rissbilder_for_Florian_9S6A2826_65_1595_3304_2654.jpg 127 | Rissbilder_for_Florian_9S6A2826_985_495_2477_3280.jpg 128 | Rissbilder_for_Florian_9S6A2827_1172_836_2226_2545.jpg 129 | Rissbilder_for_Florian_9S6A2827_1553_813_2699_2293.jpg 130 | Rissbilder_for_Florian_9S6A2828_111_1161_3542_4005.jpg 131 | Rissbilder_for_Florian_9S6A2828_81_1831_3515_3918.jpg 132 | Rissbilder_for_Florian_9S6A2828_958_2102_2564_2428.jpg 133 | Rissbilder_for_Florian_9S6A2829_248_0_4607_3807.jpg 134 | 
Rissbilder_for_Florian_9S6A2829_465_128_3261_3385.jpg 135 | Rissbilder_for_Florian_9S6A2830_317_1218_3139_3120.jpg 136 | Rissbilder_for_Florian_9S6A2830_356_323_2546_2076.jpg 137 | Rissbilder_for_Florian_9S6A2830_715_2986_2996_2571.jpg 138 | Rissbilder_for_Florian_9S6A2830_87_511_3003_3652.jpg 139 | Rissbilder_for_Florian_9S6A2831_130_3142_3148_2567.jpg 140 | Rissbilder_for_Florian_9S6A2831_1392_523_2363_2146.jpg 141 | Rissbilder_for_Florian_9S6A2831_14_1354_3612_3768.jpg 142 | Rissbilder_for_Florian_9S6A2831_183_1551_2640_2168.jpg 143 | Rissbilder_for_Florian_9S6A2831_237_201_3399_4522.jpg 144 | Rissbilder_for_Florian_9S6A2831_277_1810_3479_3118.jpg 145 | Rissbilder_for_Florian_9S6A2831_343_1804_2873_2254.jpg 146 | Rissbilder_for_Florian_9S6A2832_1234_336_2375_2845.jpg 147 | Rissbilder_for_Florian_9S6A2832_185_2346_3101_2742.jpg 148 | Rissbilder_for_Florian_9S6A2832_375_1408_3366_3866.jpg 149 | Rissbilder_for_Florian_9S6A2833_0_720_3814_3967.jpg 150 | Rissbilder_for_Florian_9S6A2833_11_1057_3805_3303.jpg 151 | Rissbilder_for_Florian_9S6A2833_596_837_3205_3950.jpg 152 | Rissbilder_for_Florian_9S6A2834_1012_1643_2453_3015.jpg 153 | Rissbilder_for_Florian_9S6A2834_18_332_3392_3333.jpg 154 | Rissbilder_for_Florian_9S6A2834_196_114_2762_3611.jpg 155 | Rissbilder_for_Florian_9S6A2834_40_1275_3784_4195.jpg 156 | Rissbilder_for_Florian_9S6A2835_373_1783_2745_2384.jpg 157 | Rissbilder_for_Florian_9S6A2835_459_1693_3011_2424.jpg 158 | Rissbilder_for_Florian_9S6A2835_468_2521_2391_2304.jpg 159 | Rissbilder_for_Florian_9S6A2835_469_1050_2755_3142.jpg 160 | Rissbilder_for_Florian_9S6A2835_637_235_2191_2778.jpg 161 | Rissbilder_for_Florian_9S6A2835_964_2753_2392_2184.jpg 162 | Rissbilder_for_Florian_9S6A2836_0_0_3840_5760.jpg 163 | Rissbilder_for_Florian_9S6A2836_391_1220_2381_3118.jpg 164 | Rissbilder_for_Florian_9S6A2837_523_803_3305_3454.jpg 165 | Rissbilder_for_Florian_9S6A2838_1264_697_1978_2438.jpg 166 | Rissbilder_for_Florian_9S6A2838_419_638_3373_4346.jpg 167 | 
Rissbilder_for_Florian_9S6A2838_565_1405_2810_2334.jpg 168 | Rissbilder_for_Florian_9S6A2839_132_1745_2786_3604.jpg 169 | Rissbilder_for_Florian_9S6A2839_8_923_3789_4181.jpg 170 | Rissbilder_for_Florian_9S6A2840_408_2141_3112_2692.jpg 171 | Rissbilder_for_Florian_9S6A2840_520_1171_3029_3249.jpg 172 | Rissbilder_for_Florian_9S6A2840_7_1394_3830_4317.jpg 173 | Rissbilder_for_Florian_9S6A2840_73_1408_3251_2521.jpg 174 | Rissbilder_for_Florian_9S6A2841_19_570_3460_3784.jpg 175 | Rissbilder_for_Florian_9S6A2841_217_2299_3214_2878.jpg 176 | Rissbilder_for_Florian_9S6A2841_247_162_2749_3393.jpg 177 | Rissbilder_for_Florian_9S6A2841_319_1106_2769_3450.jpg 178 | Rissbilder_for_Florian_9S6A2841_341_1075_3324_3954.jpg 179 | Rissbilder_for_Florian_9S6A2841_542_955_3068_2456.jpg 180 | Rissbilder_for_Florian_9S6A2842_321_767_3019_3008.jpg 181 | Rissbilder_for_Florian_9S6A2842_635_1731_1931_2541.jpg 182 | Rissbilder_for_Florian_9S6A2842_70_2154_3631_3282.jpg 183 | Rissbilder_for_Florian_9S6A2843_12_1425_2920_2441.jpg 184 | Rissbilder_for_Florian_9S6A2843_178_404_2380_2116.jpg 185 | Rissbilder_for_Florian_9S6A2843_369_1526_1885_2389.jpg 186 | Rissbilder_for_Florian_9S6A2843_734_2194_2503_2654.jpg 187 | Rissbilder_for_Florian_9S6A2843_81_439_3537_3036.jpg 188 | Rissbilder_for_Florian_9S6A2844_0_0_3840_5760.jpg 189 | Rissbilder_for_Florian_9S6A2844_443_437_3072_2992.jpg 190 | Rissbilder_for_Florian_9S6A2844_736_848_3078_2427.jpg 191 | Rissbilder_for_Florian_9S6A2845_0_0_5760_3840.jpg 192 | Rissbilder_for_Florian_9S6A2845_1333_32_4399_3805.jpg 193 | Rissbilder_for_Florian_9S6A2845_1480_64_3182_3685.jpg 194 | Rissbilder_for_Florian_9S6A2845_1641_474_2958_3084.jpg 195 | Rissbilder_for_Florian_9S6A2845_2026_3_2804_3214.jpg 196 | Rissbilder_for_Florian_9S6A2845_2181_159_3010_3511.jpg 197 | Rissbilder_for_Florian_9S6A2845_628_184_3360_3293.jpg 198 | Rissbilder_for_Florian_9S6A2846_230_168_2768_2605.jpg 199 | Rissbilder_for_Florian_9S6A2846_268_1498_2452_2689.jpg 200 | 
Rissbilder_for_Florian_9S6A2846_330_640_3250_2553.jpg 201 | Rissbilder_for_Florian_9S6A2846_392_1024_2627_3077.jpg 202 | Rissbilder_for_Florian_9S6A2846_64_1600_3177_3483.jpg 203 | Rissbilder_for_Florian_9S6A2847_112_411_3688_4096.jpg 204 | Rissbilder_for_Florian_9S6A2847_422_1541_2737_2708.jpg 205 | Rissbilder_for_Florian_9S6A2847_57_1886_3694_3848.jpg 206 | Rissbilder_for_Florian_9S6A2848_105_342_3183_3261.jpg 207 | Rissbilder_for_Florian_9S6A2848_1214_523_4329_3293.jpg 208 | Rissbilder_for_Florian_9S6A2849_1010_1722_2676_2957.jpg 209 | Rissbilder_for_Florian_9S6A2849_239_1191_3521_3188.jpg 210 | Rissbilder_for_Florian_9S6A2849_465_1014_2857_3753.jpg 211 | Rissbilder_for_Florian_9S6A2850_1113_51_4606_3781.jpg 212 | Rissbilder_for_Florian_9S6A2850_1408_904_2105_2792.jpg 213 | Rissbilder_for_Florian_9S6A2851_1713_30_3619_3365.jpg 214 | Rissbilder_for_Florian_9S6A2851_1976_734_2746_2491.jpg 215 | Rissbilder_for_Florian_9S6A2851_2413_175_2645_3485.jpg 216 | Rissbilder_for_Florian_9S6A2851_512_50_2972_2678.jpg 217 | Rissbilder_for_Florian_9S6A2852_1108_175_3570_3421.jpg 218 | Rissbilder_for_Florian_9S6A2852_356_540_3667_2981.jpg 219 | Rissbilder_for_Florian_9S6A2853_1127_1463_2379_2376.jpg 220 | Rissbilder_for_Florian_9S6A2853_826_286_2004_2464.jpg 221 | Rissbilder_for_Florian_9S6A2854_162_1195_3169_2569.jpg 222 | Rissbilder_for_Florian_9S6A2856_1840_62_3055_3769.jpg 223 | Rissbilder_for_Florian_9S6A2856_2611_839_1919_2466.jpg 224 | Rissbilder_for_Florian_9S6A2856_3030_1197_2337_2140.jpg 225 | Rissbilder_for_Florian_9S6A2856_3095_641_2548_2416.jpg 226 | Rissbilder_for_Florian_9S6A2857_1203_1130_2423_3169.jpg 227 | Rissbilder_for_Florian_9S6A2857_148_211_2303_2832.jpg 228 | Rissbilder_for_Florian_9S6A2857_159_2331_3518_2794.jpg 229 | Rissbilder_for_Florian_9S6A2857_585_241_3220_4175.jpg 230 | Rissbilder_for_Florian_9S6A2857_807_939_2369_1886.jpg 231 | Rissbilder_for_Florian_9S6A2857_89_2266_3467_3213.jpg 232 | Rissbilder_for_Florian_9S6A2858_103_1887_2963_2745.jpg 233 
| Rissbilder_for_Florian_9S6A2858_138_1397_3695_4304.jpg 234 | Rissbilder_for_Florian_9S6A2858_203_242_3312_2597.jpg 235 | Rissbilder_for_Florian_9S6A2858_24_161_2934_2248.jpg 236 | Rissbilder_for_Florian_9S6A2858_559_619_3260_3533.jpg 237 | Rissbilder_for_Florian_9S6A2858_58_840_3696_4727.jpg 238 | Rissbilder_for_Florian_9S6A2858_74_2277_2849_2285.jpg 239 | Rissbilder_for_Florian_9S6A2858_825_486_2361_2715.jpg 240 | Rissbilder_for_Florian_9S6A2859_1580_320_3896_3471.jpg 241 | Rissbilder_for_Florian_9S6A2859_1796_34_3392_3713.jpg 242 | Rissbilder_for_Florian_9S6A2859_2073_725_3430_2687.jpg 243 | Rissbilder_for_Florian_9S6A2860_109_355_3609_3381.jpg 244 | Rissbilder_for_Florian_9S6A2860_2652_869_3027_2537.jpg 245 | Rissbilder_for_Florian_9S6A2860_476_406_3989_3027.jpg 246 | Rissbilder_for_Florian_9S6A2860_707_1403_2200_2149.jpg 247 | Rissbilder_for_Florian_9S6A2861_1002_825_2601_2341.jpg 248 | Rissbilder_for_Florian_9S6A2861_327_851_2964_3474.jpg 249 | Rissbilder_for_Florian_9S6A2861_716_324_2856_3710.jpg 250 | Rissbilder_for_Florian_9S6A2862_13_1221_3798_4291.jpg 251 | Rissbilder_for_Florian_9S6A2862_16_760_3062_2984.jpg 252 | Rissbilder_for_Florian_9S6A2862_54_2006_3577_3513.jpg 253 | Rissbilder_for_Florian_9S6A2862_550_1911_3038_3042.jpg 254 | Rissbilder_for_Florian_9S6A2863_312_1094_3025_3361.jpg 255 | Rissbilder_for_Florian_9S6A2863_423_3163_2502_2009.jpg 256 | Rissbilder_for_Florian_9S6A2864_0_0_3840_5760.jpg 257 | Rissbilder_for_Florian_9S6A2864_474_1863_3308_2536.jpg 258 | Rissbilder_for_Florian_9S6A2864_95_886_3690_4688.jpg 259 | Rissbilder_for_Florian_9S6A2865_215_1866_2936_3607.jpg 260 | Rissbilder_for_Florian_9S6A2865_321_3005_2468_2755.jpg 261 | Rissbilder_for_Florian_9S6A2865_402_376_2830_3345.jpg 262 | Rissbilder_for_Florian_9S6A2865_415_206_2924_3039.jpg 263 | Rissbilder_for_Florian_9S6A2865_48_35_3665_3847.jpg 264 | Rissbilder_for_Florian_9S6A2865_874_516_2613_2236.jpg 265 | Rissbilder_for_Florian_9S6A2866_277_585_3310_3706.jpg 266 | 
Rissbilder_for_Florian_9S6A2866_643_1201_2050_2536.jpg 267 | Rissbilder_for_Florian_9S6A2867_114_180_3373_2642.jpg 268 | Rissbilder_for_Florian_9S6A2867_249_168_3377_3485.jpg 269 | Rissbilder_for_Florian_9S6A2867_26_155_3813_3142.jpg 270 | Rissbilder_for_Florian_9S6A2867_515_377_2926_3568.jpg 271 | Rissbilder_for_Florian_9S6A2867_657_766_2790_3388.jpg 272 | Rissbilder_for_Florian_9S6A2868_169_34_3542_2947.jpg 273 | Rissbilder_for_Florian_9S6A2868_252_1319_2263_2294.jpg 274 | Rissbilder_for_Florian_9S6A2868_257_950_3108_3346.jpg 275 | Rissbilder_for_Florian_9S6A2868_353_804_2737_3193.jpg 276 | Rissbilder_for_Florian_9S6A2868_867_2101_2821_2989.jpg 277 | Rissbilder_for_Florian_9S6A2869_107_1230_3580_4395.jpg 278 | Rissbilder_for_Florian_9S6A2869_1234_372_2013_2272.jpg 279 | Rissbilder_for_Florian_9S6A2869_171_1902_3454_3763.jpg 280 | Rissbilder_for_Florian_9S6A2869_37_1430_3692_3865.jpg 281 | Rissbilder_for_Florian_9S6A2869_6_538_3621_3662.jpg 282 | Rissbilder_for_Florian_9S6A2869_855_1425_2447_2662.jpg 283 | Rissbilder_for_Florian_9S6A2869_878_3059_2683_2082.jpg 284 | Rissbilder_for_Florian_9S6A2870_121_518_3261_3345.jpg 285 | Rissbilder_for_Florian_9S6A2870_504_1261_2761_3438.jpg 286 | Rissbilder_for_Florian_9S6A2873_0_0_5760_3840.jpg 287 | Rissbilder_for_Florian_9S6A2873_244_84_4072_3620.jpg 288 | Rissbilder_for_Florian_9S6A2873_904_515_2792_2550.jpg 289 | Rissbilder_for_Florian_9S6A2873_905_438_3442_2738.jpg 290 | Rissbilder_for_Florian_9S6A2873_930_236_3379_3204.jpg 291 | Rissbilder_for_Florian_9S6A2874_251_526_2458_2696.jpg 292 | Rissbilder_for_Florian_9S6A2875_169_1864_3516_3562.jpg 293 | Rissbilder_for_Florian_9S6A2875_228_47_2676_2777.jpg 294 | Rissbilder_for_Florian_9S6A2875_36_523_3631_3027.jpg 295 | Rissbilder_for_Florian_9S6A2875_97_864_3672_4225.jpg 296 | Rissbilder_for_Florian_9S6A2876_169_722_2977_3880.jpg 297 | Rissbilder_for_Florian_9S6A2876_79_1349_3734_3387.jpg 298 | Rissbilder_for_Florian_9S6A2876_800_744_2945_3705.jpg 299 | 
Rissbilder_for_Florian_9S6A2877_304_1360_2589_2329.jpg 300 | Rissbilder_for_Florian_9S6A2877_417_140_3357_4443.jpg 301 | Rissbilder_for_Florian_9S6A2877_472_1716_2206_2901.jpg 302 | Rissbilder_for_Florian_9S6A2878_186_801_3630_3688.jpg 303 | Rissbilder_for_Florian_9S6A2878_287_270_3223_3935.jpg 304 | Rissbilder_for_Florian_9S6A2878_29_861_3713_4568.jpg 305 | Rissbilder_for_Florian_9S6A2878_373_2066_2452_3245.jpg 306 | Rissbilder_for_Florian_9S6A2878_67_878_2624_2752.jpg 307 | Rissbilder_for_Florian_9S6A2878_861_28_2800_2885.jpg 308 | Rissbilder_for_Florian_9S6A2879_0_0_5760_3840.jpg 309 | Rissbilder_for_Florian_9S6A2879_1312_923_2418_2909.jpg 310 | Rissbilder_for_Florian_9S6A2879_1320_542_3826_3214.jpg 311 | Rissbilder_for_Florian_9S6A2880_1_709_3836_3003.jpg 312 | Rissbilder_for_Florian_9S6A2880_1244_1022_2375_2527.jpg 313 | Rissbilder_for_Florian_9S6A2880_131_2019_3358_3435.jpg 314 | Rissbilder_for_Florian_9S6A2880_277_388_3305_4321.jpg 315 | Rissbilder_for_Florian_9S6A2881_0_0_3840_5760.jpg 316 | Rissbilder_for_Florian_9S6A2881_1046_39_2575_3270.jpg 317 | Rissbilder_for_Florian_9S6A2881_277_1792_3424_2971.jpg 318 | Rissbilder_for_Florian_9S6A2881_77_297_3144_3819.jpg 319 | Rissbilder_for_Florian_9S6A2882_346_490_2497_1975.jpg 320 | Rissbilder_for_Florian_9S6A2882_458_1944_3282_2844.jpg 321 | Rissbilder_for_Florian_9S6A2883_0_0_3840_5760.jpg 322 | Rissbilder_for_Florian_9S6A2883_290_1780_2688_2984.jpg 323 | Rissbilder_for_Florian_9S6A2884_103_1141_3407_4451.jpg 324 | Rissbilder_for_Florian_9S6A2884_158_1281_3498_4054.jpg 325 | Rissbilder_for_Florian_9S6A2884_19_1803_3537_2796.jpg 326 | Rissbilder_for_Florian_9S6A2884_197_1323_3592_3665.jpg 327 | Rissbilder_for_Florian_9S6A2885_167_1375_2073_2363.jpg 328 | Rissbilder_for_Florian_9S6A2885_67_973_3621_3313.jpg 329 | Rissbilder_for_Florian_9S6A2885_82_1409_2816_3479.jpg 330 | Rissbilder_for_Florian_9S6A2886_106_479_3642_4594.jpg 331 | Rissbilder_for_Florian_9S6A2886_110_1312_3454_3075.jpg 332 | 
Rissbilder_for_Florian_9S6A2886_12_1171_3583_2973.jpg 333 | Rissbilder_for_Florian_9S6A2886_273_2665_2929_2302.jpg 334 | Rissbilder_for_Florian_9S6A2886_695_71_2627_3457.jpg 335 | Rissbilder_for_Florian_9S6A2886_984_32_2437_2598.jpg 336 | Rissbilder_for_Florian_9S6A2887_452_483_3013_2564.jpg 337 | Rissbilder_for_Florian_9S6A2887_749_2501_3000_2991.jpg 338 | Rissbilder_for_Florian_9S6A2888_133_918_3617_3712.jpg 339 | Rissbilder_for_Florian_9S6A2889_470_1014_3063_3785.jpg 340 | Rissbilder_for_Florian_9S6A2889_671_2114_2686_3238.jpg 341 | Rissbilder_for_Florian_9S6A2890_0_0_3840_5760.jpg 342 | Rissbilder_for_Florian_9S6A2890_1213_1864_1830_2425.jpg 343 | Rissbilder_for_Florian_9S6A2892_103_890_2526_2526.jpg 344 | Rissbilder_for_Florian_9S6A2892_1086_390_3050_2813.jpg 345 | Rissbilder_for_Florian_9S6A2892_1754_604_2639_2889.jpg 346 | Rissbilder_for_Florian_9S6A2893_46_3035_3311_2720.jpg 347 | Rissbilder_for_Florian_9S6A2893_986_2947_2387_2036.jpg 348 | Rissbilder_for_Florian_9S6A2894_2035_188_3164_3103.jpg 349 | Rissbilder_for_Florian_9S6A2894_2274_588_3314_3190.jpg 350 | Rissbilder_for_Florian_9S6A2895_46_163_3635_3331.jpg 351 | Rissbilder_for_Florian_9S6A2896_0_0_3840_5760.jpg 352 | Rissbilder_for_Florian_9S6A2896_1287_1184_2528_1916.jpg 353 | Rissbilder_for_Florian_9S6A2896_1446_730_2288_2912.jpg 354 | Rissbilder_for_Florian_9S6A2896_28_1443_3701_3540.jpg 355 | Rissbilder_for_Florian_9S6A2896_616_2037_3008_3401.jpg 356 | Rissbilder_for_Florian_9S6A2896_788_3134_2243_2150.jpg 357 | Rissbilder_for_Florian_9S6A2897_1309_1879_2158_2129.jpg 358 | Rissbilder_for_Florian_9S6A2897_161_1181_3216_3670.jpg 359 | Rissbilder_for_Florian_9S6A2897_480_85_2898_3545.jpg 360 | Rissbilder_for_Florian_9S6A2897_575_1892_2708_3567.jpg 361 | Rissbilder_for_Florian_9S6A2897_9_818_3756_3081.jpg 362 | Rissbilder_for_Florian_9S6A2900_113_3085_3124_2494.jpg 363 | Rissbilder_for_Florian_9S6A2900_163_3290_2437_2240.jpg 364 | Rissbilder_for_Florian_9S6A2900_197_1041_3309_3760.jpg 365 | 
Rissbilder_for_Florian_9S6A2901_0_0_3840_5760.jpg 366 | Rissbilder_for_Florian_9S6A2901_55_3400_2984_2240.jpg 367 | Rissbilder_for_Florian_9S6A2901_67_161_3343_3273.jpg 368 | Rissbilder_for_Florian_9S6A2902_0_0_3840_5760.jpg 369 | Rissbilder_for_Florian_9S6A2902_163_2262_3171_3274.jpg 370 | Rissbilder_for_Florian_9S6A2902_192_967_2598_2117.jpg 371 | Rissbilder_for_Florian_9S6A2903_385_511_3375_3468.jpg 372 | Rissbilder_for_Florian_9S6A2903_438_739_2998_2746.jpg 373 | Rissbilder_for_Florian_9S6A2904_447_893_3086_3169.jpg 374 | Rissbilder_for_Florian_9S6A2904_78_719_3211_3093.jpg 375 | Rissbilder_for_Florian_9S6A2904_86_208_2615_2808.jpg 376 | Rissbilder_for_Florian_9S6A2905_1082_994_2878_2819.jpg 377 | Rissbilder_for_Florian_9S6A2905_153_412_3269_2899.jpg 378 | Rissbilder_for_Florian_9S6A2905_2363_42_2827_2612.jpg 379 | Rissbilder_for_Florian_9S6A2905_795_159_2896_2675.jpg 380 | Rissbilder_for_Florian_9S6A2905_928_88_4020_3658.jpg 381 | Rissbilder_for_Florian_9S6A2906_0_0_3840_5760.jpg 382 | Rissbilder_for_Florian_9S6A2906_358_658_3398_3904.jpg 383 | Rissbilder_for_Florian_9S6A2906_52_256_3772_3845.jpg 384 | Rissbilder_for_Florian_9S6A2906_54_350_3631_4518.jpg 385 | Rissbilder_for_Florian_9S6A2906_67_759_3263_3857.jpg 386 | Rissbilder_for_Florian_9S6A2908_1091_77_2830_3744.jpg 387 | Rissbilder_for_Florian_9S6A2908_1096_896_2445_1861.jpg 388 | Rissbilder_for_Florian_9S6A2908_1599_736_3074_2729.jpg 389 | Rissbilder_for_Florian_9S6A2909_191_2009_3191_3109.jpg 390 | Rissbilder_for_Florian_9S6A2909_36_61_3613_3826.jpg 391 | Rissbilder_for_Florian_9S6A2909_485_850_3106_4081.jpg 392 | Rissbilder_for_Florian_9S6A2910_0_0_3840_5760.jpg 393 | Rissbilder_for_Florian_9S6A2910_113_175_3097_2360.jpg 394 | Rissbilder_for_Florian_9S6A2910_43_1959_3615_3077.jpg 395 | Rissbilder_for_Florian_9S6A2910_59_1878_2063_2153.jpg 396 | Rissbilder_for_Florian_9S6A2910_880_2365_2685_2887.jpg 397 | Rissbilder_for_Florian_9S6A2911_1125_1393_2108_2755.jpg 398 | 
Rissbilder_for_Florian_9S6A2911_323_1692_3460_3572.jpg 399 | Rissbilder_for_Florian_9S6A2912_0_0_3840_5760.jpg 400 | Rissbilder_for_Florian_9S6A2912_255_1175_3429_3203.jpg 401 | Rissbilder_for_Florian_9S6A2912_85_369_3711_3429.jpg 402 | Rissbilder_for_Florian_9S6A2913_20_297_2640_2502.jpg 403 | Rissbilder_for_Florian_9S6A2913_320_3010_3339_2578.jpg 404 | Rissbilder_for_Florian_9S6A2913_321_16_3485_4543.jpg 405 | Rissbilder_for_Florian_9S6A2914_10_816_3799_4560.jpg 406 | Rissbilder_for_Florian_9S6A2914_165_203_2511_2307.jpg 407 | Rissbilder_for_Florian_9S6A2914_601_1274_2657_3363.jpg 408 | Rissbilder_for_Florian_9S6A2914_61_827_3521_3770.jpg 409 | Rissbilder_for_Florian_9S6A2914_864_1519_2861_3089.jpg 410 | Rissbilder_for_Florian_9S6A2915_1128_2752_2373_2395.jpg 411 | Rissbilder_for_Florian_9S6A2915_757_2194_2406_2383.jpg 412 | Rissbilder_for_Florian_9S6A2916_1381_1379_2811_2259.jpg 413 | Rissbilder_for_Florian_9S6A2916_1475_441_4120_3342.jpg 414 | Rissbilder_for_Florian_9S6A2916_2449_405_2485_3276.jpg 415 | Rissbilder_for_Florian_9S6A2916_26_880_2781_2363.jpg 416 | Rissbilder_for_Florian_9S6A2916_361_66_3733_3710.jpg 417 | Rissbilder_for_Florian_9S6A2917_0_74_3825_4343.jpg 418 | Rissbilder_for_Florian_9S6A2917_313_1519_3434_4037.jpg 419 | Rissbilder_for_Florian_9S6A2917_486_1027_3148_3681.jpg 420 | Rissbilder_for_Florian_9S6A2917_599_2662_2081_2529.jpg 421 | Rissbilder_for_Florian_9S6A2918_430_1739_2794_3223.jpg 422 | Rissbilder_for_Florian_9S6A2919_1008_848_2956_2327.jpg 423 | Rissbilder_for_Florian_9S6A2919_1139_6_3401_3091.jpg 424 | Rissbilder_for_Florian_9S6A2919_311_375_3331_2605.jpg 425 | Rissbilder_for_Florian_9S6A2919_910_611_3253_2887.jpg 426 | Rissbilder_for_Florian_9S6A2923_1668_185_3721_3137.jpg 427 | Rissbilder_for_Florian_9S6A2923_49_326_3736_3442.jpg 428 | Rissbilder_for_Florian_9S6A2923_875_539_3052_2796.jpg 429 | Rissbilder_for_Florian_9S6A3085_46_1568_3730_4175.jpg 430 | Rissbilder_for_Florian_9S6A3085_497_692_2640_2077.jpg 431 | 
Rissbilder_for_Florian_9S6A3085_67_497_3262_3700.jpg 432 | Rissbilder_for_Florian_9S6A3087_146_59_3684_4477.jpg 433 | Rissbilder_for_Florian_9S6A3088_370_2375_2620_2464.jpg 434 | Rissbilder_for_Florian_9S6A3089_0_0_3840_5760.jpg 435 | Rissbilder_for_Florian_9S6A3089_369_1679_3470_3717.jpg 436 | Rissbilder_for_Florian_9S6A3090_836_130_3789_3301.jpg 437 | Rissbilder_for_Florian_9S6A3090_960_1737_2204_2039.jpg 438 | Rissbilder_for_Florian_9S6A3091_375_333_2309_2364.jpg 439 | Rissbilder_for_Florian_9S6A3091_481_1320_3311_3035.jpg 440 | Rissbilder_for_Florian_9S6A3092_272_70_2808_2402.jpg 441 | Rissbilder_for_Florian_9S6A3092_3399_925_2286_2829.jpg 442 | Rissbilder_for_Florian_9S6A3092_623_215_3689_2974.jpg 443 | Rissbilder_for_Florian_9S6A3093_157_2766_2021_2677.jpg 444 | Rissbilder_for_Florian_9S6A3093_616_1617_3135_4052.jpg 445 | Rissbilder_for_Florian_9S6A3093_895_2172_2481_2123.jpg 446 | Rissbilder_for_Florian_9S6A3094_17_1195_3751_3661.jpg 447 | Rissbilder_for_Florian_9S6A3094_504_2131_3084_3443.jpg 448 | Rissbilder_for_Florian_9S6A3094_596_1754_2681_3046.jpg 449 | Rissbilder_for_Florian_9S6A3095_418_2347_2757_3271.jpg 450 | Rissbilder_for_Florian_9S6A3095_514_444_3123_3906.jpg 451 | Rissbilder_for_Florian_9S6A3095_592_717_2568_2704.jpg 452 | Rissbilder_for_Florian_9S6A3095_89_2165_3402_3441.jpg 453 | Rissbilder_for_Florian_9S6A3096_347_506_3372_3116.jpg 454 | Rissbilder_for_Florian_9S6A3096_515_1896_2914_2761.jpg 455 | Rissbilder_for_Florian_9S6A3096_899_155_2326_2742.jpg 456 | Rissbilder_for_Florian_9S6A3098_1300_3220_2283_2309.jpg 457 | Rissbilder_for_Florian_9S6A3098_64_1239_3474_2730.jpg 458 | Rissbilder_for_Florian_9S6A3098_88_800_3751_4641.jpg 459 | Rissbilder_for_Florian_9S6A3099_12_492_3822_3921.jpg 460 | Rissbilder_for_Florian_9S6A3099_120_26_3496_4484.jpg 461 | Rissbilder_for_Florian_9S6A3099_441_1535_3289_4016.jpg 462 | Rissbilder_for_Florian_9S6A3099_86_2659_2827_2257.jpg 463 | Rissbilder_for_Florian_9S6A3100_374_2397_2891_2543.jpg 464 | 
Rissbilder_for_Florian_9S6A3100_62_648_2874_2646.jpg 465 | Rissbilder_for_Florian_9S6A3100_765_387_2677_2632.jpg 466 | Rissbilder_for_Florian_9S6A3101_1262_2535_2488_2185.jpg 467 | Rissbilder_for_Florian_9S6A3101_148_599_2929_2917.jpg 468 | Rissbilder_for_Florian_9S6A3101_487_946_2726_3042.jpg 469 | Rissbilder_for_Florian_9S6A3102_100_448_3704_4115.jpg 470 | Rissbilder_for_Florian_9S6A3103_225_1283_3432_3981.jpg 471 | Rissbilder_for_Florian_9S6A3103_43_936_3780_4403.jpg 472 | Rissbilder_for_Florian_9S6A3103_446_1371_3306_4182.jpg 473 | Rissbilder_for_Florian_9S6A3103_513_1787_2538_2641.jpg 474 | Rissbilder_for_Florian_9S6A3104_1061_1181_3086_2630.jpg 475 | Rissbilder_for_Florian_9S6A3104_151_147_3971_3570.jpg 476 | Rissbilder_for_Florian_9S6A3104_783_263_3586_3511.jpg 477 | Rissbilder_for_Florian_9S6A3105_186_619_3616_4246.jpg 478 | Rissbilder_for_Florian_9S6A3105_871_2904_2401_2180.jpg 479 | Rissbilder_for_Florian_9S6A3105_95_40_3736_3924.jpg 480 | Rissbilder_for_Florian_9S6A3106_108_1927_3275_2664.jpg 481 | Rissbilder_for_Florian_9S6A3106_228_1483_2823_2920.jpg 482 | Rissbilder_for_Florian_9S6A3106_310_2925_3257_2813.jpg 483 | Rissbilder_for_Florian_9S6A3106_333_1784_3162_3278.jpg 484 | Rissbilder_for_Florian_9S6A3106_521_315_3058_2943.jpg 485 | Rissbilder_for_Florian_9S6A3106_85_2598_2058_2287.jpg 486 | Rissbilder_for_Florian_9S6A3106_9_406_3814_4267.jpg 487 | Rissbilder_for_Florian_9S6A3108_130_372_2601_2517.jpg 488 | Rissbilder_for_Florian_9S6A3108_134_412_3328_4228.jpg 489 | Rissbilder_for_Florian_9S6A3108_160_1043_3170_3960.jpg 490 | Rissbilder_for_Florian_9S6A3108_365_3231_2323_2320.jpg 491 | Rissbilder_for_Florian_9S6A3108_43_1162_3415_3672.jpg 492 | Rissbilder_for_Florian_9S6A3108_651_10_2896_3045.jpg 493 | Rissbilder_for_Florian_9S6A3109_163_622_3006_3321.jpg 494 | Rissbilder_for_Florian_9S6A3109_440_362_3337_4316.jpg 495 | Rissbilder_for_Florian_9S6A3109_793_2237_2466_2142.jpg 496 | Rissbilder_for_Florian_9S6A3110_0_0_3840_5760.jpg 497 | 
Rissbilder_for_Florian_9S6A3110_52_945_3558_4432.jpg 498 | Rissbilder_for_Florian_9S6A3111_42_1509_3500_3908.jpg 499 | Rissbilder_for_Florian_9S6A3111_601_683_2556_3338.jpg 500 | Rissbilder_for_Florian_9S6A3112_1296_2688_2055_2721.jpg 501 | Rissbilder_for_Florian_9S6A3112_1349_1614_2446_2337.jpg 502 | Rissbilder_for_Florian_9S6A3113_104_526_3717_3372.jpg 503 | Rissbilder_for_Florian_9S6A3114_1215_3118_2077_2478.jpg 504 | Rissbilder_for_Florian_9S6A3114_358_1983_3344_3063.jpg 505 | Rissbilder_for_Florian_9S6A3114_871_3543_2822_2137.jpg 506 | Rissbilder_for_Florian_9S6A3115_1009_154_4329_3598.jpg 507 | Rissbilder_for_Florian_9S6A3115_1837_674_2470_3120.jpg 508 | Rissbilder_for_Florian_9S6A3115_1867_748_3166_2919.jpg 509 | Rissbilder_for_Florian_9S6A3116_390_1097_2997_3963.jpg 510 | Rissbilder_for_Florian_9S6A3116_555_2380_2956_2340.jpg 511 | Rissbilder_for_Florian_9S6A3117_0_0_3840_5760.jpg 512 | Rissbilder_for_Florian_9S6A3117_155_109_3546_2785.jpg 513 | Rissbilder_for_Florian_9S6A3117_911_680_2749_2883.jpg 514 | Rissbilder_for_Florian_9S6A3118_1132_115_4379_3677.jpg 515 | Rissbilder_for_Florian_9S6A3118_2514_38_2922_3514.jpg 516 | Rissbilder_for_Florian_9S6A3119_1583_2261_2214_2879.jpg 517 | Rissbilder_for_Florian_9S6A3119_179_1656_3530_3136.jpg 518 | Rissbilder_for_Florian_9S6A3119_520_1811_3242_3051.jpg 519 | Rissbilder_for_Florian_9S6A3120_14_1099_3666_4059.jpg 520 | Rissbilder_for_Florian_9S6A3120_217_1113_3331_4192.jpg 521 | Rissbilder_for_Florian_9S6A3120_236_1671_2928_2310.jpg 522 | Rissbilder_for_Florian_9S6A3120_875_1120_2089_2571.jpg 523 | Rissbilder_for_Florian_9S6A3121_1095_2665_2007_2602.jpg 524 | Rissbilder_for_Florian_9S6A3121_224_1466_3238_3220.jpg 525 | Rissbilder_for_Florian_9S6A3121_408_826_2841_3663.jpg 526 | Rissbilder_for_Florian_9S6A3122_155_390_2741_3264.jpg 527 | Rissbilder_for_Florian_9S6A3122_463_1629_3085_3858.jpg 528 | Rissbilder_for_Florian_9S6A3123_240_2905_2709_2486.jpg 529 | Rissbilder_for_Florian_9S6A3123_372_1209_3339_4319.jpg 530 
| Rissbilder_for_Florian_9S6A3124_1300_898_2343_2899.jpg 531 | Rissbilder_for_Florian_9S6A3124_465_2434_3240_3146.jpg 532 | Rissbilder_for_Florian_9S6A3126_162_642_3629_3769.jpg 533 | Rissbilder_for_Florian_9S6A3126_206_1216_3376_3686.jpg 534 | Rissbilder_for_Florian_9S6A3126_264_1533_3180_4043.jpg 535 | Rissbilder_for_Florian_9S6A3126_429_167_3310_4297.jpg 536 | Rissbilder_for_Florian_9S6A3127_114_2737_3530_2753.jpg 537 | Rissbilder_for_Florian_9S6A3127_54_1984_3083_3305.jpg 538 | Rissbilder_for_Florian_9S6A3127_595_2880_2296_2633.jpg 539 | Rissbilder_for_Florian_9S6A3127_653_1639_2788_3252.jpg 540 | Rissbilder_for_Florian_9S6A3127_89_657_3259_3858.jpg 541 | Rissbilder_for_Florian_9S6A3127_9_1051_3792_3003.jpg 542 | Rissbilder_for_Florian_9S6A3128_304_338_2449_2867.jpg 543 | Rissbilder_for_Florian_9S6A3128_580_813_2343_2413.jpg 544 | Rissbilder_for_Florian_9S6A3128_7_578_2768_3056.jpg 545 | Rissbilder_for_Florian_9S6A3129_141_412_2185_2751.jpg 546 | Rissbilder_for_Florian_9S6A3129_78_688_3494_3199.jpg 547 | Rissbilder_for_Florian_9S6A3129_882_707_2680_2479.jpg 548 | Rissbilder_for_Florian_9S6A3130_190_1907_3510_2920.jpg 549 | Rissbilder_for_Florian_9S6A3130_24_518_3753_4265.jpg 550 | Rissbilder_for_Florian_9S6A3130_35_1947_3497_3753.jpg 551 | Rissbilder_for_Florian_9S6A3130_485_279_2724_2520.jpg 552 | Rissbilder_for_Florian_9S6A3130_741_2592_3069_3082.jpg 553 | Rissbilder_for_Florian_9S6A3131_308_1998_2814_2860.jpg 554 | Rissbilder_for_Florian_9S6A3131_389_1670_3375_3959.jpg 555 | Rissbilder_for_Florian_9S6A3131_542_1698_3243_3761.jpg 556 | Rissbilder_for_Florian_9S6A3132_2349_1232_2560_2243.jpg 557 | Rissbilder_for_Florian_9S6A3132_2998_275_2346_3000.jpg 558 | Rissbilder_for_Florian_9S6A3133_207_186_4184_3281.jpg 559 | Rissbilder_for_Florian_9S6A3134_140_2003_2431_2080.jpg 560 | Rissbilder_for_Florian_9S6A3134_417_310_2997_3294.jpg 561 | Rissbilder_for_Florian_9S6A3134_422_2625_3386_2683.jpg 562 | Rissbilder_for_Florian_9S6A3134_750_425_1978_2414.jpg 563 | 
Rissbilder_for_Florian_9S6A3134_76_855_3718_4555.jpg 564 | Rissbilder_for_Florian_9S6A3135_339_1311_2661_3168.jpg 565 | Rissbilder_for_Florian_9S6A3136_0_0_5760_3840.jpg 566 | Rissbilder_for_Florian_9S6A3136_1628_12_2911_3801.jpg 567 | Rissbilder_for_Florian_9S6A3137_1104_2295_2445_3107.jpg 568 | Rissbilder_for_Florian_9S6A3137_1228_1792_2559_2477.jpg 569 | Rissbilder_for_Florian_9S6A3137_182_823_3574_4047.jpg 570 | Rissbilder_for_Florian_9S6A3137_201_2087_3465_3669.jpg 571 | Rissbilder_for_Florian_9S6A3137_313_2279_2337_2933.jpg 572 | Rissbilder_for_Florian_9S6A3137_507_1097_2555_2213.jpg 573 | Rissbilder_for_Florian_9S6A3138_115_569_2228_2606.jpg -------------------------------------------------------------------------------- /train_net.py: -------------------------------------------------------------------------------- 1 | from end2end import End2End 2 | import argparse 3 | from config import Config 4 | 5 | def str2bool(v): 6 | return v.lower() in ("yes", "true", "t", "1") 7 | 8 | def parse_args(): 9 | parser = argparse.ArgumentParser() 10 | 11 | parser.add_argument('--ARCHITECTURE', type=str, required=False, default='SegDecNet++', help="Model architecture to use: SegDecNet++ or SegDecNetOriginalJIM.") 12 | parser.add_argument('--GPU', type=int, required=True, help="ID of GPU used for training/evaluation.") 13 | parser.add_argument('--RUN_NAME', type=str, required=True, help="Name of the run, used as directory name for storing results.") 14 | parser.add_argument('--DATASET', type=str, required=True, help="Which dataset to use.") 15 | parser.add_argument('--DATASET_PATH', type=str, required=True, help="Path to the dataset.") 16 | 17 | parser.add_argument('--EPOCHS', type=int, required=True, help="Number of training epochs.") 18 | 19 | parser.add_argument('--LEARNING_RATE', type=float, required=True, help="Learning rate.") 20 | parser.add_argument('--DELTA_CLS_LOSS', type=float, required=True, help="Weight delta for classification loss.") 21 | 22 | 
parser.add_argument('--BATCH_SIZE', type=int, required=True, help="Batch size for training.") 23 | 24 | parser.add_argument('--WEIGHTED_SEG_LOSS', type=str2bool, required=True, help="Whether to use weighted segmentation loss.") 25 | parser.add_argument('--WEIGHTED_SEG_LOSS_P', type=float, required=False, default=None, help="Degree of polynomial for weighted segmentation loss.") 26 | parser.add_argument('--WEIGHTED_SEG_LOSS_MAX', type=float, required=False, default=None, help="Scaling factor for weighted segmentation loss.") 27 | parser.add_argument('--DYN_BALANCED_LOSS', type=str2bool, required=True, help="Whether to use dynamically balanced loss.") 28 | parser.add_argument('--GRADIENT_ADJUSTMENT', type=str2bool, required=True, help="Whether to use gradient adjustment.") 29 | parser.add_argument('--FREQUENCY_SAMPLING', type=str2bool, required=False, help="Whether to use frequency-of-use based sampling.") 30 | 31 | parser.add_argument('--DILATE', type=int, required=False, default=None, help="Size of dilation kernel for labels") 32 | 33 | parser.add_argument('--FOLD', type=int, default=None, help="Which fold (KSDD) or class (DAGM) to train.") 34 | parser.add_argument('--TRAIN_NUM', type=int, default=None, help="Number of positive training samples for KSDD or STEEL.") 35 | parser.add_argument('--NUM_SEGMENTED', type=int, required=True, default=None, help="Number of segmented positive samples.") 36 | parser.add_argument('--RESULTS_PATH', type=str, default=None, help="Directory to which results are saved.") 37 | 38 | parser.add_argument('--VALIDATE', type=str2bool, default=None, help="Whether to validate during training.") 39 | parser.add_argument('--VALIDATE_ON_TEST', type=str2bool, default=None, help="Whether to validate on test set.") 40 | parser.add_argument('--VALIDATION_N_EPOCHS', type=int, default=None, help="Number of epochs between consecutive validation runs.") 41 | parser.add_argument('--USE_BEST_MODEL', type=str2bool, default=None, help="Whether to use the 
best model according to validation metrics for evaluation.") 42 | 43 | parser.add_argument('--ON_DEMAND_READ', type=str2bool, default=False, help="Whether to use on-demand read of data from disk instead of storing it in memory.") 44 | parser.add_argument('--REPRODUCIBLE_RUN', type=int, default=None, required=False, help="Whether to fix seeds and disable CUDA benchmark mode.") 45 | 46 | parser.add_argument('--MEMORY_FIT', type=int, default=None, help="How many images can be fitted in GPU memory.") 47 | parser.add_argument('--SAVE_IMAGES', type=str2bool, default=None, help="Save test images or not.") 48 | 49 | parser.add_argument('--BEST_MODEL_TYPE', type=str, default="dec", required=False, help="Best model save depend on segmentation or decision.") 50 | 51 | parser.add_argument('--AUGMENTATION', type=str2bool, default=False, required=False, help="Wheter to use data augmentation.") 52 | 53 | parser.add_argument('--USE_NEGATIVES', type=str, default=None, required=False, help="Wheter to use negative samples with CRACK500 dataset.") 54 | parser.add_argument('--VAL_NEG', type=str, default=None, required=False, help="Wheter to use negative samples in validation set with CRACK500 dataset.") 55 | 56 | parser.add_argument('--OPTIMIZER', type=str, default="sgd", required=False, help="Optimizer to be used.") 57 | parser.add_argument('--SCHEDULER', type=float, nargs="+", default=None, required=False, help="Learning rate scheduler parameters to be used.") 58 | 59 | parser.add_argument('--HARD_NEG_MINING', type=float, nargs="+", default=None, required=False, help="Hard negative mining parameters. 
First parameter is hard_sample_size, second hard_samples_selected_min_percent.") 60 | 61 | parser.add_argument('--PXL_DISTANCE', type=int, default=2, required=False, help="Pixel distance for Pr, Re and F1 metrics at evaluation.") 62 | 63 | parser.add_argument('--SEG_BLACK', type=str2bool, default=False, required=False, help="Wheter to use segmentation resetting.") 64 | parser.add_argument('--THR_ADJUSTMENT', type=float, default=None, required=False, help="Segmentation threshold adjustment.") 65 | 66 | parser.add_argument('--BCE_LOSS_W', type=str2bool, default=False, required=False, help="Wheter to use BCE pos_weight parameter.") 67 | 68 | parser.add_argument('--TRAIN_SPLIT', type=int, default=None, required=False, help="Index of train split to use as validation set.") 69 | 70 | args = parser.parse_args() 71 | 72 | return args 73 | 74 | if __name__ == '__main__': 75 | args = parse_args() 76 | 77 | configuration = Config() 78 | configuration.merge_from_args(args) 79 | configuration.init_extra() 80 | 81 | end2end = End2End(cfg=configuration) 82 | end2end.train() -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | from tkinter import image_names 2 | from unicodedata import decimal 3 | import matplotlib 4 | 5 | matplotlib.use('Agg') 6 | import matplotlib.pyplot as plt 7 | import numpy as np 8 | from sklearn.metrics import precision_recall_curve, roc_curve, auc 9 | import pandas as pd 10 | import os 11 | import errno 12 | import pickle 13 | import cv2 14 | import shutil 15 | import torch 16 | from matplotlib.colors import ListedColormap 17 | 18 | 19 | def create_folder(folder, exist_ok=True): 20 | try: 21 | os.makedirs(folder) 22 | except OSError as e: 23 | if e.errno != errno.EEXIST or not exist_ok: 24 | raise 25 | 26 | def save_current_learning_method(save_path): 27 | create_folder(f"{save_path}/learning_method") 28 | files = ['end2end.py', 
'utils.py', 'models.py'] 29 | for file in files: 30 | shutil.copy2(file, f"{save_path}/learning_method/{file}") 31 | 32 | def calc_confusion_mat(D, Y): 33 | FP = (D != Y) & (Y.astype(np.bool) == False) 34 | FN = (D != Y) & (Y.astype(np.bool) == True) 35 | TN = (D == Y) & (Y.astype(np.bool) == False) 36 | TP = (D == Y) & (Y.astype(np.bool) == True) 37 | 38 | return FP, FN, TN, TP 39 | 40 | 41 | def plot_sample(image_name, image, segmentation, label, save_dir, decision=None, blur=True, plot_seg=False): 42 | plt.figure() 43 | plt.clf() 44 | plt.subplot(1, 4, 1) 45 | plt.xticks([]) 46 | plt.yticks([]) 47 | plt.title('Input image') 48 | if image.shape[0] < image.shape[1]: 49 | image = np.transpose(image, axes=[1, 0, 2]) 50 | segmentation = np.transpose(segmentation) 51 | label = np.transpose(label) 52 | if image.shape[2] == 1: 53 | plt.imshow(image, cmap="gray") 54 | else: 55 | plt.imshow(image) 56 | 57 | plt.subplot(1, 4, 2) 58 | plt.xticks([]) 59 | plt.yticks([]) 60 | plt.title('Groundtruth') 61 | plt.imshow(label, cmap="gray") 62 | 63 | plt.subplot(1, 4, 3) 64 | plt.xticks([]) 65 | plt.yticks([]) 66 | if decision is None: 67 | plt.title('Output') 68 | else: 69 | plt.title(f"Output:\n{decision:.5f}") 70 | # display max 71 | vmax_value = max(1, np.max(segmentation)) 72 | plt.imshow(segmentation, cmap="jet", vmax=vmax_value) 73 | 74 | plt.subplot(1, 4, 4) 75 | plt.xticks([]) 76 | plt.yticks([]) 77 | plt.title('Output\nscaled') 78 | if blur: 79 | normed = segmentation / segmentation.max() if segmentation.max() > 0 else segmentation 80 | blured = cv2.blur(normed, (32, 32)) 81 | plt.imshow(((blured / blured.max() if blured.max() > 0 else blured) * 255).astype(np.uint8), cmap="jet") 82 | else: 83 | plt.imshow(((segmentation / segmentation.max() if segmentation.max() > 0 else segmentation) * 255).astype(np.uint8), cmap="jet") 84 | 85 | out_prefix = '{:.3f}_'.format(decision) if decision is not None else '' 86 | plt.savefig(f"{save_dir}/{out_prefix}result_{image_name}.jpg", 
bbox_inches='tight', dpi=300) 87 | plt.close() 88 | 89 | if plot_seg: 90 | jet_seg = cv2.applyColorMap((segmentation * 255).astype(np.uint8), cv2.COLORMAP_JET) 91 | cv2.imwrite(f"{save_dir}/{out_prefix}_segmentation_{image_name}.png", jet_seg) 92 | 93 | 94 | def evaluate_metrics(samples, results_path, run_name, segmentation_predicted, segmentation_truth, images, dice_threshold, dataset_kind): 95 | samples = np.array(samples) 96 | 97 | img_names = samples[:, 4] 98 | predictions = samples[:, 0] 99 | labels = samples[:, 3].astype(np.float32) 100 | 101 | metrics = get_metrics(labels, predictions) 102 | dice_mean, dice_std, iou_mean, iou_std = dice_iou(segmentation_predicted, segmentation_truth, dice_threshold, images, img_names, results_path) 103 | 104 | df = pd.DataFrame( 105 | data={'prediction': predictions, 106 | 'decision': metrics['decisions'], 107 | 'ground_truth': labels, 108 | 'img_name': img_names}) 109 | df.to_csv(os.path.join(results_path, 'results.csv'), index=False) 110 | 111 | print( 112 | f'{run_name} EVAL on {dataset_kind} AUC={metrics["AUC"]:f}, and AP={metrics["AP"]:f}, w/ best thr={metrics["best_thr"]:f} at f-m={metrics["best_f_measure"]:.3f} and FP={sum(metrics["FP"]):d}, FN={sum(metrics["FN"]):d}\nDice: mean: {dice_mean:f}, std: {dice_std:f}, IOU: mean: {iou_mean:f}, std: {iou_std:f}, Dice Threshold: {dice_threshold:f}') 113 | 114 | with open(os.path.join(results_path, 'metrics.pkl'), 'wb') as f: 115 | pickle.dump(metrics, f) 116 | f.close() 117 | 118 | plt.figure(1) 119 | plt.clf() 120 | plt.plot(metrics['recall'], metrics['precision']) 121 | plt.title('Average Precision=%.4f' % metrics['AP']) 122 | plt.xlabel('Recall') 123 | plt.ylabel('Precision') 124 | plt.savefig(f"{results_path}/precision-recall", bbox_inches='tight', dpi=200) 125 | 126 | plt.figure(1) 127 | plt.clf() 128 | plt.plot(metrics['FPR'], metrics['TPR']) 129 | plt.title('AUC=%.4f' % metrics['AUC']) 130 | plt.xlabel('False positive rate') 131 | plt.ylabel('True positive rate') 132 | 
plt.savefig(f"{results_path}/ROC", bbox_inches='tight', dpi=200) 133 | 134 | 135 | def get_metrics(labels, predictions): 136 | metrics = {} 137 | precision, recall, thresholds = precision_recall_curve(labels, predictions) 138 | metrics['precision'] = precision 139 | metrics['recall'] = recall 140 | metrics['thresholds'] = thresholds 141 | f_measures = 2 * np.multiply(recall, precision) / (recall + precision + 1e-8) 142 | metrics['f_measures'] = f_measures 143 | ix_best = np.argmax(f_measures) 144 | metrics['ix_best'] = ix_best 145 | best_f_measure = f_measures[ix_best] 146 | metrics['best_f_measure'] = best_f_measure 147 | best_thr = thresholds[ix_best] 148 | metrics['best_thr'] = best_thr 149 | FPR, TPR, _ = roc_curve(labels, predictions) 150 | metrics['FPR'] = FPR 151 | metrics['TPR'] = TPR 152 | AUC = auc(FPR, TPR) 153 | metrics['AUC'] = AUC 154 | AP = auc(recall, precision) 155 | metrics['AP'] = AP 156 | decisions = predictions >= best_thr 157 | metrics['decisions'] = decisions 158 | FP, FN, TN, TP = calc_confusion_mat(decisions, labels) 159 | metrics['FP'] = FP 160 | metrics['FN'] = FN 161 | metrics['TN'] = TN 162 | metrics['TP'] = TP 163 | metrics['accuracy'] = (sum(TP) + sum(TN)) / (sum(TP) + sum(TN) + sum(FP) + sum(FN)) 164 | return metrics 165 | 166 | def save_predicted_segmentation(predicted_segmentation, sample_name, run_path): 167 | save_folder = f"{run_path}/seg_pred" 168 | if not os.path.exists(save_folder): 169 | create_folder(save_folder) 170 | plt.imsave(f"{save_folder}/{sample_name}.png", predicted_segmentation, cmap='gray', vmin=0, vmax=1, dpi=200) 171 | 172 | def dice_iou(segmentation_predicted, segmentation_truth, seg_thresholds, images=None, image_names=None, run_path=None, decisions=None, save_images=False, adjusted_threshold=None): 173 | 174 | results_dice = [] 175 | results_iou = [] 176 | results_f1 = [] 177 | results_pr = [] 178 | results_re = [] 179 | adjusted_thr = {"Dice": [], "IoU": [], "F1": []} 180 | 181 | if images is not None: 182 
| if not (len(segmentation_predicted) == len(segmentation_truth) == len(images) == len(image_names)): 183 | raise ValueError('Not equal size of segmentation masks or images') 184 | else: 185 | if not (len(segmentation_predicted) == len(segmentation_truth)): 186 | raise ValueError('Not equal size of segmentation masks') 187 | 188 | # Save folder 189 | if run_path is not None: 190 | save_folder = f"{run_path}/dices" 191 | create_folder(save_folder) 192 | if save_images: 193 | save_folder_seg_pred = f"{run_path}/seg_pred" 194 | save_folder_seg_pred_bin = f"{run_path}/seg_pred_bin" 195 | create_folder(save_folder_seg_pred) 196 | create_folder(save_folder_seg_pred_bin) 197 | 198 | # For each pair of segmentation masks we calculate dice and IOU 199 | for i in range(len(segmentation_predicted)): 200 | image = images[i] 201 | image_name = image_names[i] 202 | seg_pred = segmentation_predicted[i] 203 | seg_true_bin = segmentation_truth[i].astype(np.uint8) 204 | 205 | # Thresholds 206 | thr_dice = seg_thresholds["dice_threshold"] 207 | thr_iou = seg_thresholds["iou_threshold"] 208 | thr_f1 = seg_thresholds["f1_threshold"] 209 | 210 | # Masks 211 | seg_pred_bin_dice = (seg_pred > thr_dice).astype(np.uint8) 212 | seg_pred_bin_iou = (seg_pred > seg_thresholds["iou_threshold"]).astype(np.uint8) 213 | seg_pred_bin_f1 = (seg_pred > seg_thresholds["f1_threshold"]).astype(np.uint8) 214 | 215 | # Adjusted thresholds 216 | if adjusted_threshold and decisions[i]: 217 | if seg_pred_bin_dice.max() == 0: 218 | print(seg_pred.max()) 219 | thr_dice *= adjusted_threshold 220 | seg_pred_bin_dice = (seg_pred > thr_dice).astype(np.uint8) 221 | adjusted_thr['Dice'].append(image_name) 222 | if seg_pred_bin_iou.max() == 0: 223 | thr_iou *= adjusted_threshold 224 | seg_pred_bin_iou = (seg_pred > thr_iou).astype(np.uint8) 225 | adjusted_thr['IoU'].append(image_name) 226 | if seg_pred_bin_f1.max() == 0: 227 | thr_f1 *= adjusted_threshold 228 | seg_pred_bin_f1 = (seg_pred > thr_f1).astype(np.uint8) 
229 | adjusted_thr['F1'].append(image_name) 230 | 231 | result_dice = dice(seg_true_bin, seg_pred_bin_dice) 232 | results_dice += [result_dice] 233 | 234 | result_iou = iou(seg_true_bin, seg_pred_bin_iou) 235 | results_iou += [result_iou] 236 | 237 | re = recall(seg_true_bin, seg_pred_bin_f1) 238 | pr = precision(seg_true_bin, seg_pred_bin_f1) 239 | result_f1 = (2 * pr * re) / (pr + re) 240 | results_f1 += [result_f1] 241 | results_pr += [pr] 242 | results_re += [re] 243 | 244 | if save_images: 245 | plt.imsave(f"{save_folder_seg_pred}/{image_name}.png", seg_pred, cmap='gray', vmin=0, vmax=1, dpi=200) 246 | plt.imsave(f"{save_folder_seg_pred_bin}/{image_name}.png", seg_pred_bin_dice, cmap='gray', vmin=0, vmax=1, dpi=200) 247 | 248 | plt.figure() 249 | plt.clf() 250 | 251 | plt.subplot(1, 5, 1) 252 | plt.xticks([]) 253 | plt.yticks([]) 254 | plt.title('Image') 255 | plt.imshow(image) 256 | plt.xlabel(f"Decision:\n{decisions[i]}") 257 | 258 | plt.subplot(1, 5, 2) 259 | plt.xticks([]) 260 | plt.yticks([]) 261 | plt.title('GT') 262 | plt.imshow(seg_true_bin, cmap='gray', vmin=0, vmax=1) 263 | 264 | plt.subplot(1, 5, 3) 265 | plt.xticks([]) 266 | plt.yticks([]) 267 | plt.title('Segmentation') 268 | plt.imshow(seg_pred, cmap='gray', vmin=0, vmax=1) 269 | plt.xlabel(f"IOU: {round(result_iou.item(), 4)}\nThr: {round(thr_iou, 3)}") 270 | 271 | plt.subplot(1, 5, 4) 272 | plt.xticks([]) 273 | plt.yticks([]) 274 | plt.title('Segmentation\nmask') 275 | plt.imshow(seg_pred_bin_dice, cmap='gray', vmin=0, vmax=1) 276 | plt.xlabel(f"Dice: {round(result_dice.item(), 4)}\nThr: {round(thr_dice, 3)}") 277 | 278 | plt.subplot(1, 5, 5) 279 | plt.xticks([]) 280 | plt.yticks([]) 281 | plt.title('Overlap') 282 | plt.imshow((seg_pred_bin_dice * 2) + seg_true_bin, cmap=ListedColormap([['black', 'gray', 'red', 'white'][i] for i in np.unique((seg_pred_bin_dice * 2) + seg_true_bin)])) 283 | plt.xlabel(f"F1: {round(result_f1.item(), 4)}\nThr: {round(thr_f1, 3)}") 284 | 285 | 
plt.savefig(f"{save_folder}/{round(result_dice.item(), 5):.3f}_dice_{image_name}.pdf", bbox_inches='tight', dpi=600) 286 | plt.close() 287 | 288 | # Write examples with adjusted threshold to txt file 289 | adjusted_threshold_s = set() 290 | for m, n in adjusted_thr.items(): 291 | if n: 292 | txt_file = f"{m}_adjusted_threshold.txt" 293 | file = open(os.path.join(run_path, txt_file), "w") 294 | for sample in n: 295 | file.write(sample + "\n") 296 | adjusted_threshold_s.add(sample) 297 | file.close() 298 | 299 | return np.mean(results_dice), np.std(results_dice), np.mean(results_iou), np.std(results_iou), np.mean(results_pr), np.std(results_pr), np.mean(results_re), np.std(results_re), len(adjusted_threshold_s) 300 | 301 | def segmentation_metrics(seg_truth, seg_predicted, two_pixel_threshold, samples=None, run_path=None, pxl_distance=2, adjusted_thresholds=None): 302 | # Save folder 303 | if run_path is not None: 304 | save_folder = f"{run_path}/seg_metrics" 305 | create_folder(save_folder) 306 | 307 | n_samples = len(seg_truth) 308 | kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (1 + pxl_distance * 2, 1 + pxl_distance * 2)) 309 | results = [] 310 | 311 | for i in range(n_samples): 312 | 313 | if adjusted_thresholds is not None: 314 | two_pixel_threshold = adjusted_thresholds[i] 315 | 316 | y_true = np.array(seg_truth[i]).astype(np.uint8) 317 | y_true_d = cv2.dilate(y_true, kernel) 318 | y_pred = (np.array(seg_predicted[i])>two_pixel_threshold).astype(np.uint8) 319 | 320 | tp_d = sum(sum((y_true_d==1)&(y_pred==1))).item() 321 | fp_d = sum(sum((y_true_d==0)&(y_pred==1))).item() 322 | fn = sum(sum((y_true==1)&(y_pred==0))).item() 323 | 324 | pr = tp_d / (tp_d + fp_d) if tp_d else 0 325 | re = tp_d / (tp_d + fn) if tp_d else 0 326 | f1 = (2 * pr * re) / (pr + re) if pr and re else 0 327 | 328 | results.append((pr, re, f1)) 329 | 330 | if samples is not None: 331 | image = samples['images'][i] 332 | image_name = samples['image_names'][i] 333 | decision = 
samples['decisions'][i] 334 | 335 | plt.figure() 336 | plt.clf() 337 | 338 | plt.subplot(1, 5, 1) 339 | plt.xticks([]) 340 | plt.yticks([]) 341 | plt.title('Image') 342 | plt.imshow(image) 343 | plt.xlabel(f"Decision:\n{decision}") 344 | 345 | plt.subplot(1, 5, 2) 346 | plt.xticks([]) 347 | plt.yticks([]) 348 | plt.title('GT') 349 | plt.imshow(seg_truth[i], cmap='gray') 350 | plt.xlabel(f"Seg thr: {round(two_pixel_threshold, 3)}") 351 | 352 | plt.subplot(1, 5, 3) 353 | plt.xticks([]) 354 | plt.yticks([]) 355 | plt.title('Segmentation') 356 | plt.imshow(seg_predicted[i], cmap='gray', vmin=0, vmax=1) 357 | plt.xlabel(f"Pr: {round(pr, 4)}") 358 | 359 | plt.subplot(1, 5, 4) 360 | plt.xticks([]) 361 | plt.yticks([]) 362 | plt.title('GT\nDilated') 363 | plt.imshow(y_true_d, cmap='gray', vmin=0, vmax=1) 364 | plt.xlabel(f"Re: {round(re, 4)}") 365 | 366 | plt.subplot(1, 5, 5) 367 | plt.xticks([]) 368 | plt.yticks([]) 369 | plt.title('Segmentation\nmask') 370 | plt.imshow(y_pred, cmap='gray', vmin=0, vmax=1) 371 | plt.xlabel(f"F1: {round(f1, 4)}") 372 | 373 | plt.savefig(f"{save_folder}/{round(f1, 3):.3f}_{image_name}.png", bbox_inches='tight', dpi=300) 374 | plt.close() 375 | 376 | pr = np.mean(np.array(results)[:, 0]) 377 | re = np.mean(np.array(results)[:, 1]) 378 | f1 = np.mean(np.array(results)[:, 2]) 379 | 380 | return pr, re, f1 381 | 382 | # SCCDNet metrics 383 | 384 | def dice(y_true, y_pred): 385 | return (2 * (y_true * y_pred).sum() + 1e-15) / (y_true.sum() + y_pred.sum() + 1e-15) 386 | 387 | def iou(y_true, y_pred): 388 | intersection = (y_true * y_pred).sum() 389 | union = y_true.sum() + y_pred.sum() - intersection 390 | return (intersection + 1e-15) / (union + 1e-15) 391 | 392 | def precision(y_true, y_pred): 393 | TP = (y_true * y_pred).sum() 394 | FP = ((1-y_true)*y_pred).sum() 395 | return (TP + 1e-15) / (TP + FP + 1e-15) 396 | 397 | def recall(y_true, y_pred): 398 | TP = (y_true * y_pred).sum() 399 | FN = (y_true*(1 - y_pred)).sum() 400 | return (TP + 
1e-15) / (TP + FN + 1e-15) --------------------------------------------------------------------------------