├── .gitignore ├── .travis.yml ├── DecisionTree_Dependent.py ├── DecisionTree_Independent.py ├── LAS_scripts ├── LAScanopyclassify.bat └── LASconversion.bat ├── LICENSE ├── Obsolete_code ├── add_canopy_files.py ├── common-tree-filter.py ├── coordinate-filter.py ├── data-connector.py ├── dominant-filter.py ├── forest-filter.py ├── forest_grid_uncertain.py ├── growing_forest_grid.py ├── outlier-filter.py ├── relativemax.py └── tree_indexing.py ├── SVM LiDAR tree classifier.ipynb ├── data_processing.py ├── python-requirements.txt ├── readme.md ├── tree_processing.py └── treespecies.txt /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | *.csv 3 | *.zip 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "3.6" 4 | install: 5 | - pip install pycodestyle 6 | script: 7 | - pycodestyle --show-source --show-pep8 ./ 8 | -------------------------------------------------------------------------------- /DecisionTree_Dependent.py: -------------------------------------------------------------------------------- 1 | from sklearn import tree 2 | import csv 3 | import numpy as np 4 | import graphviz 5 | import matplotlib.pylab as plt 6 | 7 | # Converts relative_learning_data.csv to a list 8 | data = [] 9 | with open('relative_learning_data.csv') as f: 10 | f = csv.reader(f, delimiter=',') 11 | for line in f: 12 | data.append(line) 13 | 14 | features = data[0] 15 | classes = [] 16 | data = data[1:] 17 | # ID's of columns that aren't usefull for decision trees. 18 | removed_features = [0, 1, 2, 3, 4, 5] 19 | 20 | # Removes these columns from the feature names and the dataset. 21 | cl_features = [] 22 | cl_data = [] 23 | for i in range(len(features)): 24 | if i not in removed_features: 25 | cl_features.append(features[i]) 26 | for line in data: 27 | newline = [] 28 | for i in range(len(features)): 29 | if i not in removed_features: 30 | newline.append(line[i]) 31 | cl_data.append(newline) 32 | features = cl_features 33 | data = cl_data 34 | 35 | 36 | def decisiontree(data): 37 | Xt = [] 38 | Yt = [] 39 | Xv = [] 40 | Yv = [] 41 | # Adds 90% of the data to the trainingsset, 10% to the validationset. 42 | np.random.shuffle(data) 43 | trainingsize = 0.9 * len(data) 44 | training = data[:int(trainingsize)] 45 | validation = data[int(trainingsize):] 46 | 47 | # Creates the X and Y parts of the training and test sets. 48 | # Also fills the tree species list (classes) with all different species. 49 | for line in training: 50 | if line[-1] not in classes: 51 | classes.append(line[-1]) 52 | Xt.append(line[0:-1]) 53 | Yt.append(line[-1]) 54 | for line in validation: 55 | if line[-1] not in classes: 56 | return decisiontree(data) 57 | Xv.append(line[0:-1]) 58 | Yv.append(line[-1]) 59 | 60 | clf = tree.DecisionTreeClassifier() 61 | clf = clf.fit(Xt, Yt) 62 | return clf, Xt, Yt, Xv, Yv 63 | 64 | 65 | clf, Xt, Yt, Xv, Yv = decisiontree(data) 66 | # Sorts the classes alphabetically, which makes them work as class_names 67 | classes.sort() 68 | 69 | # This creates an image of the decisiontree and exports it as a PDF. 
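# Note: rendering below also requires the Graphviz system binaries (the
# "dot" executable) to be installed and on the PATH, in addition to the
# Python graphviz package; graph.render('tree', view=True) writes
# 'tree.pdf' and opens it in the default viewer.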
70 | dot_data = tree.export_graphviz(clf, 71 | out_file=None, 72 | class_names=classes, 73 | feature_names=features[:-1], 74 | rounded=True, 75 | special_characters=True) 76 | graph = graphviz.Source(dot_data) 77 | graph.render('tree', view=True) 78 | 79 | 80 | # This calculates the average correctness for the dataset. 81 | def avgcost(data, n): 82 | totalcost = 0 83 | for i in range(n): 84 | clf, Xt, Yt, Xv, Yv = decisiontree(data) 85 | totalcost = totalcost + clf.score(Xv, Yv) 86 | return totalcost / n 87 | 88 | 89 | print('Average Correctness: ' + str(avgcost(data, 500))) 90 | 91 | 92 | # This calculates the usage (/importance) for all features in the decisiontree. 93 | def avgimportance(data, n, features): 94 | totalimportance = [] 95 | for i in range(n): 96 | clf, _, _, _, _ = decisiontree(data) 97 | importance = clf.feature_importances_ 98 | if len(totalimportance) == 0: 99 | totalimportance = importance 100 | else: 101 | totalimportance = [ 102 | x + y for x, 103 | y in zip( 104 | totalimportance, 105 | importance)] 106 | for i in range(len(importance)): 107 | print(str(features[i]) + ': ' + str(totalimportance[i] / n)) 108 | 109 | 110 | avgimportance(data, 500, features) 111 | -------------------------------------------------------------------------------- /DecisionTree_Independent.py: -------------------------------------------------------------------------------- 1 | from sklearn import tree 2 | import csv 3 | import numpy as np 4 | import graphviz 5 | import matplotlib.pylab as plt 6 | 7 | # Converts relative_learning_data.csv to a list 8 | data = [] 9 | with open('relative_learning_data.csv') as f: 10 | f = csv.reader(f, delimiter=',') 11 | for line in f: 12 | data.append(line) 13 | 14 | features = data[0] 15 | classes = [] 16 | data = data[1:] 17 | # ID's of columns that aren't usefull for decision trees. 18 | removed_features = [0, 2, 3, 4, 5] 19 | 20 | # Removes these columns from the feature names and the dataset. 21 | cl_features = [] 22 | cl_data = [] 23 | for i in range(len(features)): 24 | if i not in removed_features: 25 | cl_features.append(features[i]) 26 | for line in data: 27 | newline = [] 28 | for i in range(len(features)): 29 | if i not in removed_features: 30 | newline.append(line[i]) 31 | cl_data.append(newline) 32 | features = cl_features 33 | data = cl_data 34 | 35 | 36 | def decisiontree(data): 37 | Xt = [] 38 | Yt = [] 39 | Xv = [] 40 | Yv = [] 41 | # Finds all polygonID's, randomly adds 90% of ID's to the trainingset. 42 | polygonIDs = [] 43 | for line in data: 44 | if line[0] not in polygonIDs: 45 | polygonIDs.append(line[0]) 46 | np.random.shuffle(polygonIDs) 47 | trainingsize = 0.9 * len(polygonIDs) 48 | trainingIDs = polygonIDs[:int(trainingsize)] 49 | 50 | # Assigns each line in the list to the training/test dataset. 51 | # Also fills the tree species list (classes) with all different species. 52 | training = [] 53 | validation = [] 54 | for line in data: 55 | if line[-1] not in classes: 56 | classes.append(line[-1]) 57 | if line[0] in trainingIDs: 58 | training.append(line) 59 | else: 60 | validation.append(line) 61 | # Creates the X and Y parts of the training and test sets. 
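# The polygonID in column 0 was only needed for the split above; it is
# dropped from the feature vectors here (line[1:-1]), so rows from the
# same polygon never end up in both the training and the validation set.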
62 | for line in training: 63 | Xt.append(line[1:-1]) 64 | Yt.append(line[-1]) 65 | for line in validation: 66 | Xv.append(line[1:-1]) 67 | Yv.append(line[-1]) 68 | 69 | clf = tree.DecisionTreeClassifier(min_impurity_split=0.77) 70 | clf = clf.fit(Xt, Yt) 71 | return clf, Xt, Yt, Xv, Yv 72 | 73 | 74 | clf, Xt, Yt, Xv, Yv = decisiontree(data) 75 | 76 | # Sorts the classes alphabetically, which makes them work as class_names 77 | classes.sort() 78 | 79 | # This creates an image of the decisiontree and exports it as a PDF 80 | dot_data = tree.export_graphviz(clf, 81 | out_file=None, 82 | class_names=classes, 83 | feature_names=features[1:-1], 84 | rounded=True, 85 | special_characters=True) 86 | graph = graphviz.Source(dot_data) 87 | graph.render('tree', view=True) 88 | 89 | 90 | # This calculates the average correctness for the dataset. 91 | def avgcost(data, n): 92 | totalcost = 0 93 | for i in range(n): 94 | clf, Xt, Yt, Xv, Yv = decisiontree(data) 95 | totalcost = totalcost + clf.score(Xv, Yv) 96 | return totalcost / n 97 | 98 | 99 | print('Average Correctness: ' + str(avgcost(data, 500))) 100 | 101 | 102 | # This calculates the usage (/importance) for all features in the decisiontree. 103 | def avgimportance(data, n, features): 104 | totalimportance = [] 105 | for i in range(n): 106 | clf, _, _, _, _ = decisiontree(data) 107 | importance = clf.feature_importances_ 108 | if len(totalimportance) == 0: 109 | totalimportance = importance 110 | else: 111 | totalimportance = [ 112 | x + y for x, 113 | y in zip( 114 | totalimportance, 115 | importance)] 116 | for i in range(len(importance)): 117 | print(str(features[i]) + ': ' + str(totalimportance[i] / n)) 118 | 119 | 120 | avgimportance(data, 500, features) 121 | -------------------------------------------------------------------------------- /LAS_scripts/LAScanopyclassify.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | set outputfolder=LiDAR_data 3 | 4 | echo Running LAScanopy... (this may take a while) 5 | mkdir %outputfolder%\canopy 6 | for %%a in (%outputfolder%\classify\*.laz) do ( 7 | echo - Running canopy on %%~nxa... 8 | lascanopy -i LiDAR_data\classify\%%~nxa -names -lor LiDAR_data\ID_forest_grid_coords.csv -dns -p 5 10 25 50 75 90 -min -max -avg -std -ske -kur -qav -cov -c 2 4 10 50 -int_min -int_max -int_avg -int_qav -int_std -int_ske -int_kur -int_c 128 256 1024 -int_p 25 50 75 -o LiDAR_data\canopy\%%~na.csv 9 | ) 10 | 11 | PAUSE -------------------------------------------------------------------------------- /LAS_scripts/LASconversion.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | set filelist=26gn1 32fz2 33az1 33az2 32hn2 33cn1 33cn2 32hz2 33cz1 33cz2 28cn1 28cn2 28cz2 3 | set outputfolder=LiDAR_data2 4 | 5 | if not exist %outputfolder% ( 6 | echo LiDAR_data folder doesn't exist yet. Creating... 7 | mkdir %outputfolder% 8 | ) 9 | 10 | echo Stage 1: running LASmerge... (this may take a while) 11 | for %%a in (%filelist%) do ( 12 | if exist g%%a.laz ( 13 | if exist u%%a.laz ( 14 | echo - Merging %%a... 15 | lasmerge -i g%%a.laz u%%a.laz -o %outputfolder%\%%a.laz 16 | ) 17 | ) 18 | ) 19 | 20 | echo Stage 2: running LASindex... (this may take a while) 21 | for %%a in (%outputfolder%\*.laz) do ( 22 | echo - Indexing %%a... 23 | lasindex -i %%a 24 | ) 25 | 26 | echo Stage 3: running Tiling (this may take a while) 27 | mkdir %outputfolder%\tiling 28 | for %%a in (%outputfolder%\*.laz) do ( 29 | echo - Running on %%~na... 
30 | lastile -i %%a -o %outputfolder%\tiling\%%~na 31 | ) 32 | 33 | echo Stage 4: running LASground_new (this may take a while) 34 | mkdir %outputfolder%\ground 35 | for %%a in (%outputfolder%\tiling\*.las) do ( 36 | echo - Running on %%~nxa... 37 | lasground_new -i %outputfolder%\tiling\%%~nxa -o %outputfolder%\ground\%%~na.laz -ignore_class 7 38 | ) 39 | 40 | echo Stage 5: running LASheight (this may take a while) 41 | mkdir %outputfolder%\height 42 | for %%a in (%outputfolder%\ground\*.laz) do ( 43 | echo - Running on %%~nxa... 44 | lasheight -i %outputfolder%\ground\%%~nxa -o %outputfolder%\height\%%~na.laz 45 | ) 46 | 47 | echo Conversion finished 48 | PAUSE 49 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Obsolete_code/add_canopy_files.py: -------------------------------------------------------------------------------- 1 | ''' 2 | add_canopy_files.py 3 | 4 | usage for map: add_canopy_files.py input_directory output.csv 5 | usage for single file: add_canopy_files.py input.csv output.csv 6 | 7 | Adds together all csv files generated from lascanopy and removes lines 8 | with missing values. Can also be used to remove lines with missing values 9 | from a single csv file. 
10 | The folder containing the csv files that have to be combined has to be 11 | present in the same directory as this file. 12 | ''' 13 | import os 14 | from sys import argv 15 | 16 | dirname = argv[1] 17 | try: 18 | outputfile = argv[2] 19 | except BaseException: 20 | print("no output file given, output saved to 'combined_canopy.csv'") 21 | outputfile = "combined_canopy.csv" 22 | 23 | lines = [] 24 | header = None 25 | if dirname.endswith('.csv'): 26 | with open(dirname, 'r') as f: 27 | for i, line in enumerate(f, 0): 28 | if i != 0: 29 | lines.append(line) 30 | else: 31 | header = line 32 | else: 33 | for file in os.listdir(dirname): 34 | if file.endswith(".csv"): 35 | with open(dirname + '/' + file, 'r') as f: 36 | for i, line in enumerate(f, 0): 37 | if i != 0: 38 | lines.append(line) 39 | else: 40 | header = line 41 | 42 | with open(outputfile, 'w') as f: 43 | f.write(header) 44 | for line in lines: 45 | line2 = line.split(',') 46 | if "-" not in line2: 47 | f.write(line) 48 | -------------------------------------------------------------------------------- /Obsolete_code/common-tree-filter.py: -------------------------------------------------------------------------------- 1 | import csv 2 | 3 | with open("learning_data.csv") as f1: 4 | with open("common_learning_data.csv", 'w') as f2: 5 | lines = csv.reader(f1, delimiter=";") 6 | data = [] 7 | for line in lines: 8 | data.append(line) 9 | seen = [] 10 | for line1 in data: 11 | if line1 == data[0]: 12 | f2.write(';'.join(line1) + '\n') 13 | c = line1[-1] 14 | if c not in seen: 15 | counter = 0 16 | for line2 in data: 17 | if line2[-1] == c: 18 | counter += 1 19 | if counter >= 50: 20 | seen.append(c) 21 | f2.write(';'.join(line1) + '\n') 22 | else: 23 | f2.write(';'.join(line1) + '\n') 24 | -------------------------------------------------------------------------------- /Obsolete_code/coordinate-filter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Filters the RD-triangle coordinates from WKT to a min/max-x/y grid. 3 | 4 | Call from the command line with the name of the file that is to be converted 5 | as argument. This file should have a polygonID in the 2nd column (index 1) 6 | and the actual WKT in the 6th column (index 5). 7 | Returns a .csv file containing the polygon IDs and coordinate boundaries. 
8 | """ 9 | 10 | import numpy as np 11 | import sys 12 | 13 | # load data: 14 | filename = sys.argv[1] 15 | trees = np.loadtxt(open(filename, 'rb'), dtype=str, delimiter=';') 16 | 17 | # filter out the plot id and coordinates 18 | raw_polys = [] 19 | polys = [] 20 | for line in trees: 21 | raw_polys.append([line[1], line[5]]) 22 | for line in raw_polys: 23 | if line not in polys: 24 | polys.append(line) 25 | 26 | # the index of the first coordinate in the polygon column 27 | # this is determined at runtime because it can differ 28 | # from computer to computer 29 | indice = polys[0][1].find('(') + 2 30 | 31 | # extract the coordinates from the plot string 32 | plots = [] 33 | for line in polys: 34 | X = [] 35 | Y = [] 36 | polyID = line[0] 37 | allCoords = line[1][indice:-2] 38 | coords = allCoords.split(',') 39 | for coord in coords: 40 | x, y = coord.split(' ') 41 | X.append(x) 42 | Y.append(y) 43 | plots.append([polyID, min(X), min(Y), max(X), max(Y)]) 44 | 45 | # write coordinates to file with corresponding polygonID 46 | with open('ID_forest_grid_coords.csv', 'a') as result_file: 47 | result_file.write("polygonID min_x min_y max_x max_y\n") 48 | for line in plots: 49 | stringLine = ' '.join(line) 50 | result_file.write(stringLine + '\n') 51 | -------------------------------------------------------------------------------- /Obsolete_code/data-connector.py: -------------------------------------------------------------------------------- 1 | """ 2 | Connect the input and labels and print to file. 3 | 4 | Call from the command line with the names of the files that are to be combined 5 | as arguments. The first file should be the inputs and the second file 6 | the labels (which is dominant_trees.csv). 7 | Returns a .csv file containing the data required for machine learning. 
8 | """ 9 | 10 | import sys 11 | import csv 12 | 13 | # filename1 = "combined_canopy.csv" 14 | filename1 = sys.argv[1] 15 | # filename2 = "dominant_trees.csv" 16 | filename2 = sys.argv[2] 17 | 18 | with open(filename1) as f1: 19 | with open(filename2) as f2: 20 | with open('learning_data.csv', 'a') as result_file: 21 | f1 = csv.reader(f1, delimiter=';') 22 | f2 = csv.reader(f2, delimiter=';') 23 | latindict = {} 24 | # uses dominant_trees to find the name, and put it in a dictionary 25 | for line in f2: 26 | latindict[line[1]] = line[7] 27 | # use the dictionary to find the treename and add it to the line 28 | for line in f1: 29 | if line[0] == 'index': 30 | line.append('latinname') 31 | else: 32 | latinname = latindict[line[1]] 33 | line.append(latinname) 34 | outputline = '' 35 | for item in line: 36 | outputline = outputline + item 37 | if item != line[-1]: 38 | outputline = outputline + ';' 39 | result_file.write(outputline + '\n') 40 | -------------------------------------------------------------------------------- /Obsolete_code/dominant-filter.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import operator 3 | 4 | with open('forest_filter_result.csv') as f: 5 | with open('dominant_trees.csv', 'a') as result_file: 6 | convertedf = csv.reader(f, delimiter=';') 7 | # groups all lines in the csv file on polygonID 8 | sortedf = sorted(convertedf, key=operator.itemgetter(1)) 9 | current_polygon = '' 10 | dominant_tree = '' 11 | dominant_tree_ratio = 0 12 | for line in sortedf: 13 | # if a new polygonID is found, write the dominant tree of the 14 | # previous polygonID to the resultfile 15 | if line[1] != current_polygon: 16 | if current_polygon != '': 17 | for data in dominant_tree: 18 | rebuildline = rebuildline + data 19 | if data != dominant_tree[-1]: 20 | rebuildline = rebuildline + ';' 21 | result_file.write(rebuildline + '\n') 22 | current_polygon = line[1] 23 | dominant_tree_ratio = 0 24 | rebuildline = '' 25 | # if current quantity percentage is higher than highest 26 | # percentage for this polygonID, replace the old one 27 | if float(line[3]) > float(dominant_tree_ratio): 28 | dominant_tree = line 29 | dominant_tree_ratio = line[3] 30 | # adds the last dominant_tree to the result file 31 | for data in dominant_tree: 32 | rebuildline = rebuildline + data 33 | if data != dominant_tree[-1]: 34 | rebuildline = rebuildline + ';' 35 | result_file.write(rebuildline + '\n') 36 | -------------------------------------------------------------------------------- /Obsolete_code/forest-filter.py: -------------------------------------------------------------------------------- 1 | treespecies = [] 2 | with open('treespecies.txt') as b: 3 | for species in b: 4 | treespecies.append(species) 5 | 6 | with open('joined_db2.csv') as f: 7 | with open('forest_filter_result.csv', 'a') as result_file: 8 | for line in f: 9 | if 'Forest' in line: 10 | for species in treespecies: 11 | if species in line: 12 | result_file.write(line) 13 | -------------------------------------------------------------------------------- /Obsolete_code/forest_grid_uncertain.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import sys 3 | filename = sys.argv[1] 4 | 5 | plots_n = [] 6 | plots_e = [] 7 | plots_s = [] 8 | plots_w = [] 9 | plots_nw = [] 10 | plots_ne = [] 11 | plots_sw = [] 12 | plots_se = [] 13 | plots_alldirections = [] 14 | 15 | with open(filename) as f: 16 | convertedf = csv.reader(f, delimiter=';') 17 
| # extract the coordinates from the plot string 18 | for line in convertedf: 19 | X = [] 20 | Y = [] 21 | # the uncertainty in meters for this line 22 | u = int(line[4]) 23 | polyID = line[1] 24 | indice = line[5].find('(') + 2 25 | allCoords = line[5][indice:-2] 26 | coords = allCoords.split(',') 27 | for coord in coords: 28 | x, y = coord.split(' ') 29 | X.append(x) 30 | Y.append(y) 31 | # adds the uncertainty in order to get the coordinates for when the 32 | # coordinates are incorrect and are actually in that direction 33 | plots_n.append([polyID, int(min(X)), int(min(Y)) + 34 | u, int(min(X)), int(min(Y)) + u]) 35 | plots_e.append([polyID, int(min(X)) + u, int(min(Y)), 36 | int(min(X)) + u, int(min(Y))]) 37 | plots_s.append([polyID, int(min(X)), int(min(Y)) - 38 | u, int(min(X)), int(min(Y)) - u]) 39 | plots_w.append([polyID, int(min(X)) - u, int(min(Y)), 40 | int(min(X)) - u, int(min(Y))]) 41 | plots_nw.append([polyID, 42 | int(min(X)) - u, 43 | int(min(Y)) + u, 44 | int(min(X)) - u, 45 | int(min(Y)) + u]) 46 | plots_ne.append([polyID, 47 | int(min(X)) + u, 48 | int(min(Y)) + u, 49 | int(min(X)) + u, 50 | int(min(Y)) + u]) 51 | plots_sw.append([polyID, 52 | int(min(X)) - u, 53 | int(min(Y)) - u, 54 | int(min(X)) - u, 55 | int(min(Y)) - u]) 56 | plots_se.append([polyID, 57 | int(min(X)) + u, 58 | int(min(Y)) - u, 59 | int(min(X)) + u, 60 | int(min(Y)) - u]) 61 | plots_alldirections.append([polyID, 62 | int(min(X)) - u, 63 | int(min(Y)) - u, 64 | int(min(X)) + u, 65 | int(min(Y)) + u]) 66 | 67 | # creates 9 files, for each uncertainty direction one 68 | with open('ID_uncertain_forest_north.csv', 'a') as result_file: 69 | result_file.write("polygonID min_x min_y max_x max_y\n") 70 | for line in plots_n: 71 | stringLine = '' 72 | for value in line: 73 | stringLine = stringLine + str(value) 74 | if value != line[-1]: 75 | stringLine = stringLine + ' ' 76 | result_file.write(stringLine + '\n') 77 | 78 | with open('ID_uncertain_forest_east.csv', 'a') as result_file: 79 | result_file.write("polygonID min_x min_y max_x max_y\n") 80 | for line in plots_e: 81 | stringLine = '' 82 | for value in line: 83 | stringLine = stringLine + str(value) 84 | if value != line[-1]: 85 | stringLine = stringLine + ' ' 86 | result_file.write(stringLine + '\n') 87 | 88 | with open('ID_uncertain_forest_south.csv', 'a') as result_file: 89 | result_file.write("polygonID min_x min_y max_x max_y\n") 90 | for line in plots_s: 91 | stringLine = '' 92 | for value in line: 93 | stringLine = stringLine + str(value) 94 | if value != line[-1]: 95 | stringLine = stringLine + ' ' 96 | result_file.write(stringLine + '\n') 97 | 98 | with open('ID_uncertain_forest_west.csv', 'a') as result_file: 99 | result_file.write("polygonID min_x min_y max_x max_y\n") 100 | for line in plots_w: 101 | stringLine = '' 102 | for value in line: 103 | stringLine = stringLine + str(value) 104 | if value != line[-1]: 105 | stringLine = stringLine + ' ' 106 | result_file.write(stringLine + '\n') 107 | 108 | with open('ID_uncertain_forest_northwest.csv', 'a') as result_file: 109 | result_file.write("polygonID min_x min_y max_x max_y\n") 110 | for line in plots_nw: 111 | stringLine = '' 112 | for value in line: 113 | stringLine = stringLine + str(value) 114 | if value != line[-1]: 115 | stringLine = stringLine + ' ' 116 | result_file.write(stringLine + '\n') 117 | 118 | with open('ID_uncertain_forest_northeast.csv', 'a') as result_file: 119 | result_file.write("polygonID min_x min_y max_x max_y\n") 120 | for line in plots_ne: 121 | stringLine = '' 
122 | for value in line: 123 | stringLine = stringLine + str(value) 124 | if value != line[-1]: 125 | stringLine = stringLine + ' ' 126 | result_file.write(stringLine + '\n') 127 | 128 | with open('ID_uncertain_forest_southwest.csv', 'a') as result_file: 129 | result_file.write("polygonID min_x min_y max_x max_y\n") 130 | for line in plots_sw: 131 | stringLine = '' 132 | for value in line: 133 | stringLine = stringLine + str(value) 134 | if value != line[-1]: 135 | stringLine = stringLine + ' ' 136 | result_file.write(stringLine + '\n') 137 | 138 | with open('ID_uncertain_forest_southeast.csv', 'a') as result_file: 139 | result_file.write("polygonID min_x min_y max_x max_y\n") 140 | for line in plots_se: 141 | stringLine = '' 142 | for value in line: 143 | stringLine = stringLine + str(value) 144 | if value != line[-1]: 145 | stringLine = stringLine + ' ' 146 | result_file.write(stringLine + '\n') 147 | 148 | with open('ID_uncertain_forest_alldirections.csv', 'a') as result_file: 149 | result_file.write("polygonID min_x min_y max_x max_y\n") 150 | for line in plots_alldirections: 151 | stringLine = '' 152 | for value in line: 153 | stringLine = stringLine + str(value) 154 | if value != line[-1]: 155 | stringLine = stringLine + ' ' 156 | result_file.write(stringLine + '\n') 157 | -------------------------------------------------------------------------------- /Obsolete_code/growing_forest_grid.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import sys 3 | filename = sys.argv[1] 4 | 5 | growing_plots = [] 6 | 7 | with open(filename) as f: 8 | convertedf = csv.reader(f, delimiter=';') 9 | # extract the coordinates from the plot string 10 | for line in convertedf: 11 | X = [] 12 | Y = [] 13 | # the uncertainty in meters for this line 14 | polyID = line[1] 15 | indice = line[5].find('(') + 2 16 | allCoords = line[5][indice:-2] 17 | coords = allCoords.split(',') 18 | for coord in coords: 19 | x, y = coord.split(' ') 20 | X.append(x) 21 | Y.append(y) 22 | size = int(max(X)) - int(min(X)) 23 | types = [0, -size, size, -size * 2, size * 2] 24 | typecombinations = [] 25 | for xtype in types: 26 | for ytype in types: 27 | typecombinations.append([xtype, ytype]) 28 | 29 | for ux, uy in typecombinations: 30 | growing_plots.append([polyID, 31 | int(min(X)) + ux, 32 | int(min(Y)) + uy, 33 | int(max(X)) + ux, 34 | int(max(Y)) + uy]) 35 | 36 | # creates 9 files, for each uncertainty direction one 37 | 38 | 39 | with open('ID_growing_forest.csv', 'a') as result_file: 40 | result_file.write("polygonID min_x min_y max_x max_y\n") 41 | for line in growing_plots: 42 | stringLine = '' 43 | for value in line: 44 | stringLine = stringLine + str(value) 45 | if value != line[-1]: 46 | stringLine = stringLine + ' ' 47 | result_file.write(stringLine + '\n') 48 | -------------------------------------------------------------------------------- /Obsolete_code/outlier-filter.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import sys 3 | import math 4 | filename = sys.argv[1] 5 | 6 | with open(filename) as f: 7 | with open('cleaned_canopy.csv', 'a') as result_file: 8 | f = csv.reader(f, delimiter=',') 9 | o = [] 10 | polygonID = 0 11 | for line in f: 12 | if line[0] == 'index': 13 | outputline = '' 14 | for item in line: 15 | outputline = outputline + item + ';' 16 | result_file.write(outputline[:-1] + '\n') 17 | else: 18 | if line[1] != polygonID: 19 | polygonID = line[1] 20 | o = line 21 | outputline = '' 22 | 
for item in line: 23 | outputline = '' 24 | for item in line: 25 | outputline = outputline + item + ';' 26 | result_file.write(outputline[:-1] + '\n') 27 | else: 28 | write = True 29 | for i in range(6, len(o[6:])): 30 | if not abs(float(o[i]) - 31 | float(line[i])) <= max(0.2 * 32 | abs(float(o[i])), 2): 33 | write = False 34 | if write: 35 | outputline = '' 36 | for item in line: 37 | outputline = outputline + item + ';' 38 | result_file.write(outputline[:-1] + '\n') 39 | -------------------------------------------------------------------------------- /Obsolete_code/relativemax.py: -------------------------------------------------------------------------------- 1 | import csv 2 | 3 | with open("common_learning_data.csv") as f: 4 | with open("relative_learning_data.csv", 'w') as result_file: 5 | f = csv.reader(f, delimiter=";") 6 | data = [] 7 | for line in f: 8 | data.append(line) 9 | result_file.write(','.join(data[0]) + '\n') 10 | for line in data[1:]: 11 | newline = '' 12 | count = 0 13 | locations = [13, 14, 15, 16, 17, 18] 14 | for element in line: 15 | if count in locations: 16 | newline = newline + \ 17 | str(float(element) / float(line[7])) + ',' 18 | else: 19 | newline = newline + str(element) + ',' 20 | count += 1 21 | result_file.write(newline[:-1] + '\n') 22 | -------------------------------------------------------------------------------- /Obsolete_code/tree_indexing.py: -------------------------------------------------------------------------------- 1 | ''' 2 | tree_indexing.py 3 | 4 | Usage: tree_indexing inputfile.csv outputfile.csv 5 | 6 | Indexes tree species by giving each species an unique number. 7 | ''' 8 | import sys 9 | 10 | # Get user input: 11 | filename = sys.argv[1] 12 | output_file = sys.argv[2] 13 | 14 | list = [] 15 | 16 | with open(filename, 'r') as f: 17 | for i, line in enumerate(f, 0): 18 | if i > 0: 19 | species = line.split(';')[-1].rstrip('\n') 20 | 21 | if species not in list: 22 | list.append(species) 23 | f.close() 24 | 25 | with open(output_file, 'w') as f2: 26 | with open(filename, 'r') as f: 27 | for line in f: 28 | species = line.split(';')[-1].rstrip('\n') 29 | if species in list: 30 | write_str = line.rstrip('\n') + ';' + \ 31 | str(list.index(species)) + '\n' 32 | else: 33 | write_str = line.rstrip('\n') + '; species_index' + '\n' 34 | f2.write(write_str) 35 | -------------------------------------------------------------------------------- /SVM LiDAR tree classifier.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true, 8 | "scrolled": true 9 | }, 10 | "outputs": [], 11 | "source": [ 12 | "import numpy as np\n", 13 | "import csv\n", 14 | "from sklearn import svm\n", 15 | "import itertools\n", 16 | "import time\n", 17 | "from multiprocessing.dummy import Pool as ThreadPool " 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 2, 23 | "metadata": { 24 | "collapsed": true 25 | }, 26 | "outputs": [], 27 | "source": [ 28 | "filename = \"indexed_learning_data.csv\"\n", 29 | "data = []\n", 30 | "with open(filename) as f:\n", 31 | " r = csv.reader(f, delimiter=',')\n", 32 | " for line in r:\n", 33 | " data.append(line)\n", 34 | "data = np.array(data[1:])" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": 3, 40 | "metadata": { 41 | "scrolled": true 42 | }, 43 | "outputs": [ 44 | { 45 | "name": "stdout", 46 | "output_type": "stream", 47 | "text": [ 48 | "[[ 
4.86460000e+05 2.56500000e+01 5.28900000e+01 ..., 4.91500000e+01\n", 49 | " 5.11900000e+01 4.90800000e+03]\n", 50 | " [ 4.58697000e+05 3.31900000e+01 4.83000000e+01 ..., 4.37300000e+01\n", 51 | " 4.65400000e+01 1.19000000e+02]\n", 52 | " [ 4.52080000e+05 3.51800000e+01 6.27800000e+01 ..., 5.84300000e+01\n", 53 | " 6.04300000e+01 1.09400000e+03]\n", 54 | " ..., \n", 55 | " [ 4.58486000e+05 3.03500000e+01 3.86200000e+01 ..., 3.46100000e+01\n", 56 | " 3.60800000e+01 1.32000000e+02]\n", 57 | " [ 4.51165000e+05 4.01000000e+01 5.18900000e+01 ..., 4.98000000e+01\n", 58 | " 5.10700000e+01 5.60000000e+01]\n", 59 | " [ 4.50080000e+05 4.27800000e+01 6.88000000e+01 ..., 6.17500000e+01\n", 60 | " 6.42900000e+01 5.43000000e+02]]\n", 61 | "[0 1 0 ..., 1 6 1]\n", 62 | "[[ 4.59704000e+05 2.89900000e+01 4.97900000e+01 ..., 4.78500000e+01\n", 63 | " 4.91900000e+01 5.80000000e+01]\n", 64 | " [ 4.59690000e+05 2.62700000e+01 4.41700000e+01 ..., 4.14800000e+01\n", 65 | " 4.24000000e+01 1.60200000e+03]\n", 66 | " [ 4.61350000e+05 6.25100000e+01 8.83500000e+01 ..., 7.71100000e+01\n", 67 | " 8.41000000e+01 0.00000000e+00]\n", 68 | " ..., \n", 69 | " [ 4.54786000e+05 2.64300000e+01 4.64300000e+01 ..., 4.43100000e+01\n", 70 | " 4.48800000e+01 6.10000000e+01]\n", 71 | " [ 4.60047000e+05 2.29400000e+01 2.84000000e+01 ..., 2.73300000e+01\n", 72 | " 2.80000000e+01 2.30000000e+01]\n", 73 | " [ 4.55240000e+05 7.64000000e+00 2.30900000e+01 ..., 1.83100000e+01\n", 74 | " 2.01000000e+01 1.32200000e+03]]\n", 75 | "[14 1 4 3 4 4 1 12 1 9 4 1 2 2 4 4 1 9 1 21 0 10 9 13 1\n", 76 | " 1 10 1 1 9 1 15 1 12 2 9 12 2 0 1 4 17 1 19 2 22 1 1 0 2\n", 77 | " 2 2 12 1 1 1 1 3 2 6 1 2 1 2 1 2 2 0 9 4 1 2 1 2 6\n", 78 | " 4 2 4 11 3 1 1 1 1 1 3 1 2 2 2 1 1 2 1 1 1 2 4 1 13\n", 79 | " 2 1 6 1 1 2 12 1 7 3 4 3 2 17 4 2 3 1 13 1 12 14 1 1 13\n", 80 | " 4 1 4 0 2 13 11 2 3 2 20 1 4 1 2 1 17 1 9 2 2 1 1 2 3\n", 81 | " 2 1 9 15 2 3 1 4 1 2 2 10 4 1 10 11 14 1 14 3 10 1 16 1 9\n", 82 | " 2 2 10 10 6 1 4 1 1 2 2 10 10 9 1 2 3 4 9 4 4 1 15 1 2\n", 83 | " 1 2 1 5 2 3 1 2 2 1 4 3 13 15 20 0 4 2 2 1 0 1 1 1 2\n", 84 | " 5 2 2 1 17 1 2 1 2 1 1 3 6 1 4 1 2 1 2 2 0 1 1 3 1\n", 85 | " 1 0 2 9 11 3 9 1 14 3 9 1 13 4 3 2 10 4 12 4 7 2 4 1 1\n", 86 | " 10 1 11 14 1 17 1 2 9 16 2 3 10 2 1 3 2 15 9 1 1 9 14 1 18\n", 87 | " 9 2 1 1 1 1 1 10 1 4 13 9 1 15 2 0 7 4 2 9 18 1 2 2 4\n", 88 | " 9 11 1 2 8 4 1 1 1 2 1 4 0 16 14 2 4 2 1 2 9 1 3 13 2\n", 89 | " 3 14 10 2 10 2 3 3 15 1 1 1 1 1 9 1 0 17 14 2 1 10 11 1 1\n", 90 | " 1 3 9 1 9 2 1 1 2 2 1 12 1 2 0 1 1 13 1 3 5 1 1 3 14\n", 91 | " 2 17 1 1 2 14 8 3 14 10 2 13 4 2 2 0 1 4 15 1 1 9 1 2 5\n", 92 | " 9 1 2 1 9 2 6 0 17 4 1 4 1 1 0 2 1 12 1 2 12 1 13 1 15\n", 93 | " 1 2 5 1 1 1 1 1 1 13 1 16 9 2 4 1 1 1 2 2 0 1 1 1 14\n", 94 | " 1 2 9 10 2 1 3 1 9 9 17 4 1 2 2 1 13 6 13 2 2 2 13 14 6\n", 95 | " 3 1 9 2 1 4 1 5 1 1 1 3 1 14 19 2 4 1 14 1 0 0 2 14 2\n", 96 | " 4 18 10 2 1 1 2 3 11 13 3 1 3 1 3 1 1 4 1 1 1 10 4 10 4\n", 97 | " 4 17 2 13 1 1 2 4 2 12 6 1 1 2 2 3 1 2 0 3 1 1 1 2 15\n", 98 | " 1 4 12 9 9 0 21 1 2 2 3 4 17 3 0 1 5 9 2 1 4 1 12 9 2\n", 99 | " 3 2 14 2 2 0 2 1 2 6 1 1 1 4 1 1 9 4 2 2 3 2 1 5 11\n", 100 | " 1 1 1 1 0 9 1 2 1 13 2 1 1 2 2 2 4 1 13 1 4 2 2 1 3\n", 101 | " 1 2 10 1 3 14 1 1 5 2 1 2 11 3 1 1 1 20 1 1 14 17 2 20 10\n", 102 | " 0 3 1 2 2 2 0 1 17 4 4 1 1 3 4 1 3 4 4 6 17 4 2 1 1\n", 103 | " 2 9 2 1 1 2 2 3 1 1 9 2 14 9 1 4 3 2 9 3 2 4 3 6 1\n", 104 | " 13 5 2 11 10 2 10 6 15 4 20 1 1 2 0 1 9 1 1 1 4 7 10 1 9\n", 105 | " 0 1 14 1 13 1 5 2 1 1 2 4 9 13 11 8 16 1]\n" 106 | ] 107 | } 108 | ], 109 | 
"source": [ 110 | "def validation_split(data, ratio):\n", 111 | " shuffleData = np.copy(data)\n", 112 | " np.random.shuffle(shuffleData)\n", 113 | " border = int(ratio*len(shuffleData))\n", 114 | " trainSet = shuffleData[:border]\n", 115 | " valSet = shuffleData[border:]\n", 116 | " Xt = trainSet[:,5:-2].astype(float)\n", 117 | " Yt = trainSet[:,-1].astype(int)\n", 118 | " Xv = valSet[:,5:-2].astype(float)\n", 119 | " Yv = valSet[:,-1].astype(int)\n", 120 | " Xt = np.delete(Xt, 11, 1)\n", 121 | " Xv = np.delete(Xv, 11, 1)\n", 122 | " return Xt, Yt, Xv, Yv\n", 123 | "\n", 124 | "Xt, Yt, Xv, Yv = validation_split(data, 0.8)\n", 125 | "print (Xt)\n", 126 | "print (Yt)\n", 127 | "print (Xv)\n", 128 | "print (Yv)" 129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": 4, 134 | "metadata": { 135 | "collapsed": true 136 | }, 137 | "outputs": [], 138 | "source": [ 139 | "def average_accuracy(data, loops = 1000, c = 64, kernelType = 'rbf'):\n", 140 | " score_list = []\n", 141 | " for i in range(loops):\n", 142 | " Xt, Yt, Xv, Yv = validation_split(data, 0.8)\n", 143 | " svc = svm.SVC(kernel = kernelType, C = c)\n", 144 | " svc = svc.fit(Xt, Yt)\n", 145 | " score_list.append(svc.score(Xv, Yv))\n", 146 | " return np.mean(np.array(score_list))" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": 58, 152 | "metadata": { 153 | "scrolled": true 154 | }, 155 | "outputs": [ 156 | { 157 | "name": "stdout", 158 | "output_type": "stream", 159 | "text": [ 160 | "0.323880208333 -5\n", 161 | "0.322135416667 -4\n", 162 | "0.319622395833 -3\n", 163 | "0.322708333333 -2\n", 164 | "0.336328125 -1\n", 165 | "0.3564453125 0\n", 166 | "0.372083333333 1\n", 167 | "0.369388020833 2\n", 168 | "0.372669270833 3\n", 169 | "0.374283854167 4\n", 170 | "0.374453125 5\n", 171 | "0.375208333333 6\n", 172 | "0.3726171875 7\n", 173 | "0.374635416667 8\n", 174 | "0.373684895833 9\n", 175 | "0.374583333333 10\n", 176 | "0.374296875 11\n", 177 | "0.3735546875 12\n", 178 | "0.373346354167 13\n", 179 | "0.371731770833 14\n", 180 | "0.375208333333 6\n" 181 | ] 182 | } 183 | ], 184 | "source": [ 185 | "bestAcc = 0.0\n", 186 | "bestI = -6\n", 187 | "for i in range(-5,15):\n", 188 | " acc = average_accuracy(data, loops = 100, c = (2**i))\n", 189 | " print (acc, i)\n", 190 | " if acc > bestAcc:\n", 191 | " bestAcc = acc\n", 192 | " bestI = i\n", 193 | "print (bestAcc, bestI)" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": 60, 199 | "metadata": { 200 | "scrolled": false 201 | }, 202 | "outputs": [ 203 | { 204 | "name": "stdout", 205 | "output_type": "stream", 206 | "text": [ 207 | "0.373307291667\n" 208 | ] 209 | } 210 | ], 211 | "source": [ 212 | "print (average_accuracy(data, loops = 100))" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": 43, 218 | "metadata": { 219 | "collapsed": true 220 | }, 221 | "outputs": [], 222 | "source": [ 223 | "filename = \"indexed_learning_val.csv\"\n", 224 | "new_data = []\n", 225 | "with open(filename) as f:\n", 226 | " r = csv.reader(f, delimiter=';')\n", 227 | " for line in r:\n", 228 | " new_data.append(line)\n", 229 | "new_data = np.array(new_data[1:])" 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": 56, 235 | "metadata": {}, 236 | "outputs": [ 237 | { 238 | "data": { 239 | "text/plain": [ 240 | "0.17801047120418848" 241 | ] 242 | }, 243 | "execution_count": 56, 244 | "metadata": {}, 245 | "output_type": "execute_result" 246 | } 247 | ], 248 | "source": [ 249 | "svc = 
svm.SVC(C = 32)\n", 250 | "X = data[:,5:-2].astype(float)\n", 251 | "Y = data[:,-1].astype(int)\n", 252 | "new_X = new_data[:,5:-2].astype(float)\n", 253 | "new_Y = new_data[:,-1].astype(int)\n", 254 | "X = np.delete(X, 11, 1)\n", 255 | "new_X = np.delete(new_X, 11, 1)\n", 256 | "svc = svc.fit(X, Y)\n", 257 | "svc.score(new_X, new_Y)" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": 4, 263 | "metadata": { 264 | "collapsed": true 265 | }, 266 | "outputs": [], 267 | "source": [ 268 | "def my_function(subset, data, loops = 100, c = 64, kernelType = 'rbf'):\n", 269 | " times = []\n", 270 | " accuracy = []\n", 271 | " for i in range(loops):\n", 272 | " start = time.time()\n", 273 | " Xt, Yt, Xv, Yv = validation_split(data, 0.8)\n", 274 | " varXt = Xt[:, subset]\n", 275 | " varXv = Xv[:, subset]\n", 276 | " svc = svm.SVC(kernel = kernelType, C = c)\n", 277 | " svc = svc.fit(varXt, Yt)\n", 278 | " accuracy.append(svc.score(varXv, Yv))\n", 279 | " stop = time.time()\n", 280 | " times.append(stop-start)\n", 281 | " score = np.mean(np.array(accuracy))/(np.mean(times))\n", 282 | " return (score, subset)\n", 283 | "\n", 284 | "def optimal_feature_finder(data, loops = 100, c = 64, kernelType = 'rbf'):\n", 285 | " Xt, Yt, Xv, Yv = validation_split(data, 0.8)\n", 286 | " indices = range(len(Xt[0]))\n", 287 | " end = []\n", 288 | " previous_best = (0, [])\n", 289 | " for L in range(1, len(indices)+1):\n", 290 | " print (\"Progress: \", L)\n", 291 | " subsets = [previous_best[1]+[i] for i in indices if i not in previous_best[1]]\n", 292 | " pool = ThreadPool(8) \n", 293 | " results = pool.starmap(my_function, zip(subsets, itertools.repeat(data), itertools.repeat(loops), itertools.repeat(c), itertools.repeat(kernelType)))\n", 294 | " pool.close() \n", 295 | " pool.join() \n", 296 | " end.append(max(results,key=lambda item:item[0]))\n", 297 | " previous_best = end[-1]\n", 298 | " return end" 299 | ] 300 | }, 301 | { 302 | "cell_type": "code", 303 | "execution_count": 5, 304 | "metadata": { 305 | "scrolled": true 306 | }, 307 | "outputs": [ 308 | { 309 | "name": "stdout", 310 | "output_type": "stream", 311 | "text": [ 312 | "Progress: 1\n", 313 | "Progress: 2\n", 314 | "Progress: 3\n", 315 | "Progress: 4\n", 316 | "Progress: 5\n", 317 | "Progress: 6\n", 318 | "Progress: 7\n", 319 | "Progress: 8\n", 320 | "Progress: 9\n", 321 | "Progress: 10\n", 322 | "Progress: 11\n", 323 | "Progress: 12\n", 324 | "Progress: 13\n", 325 | "Progress: 14\n", 326 | "[(0.4837377408931805, [1]), (0.7089594394280162, [1, 9]), (0.84617444999562041, [1, 9, 5]), (0.88017112291544986, [1, 9, 5, 6]), (0.91377201784258133, [1, 9, 5, 6, 12]), (0.58883001526239009, [1, 9, 5, 6, 12, 7]), (0.58487851867473883, [1, 9, 5, 6, 12, 7, 8]), (0.57239530525329019, [1, 9, 5, 6, 12, 7, 8, 10]), (0.58620613020391132, [1, 9, 5, 6, 12, 7, 8, 10, 3]), (0.58784942804896123, [1, 9, 5, 6, 12, 7, 8, 10, 3, 2]), (0.60905141052953582, [1, 9, 5, 6, 12, 7, 8, 10, 3, 2, 11]), (0.28324263952314044, [1, 9, 5, 6, 12, 7, 8, 10, 3, 2, 11, 0]), (0.17763683738756716, [1, 9, 5, 6, 12, 7, 8, 10, 3, 2, 11, 0, 13]), (0.10529244206247786, [1, 9, 5, 6, 12, 7, 8, 10, 3, 2, 11, 0, 13, 4])]\n", 327 | "(0.91377201784258133, [1, 9, 5, 6, 12])\n" 328 | ] 329 | } 330 | ], 331 | "source": [ 332 | "feature_scores = optimal_feature_finder(data, loops=500)\n", 333 | "print (feature_scores)\n", 334 | "print (max(feature_scores,key=lambda item:item[0]))" 335 | ] 336 | }, 337 | { 338 | "cell_type": "code", 339 | "execution_count": null, 340 | "metadata": { 341 | 
"collapsed": true 342 | }, 343 | "outputs": [], 344 | "source": [] 345 | } 346 | ], 347 | "metadata": { 348 | "kernelspec": { 349 | "display_name": "Python 3", 350 | "language": "python", 351 | "name": "python3" 352 | }, 353 | "language_info": { 354 | "codemirror_mode": { 355 | "name": "ipython", 356 | "version": 3 357 | }, 358 | "file_extension": ".py", 359 | "mimetype": "text/x-python", 360 | "name": "python", 361 | "nbconvert_exporter": "python", 362 | "pygments_lexer": "ipython3", 363 | "version": "3.6.3" 364 | } 365 | }, 366 | "nbformat": 4, 367 | "nbformat_minor": 2 368 | } 369 | -------------------------------------------------------------------------------- /data_processing.py: -------------------------------------------------------------------------------- 1 | ''' 2 | data_processing.py 3 | 4 | usage for map: data_processing.py input_directory output.csv 5 | usage for single file: data_processing.py input.csv output.csv 6 | 7 | Adds together all csv files generated from lascanopy and removes lines 8 | with missing values. Can also be used to remove lines with missing values 9 | from a single csv file. 10 | The map containing the csv files that have to be combined has to be 11 | present in the same directory as this file. 12 | Further uses dominant_trees.csv to create data suitable for learning 13 | algorithms. 14 | ''' 15 | import os 16 | from sys import argv 17 | import sys 18 | import csv 19 | 20 | # Aquire source and destination from commandline. 21 | dirname = argv[1] 22 | try: 23 | outputfile = argv[2] 24 | except BaseException: 25 | print("no output file given, output saved to 'combined_canopy.csv'") 26 | outputfile = "combined_canopy.csv" 27 | 28 | # Extract and store single header and every other line from all files. 29 | lines = [] 30 | header = None 31 | if dirname.endswith('.csv'): 32 | with open(dirname, 'r') as f: 33 | for i, line in enumerate(f, 0): 34 | if i != 0: 35 | lines.append(line) 36 | else: 37 | header = line 38 | else: 39 | for file in os.listdir(dirname): 40 | if file.endswith(".csv"): 41 | with open(dirname + '/' + file, 'r') as f: 42 | for i, line in enumerate(f, 0): 43 | if i != 0: 44 | lines.append(line) 45 | else: 46 | header = line 47 | 48 | # Remove NULL lines and write to output file. 49 | with open(outputfile, 'w') as f: 50 | f.write(header) 51 | for line in lines: 52 | line2 = line.split(',') 53 | if "-" not in line2: 54 | f.write(line) 55 | 56 | # Remove any polygons generated with growing forrest who's values deviate too 57 | # much from the original polygon. 
58 | with open(outputfile) as f: 59 | with open('cleaned_canopy.csv', 'a') as result_file: 60 | f = csv.reader(f, delimiter=',') 61 | o = [] 62 | polygonID = 0 63 | for line in f: 64 | if line[0] == 'index': 65 | outputline = '' 66 | for item in line: 67 | outputline = outputline + item + ',' 68 | result_file.write(outputline[:-1] + '\n') 69 | else: 70 | if line[1] != polygonID: 71 | polygonID = line[1] 72 | o = line 73 | outputline = '' 74 | for item in line: 75 | outputline = '' 76 | for item in line: 77 | outputline = outputline + item + ',' 78 | result_file.write(outputline[:-1] + '\n') 79 | else: 80 | write = True 81 | for i in range(6, len(o[6:])): 82 | if not abs(float(o[i]) - 83 | float(line[i])) <= max(0.2 * 84 | abs(float(o[i])), 2): 85 | write = False 86 | if write: 87 | outputline = '' 88 | for item in line: 89 | outputline = outputline + item + ',' 90 | 91 | result_file.write(outputline[:-1] + '\n') 92 | 93 | # Combine cleaned LAScanopy data with corresponding vegetation database labels. 94 | with open('cleaned_canopy.csv') as f1: 95 | with open('dominant_trees.csv') as f2: 96 | with open('learning_data.csv', 'a') as result_file: 97 | f1 = csv.reader(f1, delimiter=',') 98 | f2 = csv.reader(f2, delimiter=',') 99 | latindict = {} 100 | # Uses dominant_trees to find the name, and put it in a dictionary. 101 | for line in f2: 102 | latindict[line[1]] = line[-1] 103 | # Use the dictionary to find the treename and add it to the line. 104 | for line in f1: 105 | if line[0] == 'index': 106 | line.append('latinname') 107 | else: 108 | latinname = latindict[line[1]] 109 | line.append(latinname) 110 | outputline = '' 111 | for item in line: 112 | outputline = outputline + item 113 | if item != line[-1]: 114 | outputline = outputline + ',' 115 | result_file.write(outputline + '\n') 116 | 117 | # Adds numerical value to each datapoint to indicate species, meant for use 118 | # with SVM or Neural Network 119 | lis = [] 120 | with open('learning_data.csv', 'r') as f: 121 | for i, line in enumerate(f, 0): 122 | if i > 0: 123 | species = line.split(',')[-1].rstrip('\n') 124 | 125 | if species not in lis: 126 | lis.append(species) 127 | with open('indexed_learning_data.csv', 'w') as f2: 128 | with open('learning_data.csv', 'r') as f: 129 | for line in f: 130 | species = line.split(',')[-1].rstrip('\n') 131 | if species in lis: 132 | write_str = line.rstrip('\n') + ',' + \ 133 | str(lis.index(species)) + '\n' 134 | else: 135 | write_str = line.rstrip('\n') + ', species_index' + '\n' 136 | f2.write(write_str) 137 | 138 | # For every tree species, add it to the csv file if that species has at least 139 | # 50 rows of data in the dataset. 140 | with open("indexed_learning_data.csv") as f1: 141 | with open("common_learning_data.csv", 'w') as f2: 142 | lines = csv.reader(f1, delimiter=",") 143 | data = [] 144 | for line in lines: 145 | data.append(line) 146 | seen = [] 147 | for line1 in data: 148 | if line1 == data[0]: 149 | f2.write(','.join(line1) + '\n') 150 | c = line1[-1] 151 | if c not in seen: 152 | counter = 0 153 | for line2 in data: 154 | if line2[-1] == c: 155 | counter += 1 156 | if counter >= 50: 157 | seen.append(c) 158 | f2.write(','.join(line1) + '\n') 159 | else: 160 | f2.write(','.join(line1) + '\n') 161 | 162 | # Transform the P05 to P90 variables to a percentage of the max, to remove 163 | # the differences between smaller trees of the same species. 
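# Columns 13-18 (the height percentiles) are divided by the value in
# column 7, which the comment above treats as the maximum height, so they
# become fractions of the maximum rather than absolute heights. The mapping
# of these columns to the P05-P90 percentiles and the maximum follows the
# LAScanopy output used here and may need adjusting for other outputs.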
164 | with open("common_learning_data.csv") as f: 165 | with open("relative_learning_data.csv", 'w') as result_file: 166 | f = csv.reader(f, delimiter=",") 167 | data = [] 168 | for line in f: 169 | data.append(line) 170 | result_file.write(','.join(data[0]) + '\n') 171 | for line in data[1:]: 172 | newline = '' 173 | count = 0 174 | locations = [13, 14, 15, 16, 17, 18] 175 | for element in line: 176 | if count in locations: 177 | newline = newline + \ 178 | str(float(element) / float(line[7])) + ',' 179 | else: 180 | newline = newline + str(element) + ',' 181 | count += 1 182 | result_file.write(newline[:-1] + '\n') 183 | -------------------------------------------------------------------------------- /python-requirements.txt: -------------------------------------------------------------------------------- 1 | pycodestyle 2 | autopep8 3 | sklearn 4 | numpy 5 | graphviz 6 | matplotlib 7 | graphviz 8 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/JorisJoBo/treedentifier.svg?branch=master)](https://travis-ci.org/JorisJoBo/treedentifier) 2 | [![HitCount](http://hits.dwyl.io/JorisJoBo/treedentifier.svg)](http://hits.dwyl.io/JorisJoBo/treedentifier) 3 | 4 | # Treedentifier 5 | Treedentifier allows you to identify trees in your country. The algorithm can learn 6 | what the characteristics of a specific tree species are and detect them when 7 | properly learned. For the development of this algorithm we use LiDAR data of the Netherlands. 8 | 9 | ## Setup 10 | ### Software 11 | After cloning the project, please install the required python packages first. 12 | For this project we use Python 3.6. If you don't have python already, you can 13 | download it [here](https://www.python.org/downloads/release/python-364/). 14 | To install the required packages, just execute: 15 | ``` 16 | pip install -r python-requirements.txt 17 | ``` 18 | 19 | To process the LAZ files in which most LiDAR datasets are formatted, you can 20 | use LAStools. You can download LAStools [here](https://rapidlasso.com/lastools/). 21 | Please note that LAStools is not free. You can use LAStools without paying but it will 22 | distort your data and insert black diagonals in LAScanopy. 23 | 24 | ### Datasets 25 | After that, you'll need a vegetation dataset of your area, as well as a filtered 26 | and a unfiltered LiDAR dataset. We used a modified version of the [Dutch Vegetation Database](https://www.gbif.org/dataset/740df67d-5663-41a2-9d12-33ec33876c47) 27 | as our vegetation dataset and as LiDAR data we used [filtered AHN2 data](http://geodata.nationaalgeoregister.nl/ahn2/atom/ahn2_gefilterd.xml) 28 | and [filtered out AHN2 data](http://geodata.nationaalgeoregister.nl/ahn2/atom/ahn2_uitgefilterd.xml). 29 | 30 | ### Processing data 31 | To be able to use this data in our algorithm, we need to do some preprocessing first. 32 | 33 | #### Preparing the vegetation datasets 34 | The vegetation dataset contains a lot of different plants. Since we are only 35 | interested in trees, we need to remove all non-trees. In the file treespecies, 36 | you can find a list of trees extracted from the Dutch Vegetation Database. 37 | 38 | To extract all dominant trees from the vegetation dataset, you can run 'tree_processing.py'. 
39 | ```
40 | python tree_processing.py
41 | ```
42 | Please note that this program looks for the 'joined_db2.csv' file, so you'll have to give your
43 | vegetation dataset that name. This program will create a file called 'dominant_trees.csv', which will be used in a later step.
44 | 
45 | Now you can run the LAStools steps. We set up default batch scripts which should
46 | work for most setups. Just run the batch files and you should be fine. You'll need the output file 'ID_growing_forest.csv' from the 'tree_processing.py' step for the last LAStools step, 'LAScanopy'.
47 | 
48 | When done with LAStools, run 'data_processing.py' to combine the dominant trees
49 | and the result matrix of LAScanopy and to remove any unnecessary data.
50 | ```
51 | python data_processing.py input_directory output.csv
52 | ```
53 | (The 'dominant_trees.csv' file used by the program above is one of the output files of the
54 | 'tree_processing.py' step executed earlier.)
55 | 
56 | Now you're done with the setup. You should now be able to run the algorithms without any problems.
57 | 
58 | ## Machine learning algorithms
59 | In this project we created Python algorithms based on two machine learning principles.
60 | You can choose between decision trees and support vector machines. We recommend using the decision tree algorithm.
61 | During our tests, decision trees showed better accuracy as well as efficiency (speed) than support vector machines.
62 | 
63 | ### Running the decision tree algorithms
64 | When running the decision tree algorithm, you can choose one of the following versions:
65 | - Dependent
66 | - Independent
67 | 
68 | #### Dependent decision tree algorithm
69 | This runs the algorithm on test data from the same forest areas the training data originates from.
70 | 
71 | #### Independent decision tree algorithm
72 | This runs the algorithm on forest data from geographic areas other than those the training data originates from.
73 | 
74 | ### Running the support vector machine algorithms
75 | This algorithm is provided as a Jupyter Notebook ('SVM LiDAR tree classifier.ipynb').
76 | To run this program you'll have to install Jupyter/IPython Notebook.
77 | 
78 | ## Contributing
79 | ### Testing
80 | You have to test your code before pushing. Pushing without testing may result in failed builds.
81 | For testing you should use [PyCodeStyle](https://github.com/PyCQA/pycodestyle).
82 | You can test your code with the following command:
83 | ```
84 | pycodestyle --show-source --show-pep8 ./
85 | ```
86 | If you want, you can let [autopep8](https://github.com/hhatto/autopep8) fix your issues with the following command:
87 | ```
88 | autopep8 --in-place --aggressive --aggressive -r ./
89 | ```
90 | 
91 | ## Contributors
92 | The core contributors are:
93 | - Patrick Spaans
94 | - Geerten Rijsdijk
95 | - Thom Visser
96 | - Joris Jonkers Both
97 | 
--------------------------------------------------------------------------------
/tree_processing.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import operator
3 | 
4 | # Converts the tree species file to a list.
5 | treespecies = []
6 | with open('treespecies.txt') as b:
7 |     for species in b:
8 |         treespecies.append(species.strip())  # drop the trailing newline
9 | 
10 | # Filters out all forest data from the dataset into a csv file.
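# Note on the expected input (inferred from how the columns are used below,
# not from separate documentation): joined_db2.csv is assumed to be
# semicolon-delimited, with the polygon ID at index 1, the species coverage
# ratio at index 3 and the polygon geometry (WKT) at index 5 of each row.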
11 | with open('joined_db2.csv') as f:
12 |     with open('forest_filter_result.csv', 'a') as result_file:
13 |         for line in f:
14 |             if 'Forest' in line:
15 |                 for species in treespecies:
16 |                     if species in line:
17 |                         result_file.write(line)
18 | 
19 | 
20 | with open('forest_filter_result.csv') as f:
21 |     with open('dominant_trees.csv', 'a') as result_file:
22 |         convertedf = csv.reader(f, delimiter=';')
23 |         # Groups all lines in the csv file on polygonID.
24 |         sortedf = sorted(convertedf, key=operator.itemgetter(1))
25 |         current_polygon = ''
26 |         dominant_tree = ''
27 |         dominant_tree_ratio = 0
28 |         for line in sortedf:
29 |             # If a new polygonID is found, write the dominant tree of the
30 |             # previous polygonID to the result file.
31 |             if line[1] != current_polygon:
32 |                 if current_polygon != '':
33 |                     for data in dominant_tree:
34 |                         rebuildline = rebuildline + data
35 |                         if data != dominant_tree[-1]:
36 |                             rebuildline = rebuildline + ','
37 |                     result_file.write(rebuildline + '\n')
38 |                 current_polygon = line[1]
39 |                 dominant_tree_ratio = 0
40 |                 rebuildline = ''
41 |             # If the current quantity percentage is higher than the highest
42 |             # percentage for this polygonID, replace the old one.
43 |             if float(line[3]) > float(dominant_tree_ratio):
44 |                 dominant_tree = line
45 |                 dominant_tree_ratio = line[3]
46 |         # Adds the last dominant_tree to the result file.
47 |         for data in dominant_tree:
48 |             rebuildline = rebuildline + data
49 |             if data != dominant_tree[-1]:
50 |                 rebuildline = rebuildline + ','
51 |         result_file.write(rebuildline + '\n')
52 | 
53 | # Adds two layers of polygons around a polygon as new polygon locations.
54 | growing_plots = []
55 | with open('dominant_trees.csv') as f:
56 |     convertedf = csv.reader(f, delimiter=',')
57 |     # Retrieves the polygon coordinates from the dataset.
58 |     for line in convertedf:
59 |         X = []
60 |         Y = []
61 |         polyID = line[1]
62 |         indice = line[5].find('(') + 2
63 |         allCoords = line[5][indice:-2]
64 |         coords = allCoords.split(',')
65 |         for coord in coords:
66 |             x, y = coord.split(' ')
67 |             X.append(int(x))  # as ints so min()/max() compare numerically
68 |             Y.append(int(y))
69 |         size = int(max(X)) - int(min(X))
70 | 
71 |         # Adds 5x5 polygons with the original in the center as polygon
72 |         # coordinates.
73 |         types = [0, -size, size, -size * 2, size * 2]
74 |         typecombinations = []
75 |         for xtype in types:
76 |             for ytype in types:
77 |                 typecombinations.append([xtype, ytype])
78 |         for ux, uy in typecombinations:
79 |             growing_plots.append([polyID,
80 |                                   int(min(X)) + ux,
81 |                                   int(min(Y)) + uy,
82 |                                   int(max(X)) + ux,
83 |                                   int(max(Y)) + uy])
84 | 
85 | # Write all polygon coordinates for each polygonID to a file.
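# A made-up example of the rows written below: the header is
#   polygonID min_x min_y max_x max_y
# and a data row could look like
#   1234 120500 487300 120520 487320
# Each original polygon contributes 25 such rows, one per cell of the
# 5x5 grid built above.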
86 | with open('ID_growing_forest.csv', 'a') as result_file: 87 | result_file.write("polygonID min_x min_y max_x max_y\n") 88 | for line in growing_plots: 89 | stringLine = '' 90 | for value in line: 91 | stringLine = stringLine + str(value) 92 | if value != line[-1]: 93 | stringLine = stringLine + ' ' 94 | result_file.write(stringLine + '\n') 95 | -------------------------------------------------------------------------------- /treespecies.txt: -------------------------------------------------------------------------------- 1 | Abies alba 2 | Abies grandis 3 | Abies nordmanniana 4 | Acer campestre 5 | Acer negundo 6 | Acer platanoides 7 | Acer pseudoplatanus 8 | Aesculus hippocastanum 9 | Ailanthus altissima 10 | Alnus cordata 11 | Alnus glutinosa 12 | Alnus incana 13 | Amelanchier lamarckii 14 | Betula pendula 15 | Betula pubescens 16 | Buxus sempervirens 17 | Carpinus betulus 18 | Castanea sativa 19 | Chamaecyparis lawsoniana 20 | Crataegus monogyna 21 | Fagus sylvatica 22 | Frangula alnus 23 | Fraxinus excelsior 24 | Ilex aquifolium 25 | Isothecium alopecuroides 26 | Juglans regia 27 | Larix decidua 28 | Larix kaempferi 29 | Picea abies 30 | Picea pungens 31 | Picea sitchensis 32 | Pinus contorta 33 | Pinus nigra 34 | Pinus pinaster 35 | Pinus strobus 36 | Pinus sylvestris 37 | Platanus hispanica 38 | Populus alba 39 | Populus balsamifera 40 | Populus nigra 41 | Populus tremula 42 | Prunus avium 43 | Prunus domestica 44 | Prunus laurocerasus 45 | Prunus padus 46 | Prunus serotina 47 | Prunus spinosa 48 | Pseudotsuga menziesii 49 | Pyrus communis 50 | Quercus cerris 51 | Quercus petraea 52 | Quercus pubescens 53 | Quercus robur 54 | Quercus rosacea 55 | Quercus rubra 56 | Rhamnus cathartica 57 | Robinia pseudoacacia 58 | Salix alba 59 | Salix caprea 60 | Salix fragilis 61 | Salix pentandra 62 | Sorbus aria 63 | Sorbus aucuparia 64 | Sorbus intermedia 65 | Taxodium distichum 66 | Taxus baccata 67 | Thuja plicata 68 | Tilia cordata 69 | Tilia platyphyllos 70 | Tsuga heterophylla 71 | Ulmus laevis 72 | Ulmus glabra 73 | Ulmus minor 74 | --------------------------------------------------------------------------------