├── .codacy.yml ├── .editorconfig ├── .github └── ISSUE_TEMPLATE │ └── user-story.md ├── .gitignore ├── .prospector.yml ├── .pylintrc ├── .readthedocs.yml ├── .travis.yml ├── .zenodo.json ├── CHANGELOG.md ├── CITATION.cff ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── README.rst ├── doc ├── api │ ├── index.rst │ ├── satsense.bands.rst │ ├── satsense.extract.rst │ ├── satsense.features.rst │ ├── satsense.generators.rst │ ├── satsense.image.rst │ ├── satsense.performance.rst │ ├── satsense.rst │ └── satsense.util.rst ├── conf.py ├── index.rst ├── installation.rst ├── notebooks │ ├── feature_extraction.rst │ ├── feature_extraction_files │ │ ├── feature_extraction_11_0.png │ │ ├── feature_extraction_13_0.png │ │ ├── feature_extraction_15_0.png │ │ ├── feature_extraction_5_0.png │ │ └── feature_extraction_7_1.png │ └── index.rst └── requirements.txt ├── environment.yml ├── notebooks ├── Classification │ └── Classification_Example.ipynb ├── FeatureExtraction │ └── feature_extraction.ipynb ├── Performance │ ├── Conversions_binary_mask_multipolygon.ipynb │ └── JaccardIndex_Multipolygons.ipynb └── README.md ├── satsense ├── __init__.py ├── _version.py ├── bands.py ├── extract.py ├── features │ ├── __init__.py │ ├── feature.py │ ├── hog.py │ ├── lacunarity.py │ ├── ndxi.py │ ├── pantex.py │ ├── sift.py │ └── texton.py ├── generators.py ├── image.py ├── performance │ ├── __init__.py │ └── jaccard_similarity.py └── util │ ├── __init__.py │ ├── conversions.py │ ├── mask.py │ └── shapefile.py ├── setup.cfg ├── setup.py └── test ├── __init__.py ├── conftest.py ├── data ├── baseimage │ ├── section_2_sentinel_canny_edge.tif │ ├── section_2_sentinel_gray_ubyte.tif │ ├── section_2_sentinel_grayscale.tif │ └── section_2_sentinel_textons.tif ├── generate │ ├── baseimage.py │ └── feature_target.py ├── source │ ├── section_2_sentinel.tif │ ├── section_2_sentinel_l1c.tif │ ├── section_2_sentinel_rect.tif │ └── section_2_sentinel_rect_wgs84.tif └── target │ ├── hog.nc │ ├── 
lacunarity.nc │ ├── ndsi.tif │ ├── ndsi.tif.msk │ ├── ndvi.tif │ ├── ndvi.tif.msk │ ├── pantex.nc │ ├── rb_ndvi.tif │ ├── rb_ndvi.tif.msk │ ├── rg_ndvi.tif │ ├── rg_ndvi.tif.msk │ ├── sift.nc │ └── texton.nc ├── features ├── gen_target.py ├── test_baseimage.py └── test_features.py ├── strategies.py ├── test_extract.py ├── test_generators.py └── test_image.py /.codacy.yml: -------------------------------------------------------------------------------- 1 | # codacy configuration file 2 | 3 | --- 4 | 5 | engines: 6 | coverage: 7 | enabled: true 8 | exclude_paths: [ 9 | 'tests', 10 | ] 11 | metrics: 12 | enabled: true 13 | duplication: 14 | enabled: true 15 | prospector: 16 | enabled: true 17 | pylint: 18 | enabled: true 19 | python_version: 3 20 | 21 | exclude_paths: [ 22 | 'test/**', 23 | ] 24 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: http://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | # Unix-style newlines with a newline ending every file 7 | [*] 8 | end_of_line = lf 9 | insert_final_newline = true 10 | trim_trailing_whitespace = true 11 | # Set default charset 12 | charset = utf-8 13 | 14 | # Matches multiple files with brace expansion notation 15 | 16 | # 4 space indentation 17 | [*.{py,java,r,R}] 18 | indent_size = 4 19 | 20 | # 2 space indentation 21 | [*.{js,json,yml,html,xml}] 22 | indent_size = 2 23 | 24 | [*.{md,Rmd}] 25 | trim_trailing_whitespace = false 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/user-story.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: User Story 3 | about: Create a scrum user story 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **As a** 11 | **I want to** 12 | **because** 13 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.asv 2 | .coverage* 3 | doc/_build/ 4 | .DS_Store 5 | .eggs/ 6 | .hypothesis 7 | .ipynb_checkpoints/ 8 | .project 9 | *.pyc 10 | __pycache__ 11 | .pydevproject 12 | .pytest_cache/ 13 | satsense.egg-info/ 14 | test-reports/ 15 | .vscode 16 | *.aux.xml 17 | -------------------------------------------------------------------------------- /.prospector.yml: -------------------------------------------------------------------------------- 1 | # prospector configuration file 2 | 3 | --- 4 | 5 | output-format: grouped 6 | 7 | strictness: veryhigh 8 | doc-warnings: true 9 | test-warnings: false 10 | member-warnings: false 11 | 12 | pyroma: 13 | run: true 14 | 15 | pep8: 16 | full: true 17 | 18 | pep257: 19 | # see http://pep257.readthedocs.io/en/latest/error_codes.html 20 | disable: [ 21 | # Disable because not part of PEP257 official convention: 22 | D203, # 1 blank line required before class docstring 23 | D212, # Multi-line docstring summary should start at the first line 24 | D213, # Multi-line docstring summary should start at the second line 25 | D404, # First word of the docstring should not be This 26 | ] 27 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # Specify a configuration file. 4 | #rcfile= 5 | 6 | # Python code to execute, usually for sys.path manipulation such as 7 | # pygtk.require(). 8 | #init-hook= 9 | 10 | # Add files or directories to the blacklist. They should be base names, not 11 | # paths. 12 | ignore=CVS 13 | 14 | # Add files or directories matching the regex patterns to the blacklist. The 15 | # regex matches against base names, not paths. 16 | ignore-patterns= 17 | 18 | # Pickle collected data for later comparisons. 
19 | persistent=yes 20 | 21 | # List of plugins (as comma separated values of python modules names) to load, 22 | # usually to register additional checkers. 23 | load-plugins= 24 | 25 | # Use multiple processes to speed up Pylint. 26 | jobs=1 27 | 28 | # Allow loading of arbitrary C extensions. Extensions are imported into the 29 | # active Python interpreter and may run arbitrary code. 30 | unsafe-load-any-extension=no 31 | 32 | # A comma-separated list of package or module names from where C extensions may 33 | # be loaded. Extensions are loading into the active Python interpreter and may 34 | # run arbitrary code 35 | extension-pkg-whitelist= 36 | 37 | # Allow optimization of some AST trees. This will activate a peephole AST 38 | # optimizer, which will apply various small optimizations. For instance, it can 39 | # be used to obtain the result of joining multiple strings with the addition 40 | # operator. Joining a lot of strings can lead to a maximum recursion error in 41 | # Pylint and this flag can prevent that. It has one side effect, the resulting 42 | # AST will be different than the one from reality. This option is deprecated 43 | # and it will be removed in Pylint 2.0. 44 | optimize-ast=no 45 | 46 | 47 | [MESSAGES CONTROL] 48 | 49 | # Only show warnings with the listed confidence levels. Leave empty to show 50 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED 51 | confidence= 52 | 53 | # Enable the message, report, category or checker with the given id(s). You can 54 | # either give multiple identifier separated by comma (,) or put this option 55 | # multiple time (only on the command line, not in the configuration file where 56 | # it should appear only once). See also the "--disable" option for examples. 57 | #enable= 58 | 59 | # Disable the message, report, category or checker with the given id(s). 
You 60 | # can either give multiple identifiers separated by comma (,) or put this 61 | # option multiple times (only on the command line, not in the configuration 62 | # file where it should appear only once).You can also use "--disable=all" to 63 | # disable everything first and then reenable specific checks. For example, if 64 | # you want to run only the similarities checker, you can use "--disable=all 65 | # --enable=similarities". If you want to run only the classes checker, but have 66 | # no Warning level messages displayed, use"--disable=all --enable=classes 67 | # --disable=W" 68 | disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating,import-error 69 | 70 | 71 | [REPORTS] 72 | 73 | # Set the output format. Available formats are text, parseable, colorized, msvs 74 | # (visual studio) and html. You can also give a reporter class, eg 75 | # mypackage.mymodule.MyReporterClass. 76 | output-format=text 77 | 78 | # Put messages in a separate file for each module / package specified on the 79 | # command line instead of printing them on stdout. Reports (if any) will be 80 | # written in a file name "pylint_global.[txt|html]". 
This option is deprecated 81 | # and it will be removed in Pylint 2.0. 82 | files-output=no 83 | 84 | # Tells whether to display a full report or only the messages 85 | reports=yes 86 | 87 | # Python expression which should return a note less than 10 (10 is the highest 88 | # note). You have access to the variables errors warning, statement which 89 | # respectively contain the number of errors / warnings messages and the total 90 | # number of statements analyzed. This is used by the global evaluation report 91 | # (RP0004). 92 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 93 | 94 | # Template used to display messages. This is a python new-style format string 95 | # used to format the message information. See doc for all details 96 | #msg-template= 97 | 98 | 99 | [FORMAT] 100 | 101 | # Maximum number of characters on a single line. 102 | max-line-length=79 103 | 104 | # Regexp for a line that is allowed to be longer than the limit. 105 | ignore-long-lines=^\s*(# )??$ 106 | 107 | # Allow the body of an if to be on the same line as the test if there is no 108 | # else. 109 | single-line-if-stmt=no 110 | 111 | # List of optional constructs for which whitespace checking is disabled. `dict- 112 | # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. 113 | # `trailing-comma` allows a space between comma and closing bracket: (a, ). 114 | # `empty-line` allows space-only lines. 115 | no-space-check=trailing-comma,dict-separator 116 | 117 | # Maximum number of lines in a module 118 | max-module-lines=1000 119 | 120 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 121 | # tab). 122 | indent-string=' ' 123 | 124 | # Number of spaces of indent required inside a hanging or continued line. 125 | indent-after-paren=4 126 | 127 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 
128 | expected-line-ending-format=LF 129 | 130 | 131 | [MISCELLANEOUS] 132 | 133 | # List of note tags to take in consideration, separated by a comma. 134 | notes=FIXME,FIX-ME,XXX,TODO 135 | 136 | 137 | [LOGGING] 138 | 139 | # Logging modules to check that the string format arguments are in logging 140 | # function parameter format 141 | logging-modules=logging 142 | 143 | 144 | [VARIABLES] 145 | 146 | # Tells whether we should check for unused import in __init__ files. 147 | init-import=no 148 | 149 | # A regular expression matching the name of dummy variables (i.e. expectedly 150 | # not used). 151 | dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy 152 | 153 | # List of additional names supposed to be defined in builtins. Remember that 154 | # you should avoid to define new builtins when possible. 155 | additional-builtins= 156 | 157 | # List of strings which can identify a callback function by name. A callback 158 | # name must start or end with one of those strings. 159 | callbacks=cb_,_cb 160 | 161 | # List of qualified module names which can have objects that can redefine 162 | # builtins. 163 | redefining-builtins-modules=six.moves,future.builtins 164 | 165 | 166 | [TYPECHECK] 167 | 168 | # Tells whether missing members accessed in mixin class should be ignored. A 169 | # mixin class is detected if its name ends with "mixin" (case insensitive). 170 | ignore-mixin-members=yes 171 | 172 | # List of module names for which member attributes should not be checked 173 | # (useful for modules/projects where namespaces are manipulated during runtime 174 | # and thus existing member attributes cannot be deduced by static analysis. It 175 | # supports qualified module names, as well as Unix pattern matching. 176 | ignored-modules= 177 | 178 | # List of class names for which member attributes should not be checked (useful 179 | # for classes with dynamically set attributes). This supports the use of 180 | # qualified names. 
181 | ignored-classes=optparse.Values,thread._local,_thread._local 182 | 183 | # List of members which are set dynamically and missed by pylint inference 184 | # system, and so shouldn't trigger E1101 when accessed. Python regular 185 | # expressions are accepted. 186 | generated-members= 187 | 188 | # List of decorators that produce context managers, such as 189 | # contextlib.contextmanager. Add to this list to register other decorators that 190 | # produce valid context managers. 191 | contextmanager-decorators=contextlib.contextmanager 192 | 193 | 194 | [BASIC] 195 | 196 | # Good variable names which should always be accepted, separated by a comma 197 | good-names=i,j,k,ex,Run,_,logger 198 | 199 | # Bad variable names which should always be refused, separated by a comma 200 | bad-names=foo,bar,baz,toto,tutu,tata 201 | 202 | # Colon-delimited sets of names that determine each other's naming style when 203 | # the name regexes allow several styles. 204 | name-group= 205 | 206 | # Include a hint for the correct naming format with invalid-name 207 | include-naming-hint=yes 208 | 209 | # List of decorators that produce properties, such as abc.abstractproperty. Add 210 | # to this list to register other decorators that produce valid properties. 
211 | property-classes=abc.abstractproperty 212 | 213 | # Regular expression matching correct function names 214 | function-rgx=[a-z_][a-z0-9_]{2,30}$ 215 | 216 | # Naming hint for function names 217 | function-name-hint=[a-z_][a-z0-9_]{2,30}$ 218 | 219 | # Regular expression matching correct variable names 220 | variable-rgx=[a-z_][a-z0-9_]{2,30}$ 221 | 222 | # Naming hint for variable names 223 | variable-name-hint=[a-z_][a-z0-9_]{2,30}$ 224 | 225 | # Regular expression matching correct constant names 226 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 227 | 228 | # Naming hint for constant names 229 | const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 230 | 231 | # Regular expression matching correct attribute names 232 | attr-rgx=[a-z_][a-z0-9_]{2,30}$ 233 | 234 | # Naming hint for attribute names 235 | attr-name-hint=[a-z_][a-z0-9_]{2,30}$ 236 | 237 | # Regular expression matching correct argument names 238 | argument-rgx=[a-z_][a-z0-9_]{2,30}$ 239 | 240 | # Naming hint for argument names 241 | argument-name-hint=[a-z_][a-z0-9_]{2,30}$ 242 | 243 | # Regular expression matching correct class attribute names 244 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 245 | 246 | # Naming hint for class attribute names 247 | class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 248 | 249 | # Regular expression matching correct inline iteration names 250 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ 251 | 252 | # Naming hint for inline iteration names 253 | inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ 254 | 255 | # Regular expression matching correct class names 256 | class-rgx=[A-Z_][a-zA-Z0-9]+$ 257 | 258 | # Naming hint for class names 259 | class-name-hint=[A-Z_][a-zA-Z0-9]+$ 260 | 261 | # Regular expression matching correct module names 262 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 263 | 264 | # Naming hint for module names 265 | module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 266 | 267 | # Regular expression matching correct method 
names 268 | method-rgx=[a-z_][a-z0-9_]{2,30}$ 269 | 270 | # Naming hint for method names 271 | method-name-hint=[a-z_][a-z0-9_]{2,30}$ 272 | 273 | # Regular expression which should only match function or class names that do 274 | # not require a docstring. 275 | no-docstring-rgx=^_ 276 | 277 | # Minimum line length for functions/classes that require docstrings, shorter 278 | # ones are exempt. 279 | docstring-min-length=-1 280 | 281 | 282 | [ELIF] 283 | 284 | # Maximum number of nested blocks for function / method body 285 | max-nested-blocks=5 286 | 287 | 288 | [SPELLING] 289 | 290 | # Spelling dictionary name. Available dictionaries: none. To make it working 291 | # install python-enchant package. 292 | spelling-dict= 293 | 294 | # List of comma separated words that should not be checked. 295 | spelling-ignore-words= 296 | 297 | # A path to a file that contains private dictionary; one word per line. 298 | spelling-private-dict-file= 299 | 300 | # Tells whether to store unknown words to indicated private dictionary in 301 | # --spelling-private-dict-file option instead of raising a message. 302 | spelling-store-unknown-words=no 303 | 304 | 305 | [SIMILARITIES] 306 | 307 | # Minimum lines number of a similarity. 308 | min-similarity-lines=4 309 | 310 | # Ignore comments when computing similarities. 311 | ignore-comments=yes 312 | 313 | # Ignore docstrings when computing similarities. 314 | ignore-docstrings=yes 315 | 316 | # Ignore imports when computing similarities. 317 | ignore-imports=no 318 | 319 | 320 | [DESIGN] 321 | 322 | # Maximum number of arguments for function / method 323 | max-args=5 324 | 325 | # Argument names that match this expression will be ignored. 
Default to name 326 | # with leading underscore 327 | ignored-argument-names=_.* 328 | 329 | # Maximum number of locals for function / method body 330 | max-locals=15 331 | 332 | # Maximum number of return / yield for function / method body 333 | max-returns=6 334 | 335 | # Maximum number of branch for function / method body 336 | max-branches=12 337 | 338 | # Maximum number of statements in function / method body 339 | max-statements=50 340 | 341 | # Maximum number of parents for a class (see R0901). 342 | max-parents=7 343 | 344 | # Maximum number of attributes for a class (see R0902). 345 | max-attributes=7 346 | 347 | # Minimum number of public methods for a class (see R0903). 348 | min-public-methods=2 349 | 350 | # Maximum number of public methods for a class (see R0904). 351 | max-public-methods=20 352 | 353 | # Maximum number of boolean expressions in a if statement 354 | max-bool-expr=5 355 | 356 | 357 | [IMPORTS] 358 | 359 | # Deprecated modules which should not be used, separated by a comma 360 | deprecated-modules=regsub,TERMIOS,Bastion,rexec,optparse 361 | 362 | # Create a graph of every (i.e. internal and external) dependencies in the 363 | # given file (report RP0402 must not be disabled) 364 | import-graph= 365 | 366 | # Create a graph of external dependencies in the given file (report RP0402 must 367 | # not be disabled) 368 | ext-import-graph= 369 | 370 | # Create a graph of internal dependencies in the given file (report RP0402 must 371 | # not be disabled) 372 | int-import-graph= 373 | 374 | # Force import order to recognize a module as part of the standard 375 | # compatibility libraries. 376 | known-standard-library= 377 | 378 | # Force import order to recognize a module as part of a third party library. 379 | known-third-party=enchant 380 | 381 | # Analyse import fallback blocks. 
This can be used to support both Python 2 and 382 | # 3 compatible code, which means that the block might have code that exists 383 | # only in one or another interpreter, leading to false positives when analysed. 384 | analyse-fallback-blocks=no 385 | 386 | 387 | [CLASSES] 388 | 389 | # List of method names used to declare (i.e. assign) instance attributes. 390 | defining-attr-methods=__init__,__new__,setUp 391 | 392 | # List of valid names for the first argument in a class method. 393 | valid-classmethod-first-arg=cls 394 | 395 | # List of valid names for the first argument in a metaclass class method. 396 | valid-metaclass-classmethod-first-arg=mcs 397 | 398 | # List of member names, which should be excluded from the protected access 399 | # warning. 400 | exclude-protected=_asdict,_fields,_replace,_source,_make 401 | 402 | 403 | [EXCEPTIONS] 404 | 405 | # Exceptions that will emit a warning when being caught. Defaults to 406 | # "Exception" 407 | overgeneral-exceptions=Exception 408 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | python: 2 | version: 3 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | sudo: required 4 | 5 | dist: xenial 6 | 7 | python: 8 | - "3.6" 9 | - "3.7" 10 | 11 | cache: 12 | pip: true 13 | directories: 14 | - .hypothesis 15 | - .eggs 16 | 17 | install: 18 | - sudo apt-add-repository -y ppa:nextgis/dev 19 | - sudo apt update 20 | - sudo apt install -y libgdal-dev libnetcdf-dev 21 | - pip install --no-binary netcdf4 netcdf4 # see https://github.com/mapbox/rasterio/issues/1574 22 | 23 | before_script: 24 | - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter 25 | - chmod +x 
./cc-test-reporter 26 | - ./cc-test-reporter before-build 27 | 28 | script: python setup.py test 29 | 30 | after_success: 31 | if [[ "$TRAVIS_PULL_REQUEST" == "false" && "$TRAVIS_PYTHON_VERSION" == "3.7" ]]; then 32 | ./cc-test-reporter format-coverage -t coverage.py test-reports/coverage.xml; 33 | ./cc-test-reporter upload-coverage; 34 | pip install codacy-coverage; 35 | python-codacy-coverage -r test-reports/coverage.xml; 36 | fi 37 | 38 | deploy: 39 | provider: pypi 40 | user: "nlesc" 41 | skip_existing: true 42 | on: 43 | tags: true 44 | python: '3.7' 45 | password: 46 | secure: "HQnUOySc1Dn5JM7k4H5sGMOBxsh7mMJANbEUgbymQrDICXT2bL12EvCl/YKTOB2laqpL+8lnk8eSn6xKZRLbNB/vxvpiuAVqe2e6FAwTnwReQEJ3y691jYxxYBHAmttwB1qSMmrmkXLd9BMgaEj7Z75wj53L7wvOkbXTBpBZqpKXS3yWx6EJFp25ZCBzVgob6v9qAhnjIFdNxzyoeG4CFHIgCjtU2/RemS/75fBmqvsEWK/HL0/f//sNhorL2s/rzeCWJKIe+VsGJqA92n0/ODS8XafDCrL8KzQ1mPPx2756QaiBz6yXXZwo1/aF2FzrGQAtJAhyHYMi7ffRn3HOdqKHCv0+644H4TsxqnYPohfeoOxrmTbamnsyFkFwZW6RSl1Gmfl4uE4Rm6fxVjLXNtW6yj+g08KrH50OXIJArW5iXM8RTpvjsFC7lRKh6jdoNjYqFtgy/8fNPIlbEdPU8SMxma++xG51LZo07iEFUxmuakFN0NVoKkxlto6i5VmS1+VHfvtkeHi6D3a6Bliz+6QMZ2L+Hc6g27oIboro8kAxiEIf1KTZaksP5YTtdUo3T5xUMOdNw5W3s5Uqi3uDpsEnaDDndIb9Ud8KvI37OHHT09kqCwXLIKC5PjwCvGpsDvwyojYd+QaOiOzBr+D4SYVqi+FznnZWgO+dz4voMro=" 47 | -------------------------------------------------------------------------------- /.zenodo.json: -------------------------------------------------------------------------------- 1 | { 2 | "creators": [ 3 | { 4 | "affiliation": "Netherlands eScience Center", 5 | "name": "Weel, Berend" 6 | }, 7 | { 8 | "affiliation": "Netherlands eScience Center", 9 | "name": "Ranguelova, Elena" 10 | }, 11 | { 12 | "affiliation": "Netherlands eScience Center", 13 | "name": "Andela, Bouwe" 14 | }, 15 | { 16 | "affiliation": "University of Amsterdam", 17 | "name": "Filtenborg, Maximilian" 18 | }, 19 | { 20 | "affiliation": "University of Amsterdam", 21 | "name": "Barten, Derk" 22 | }, 23 | { 24 | "affiliation": "Netherlands 
eScience Center", 25 | "name": "Dzigan, Yifat" 26 | }, 27 | { 28 | "affiliation": "Netherlands eScience Center", 29 | "name": "van Haren, Ronald" 30 | }, 31 | { 32 | "affiliation": "Netherlands eScience Center", 33 | "name": "Drost, Niels" 34 | } 35 | 36 | ], 37 | "description": "Satsense is a library for land use classification using satellite imagery.\n", 38 | "license": { 39 | "id": "Apache-2.0" 40 | }, 41 | "title": "Satsense" 42 | } 43 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## version 0.9 4 | 5 | - Added a large amount of documentation 6 | - Available at https://satsense.readthedocs.io/en/latest/ 7 | - Includes: 8 | - Installation instructions 9 | - Example notebook for feature extraction 10 | - API Documentation and docstrings 11 | 12 | - Bug fixes: 13 | - Histogram of Gradients 14 | - fixed the absolute sine difference calculation 15 | - Fixed the padding around the data when splitting the generator. 16 | - Fixed the generation of windows 17 | 18 | - Development: 19 | - Added automated versioning 20 | - Increased code maintainability 21 | 22 | ## version 0.8 23 | - Initial release 24 | - Features included: 25 | - Histogram of Gradients 26 | - Pantex 27 | - NDVI 28 | - also available: 29 | - RgNDVI (Red-green based) 30 | - RbNDVI (Red-blue based) 31 | - NDSI (Snow Cover Index) 32 | - NDWI (Water Cover Index) 33 | - WVSI (Soil Cover Index) 34 | - Lacunarity 35 | - SIFT 36 | - Texton -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | # YAML 1.2 2 | --- 3 | abstract: | 4 | Satsense is a library for land use classification using satellite imagery. 
5 | authors: 6 | - 7 | affiliation: "Netherlands eScience Center" 8 | family-names: Weel 9 | given-names: Berend 10 | - 11 | affiliation: "Netherlands eScience Center" 12 | family-names: Ranguelova 13 | given-names: Elena 14 | - 15 | affiliation: "Netherlands eScience Center" 16 | family-names: Andela 17 | given-names: Bouwe 18 | - 19 | affiliation: "University of Amsterdam" 20 | family-names: Filtenborg 21 | given-names: Maximilian 22 | - 23 | affiliation: "University of Amsterdam" 24 | family-names: Barten 25 | given-names: Derk 26 | - 27 | affiliation: "Netherlands eScience Center" 28 | family-names: Dzigan 29 | given-names: Yifat 30 | - 31 | affiliation: "Netherlands eScience Center" 32 | family-names: van Haren 33 | given-names: Ronald 34 | - 35 | affiliation: "Netherlands eScience Center" 36 | family-names: Drost 37 | given-names: Niels 38 | cff-version: "1.0.3" 39 | license: "Apache-2.0" 40 | message: "If you use this software, please cite it using these metadata." 41 | repository-code: "https://github.com/DynaSlum/satsense" 42 | title: Satsense 43 | date-released: 2019-03-25 44 | version: "0.9" 45 | ... 46 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contributions are very welcome. Please make sure there is a GitHub issue 2 | associated with every pull request. Creating an issue is also a good 3 | way to propose/discuss new features or get help with using satsense. 4 | 5 | # Installation for development 6 | 7 | Please follow the installation instructions on 8 | [readthedocs](https://satsense.readthedocs.io/en/latest/installation.html) 9 | to get started. 10 | 11 | # Testing 12 | 13 | Please add unit tests for the code you are writing (e.g. when fixing a bug, implement 14 | a test that demonstrates the bug is fixed.
You can run the unit tests locally 15 | with the command 16 | 17 | ```bash 18 | python setup.py test 19 | ``` 20 | 21 | # Coding style 22 | 23 | Please make sure your code is formatted according to 24 | [PEP8](https://www.python.org/dev/peps/pep-0008/) and docstrings are written 25 | according to [PEP257](https://www.python.org/dev/peps/pep-0257/). Publicly visible 26 | functions should have 27 | [numpy style docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html). 28 | 29 | Please autoformat your code with the following commands before making a pull request: 30 | 31 | ```bash 32 | isort satsense/my_file.py 33 | yapf -i satsense/my_file.py 34 | ``` 35 | 36 | Please use prospector to check that your code meets our standards: 37 | 38 | ```bash 39 | prospector satsense/my_file.py 40 | ``` 41 | 42 | # Pull requests 43 | 44 | Please create a pull request early, to keep other developers informed of what you're doing. 45 | Limit the amount of work in a pull request to fixing a single bug or adding a single new feature. 46 | Make sure the unit tests on Travis pass and review the comments by Codacy (click the Travis/Codacy 47 | buttons below your pull request). Note that Codacy occasionally reports false positives, ask if in 48 | doubt. 49 | 50 | # Documentation 51 | 52 | All public functions should have 53 | [numpy style docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html). 54 | You can build the documentation locally by running 55 | 56 | ```bash 57 | python setup.py build_sphinx 58 | ``` 59 | 60 | Use 61 | 62 | ```bash 63 | python setup.py build_sphinx -Ea 64 | ``` 65 | 66 | to build everything from scratch. Please check that there are no warnings. 
67 | 68 | ## Converting Notebooks for documentation 69 | 70 | If you update the notebooks please update their counterparts in the doc folder by using `jupyter nbconvert` 71 | 72 | From the root of the project: 73 | ```bash 74 | jupyter nbconvert --to rst notebooks/**/*.ipynb --output-dir=doc/notebooks/ 75 | ``` 76 | 77 | # Creating a release 78 | 79 | Make sure to update the version number and release date in CITATION.cff. 80 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Include the license file 2 | include LICENSE 3 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Satsense 2 | ======== 3 | 4 | |Build Status| |Codacy Badge| |Maintainability| |Test Coverage| 5 | |Documentation Status| |DOI| 6 | 7 | Satsense is an open source Python library for patch based land-use and 8 | land-cover classification, initially developed for a project on deprived 9 | neighborhood detection. However, many of the algorithms made available 10 | through Satsense can be applied in other domains, such as ecology and 11 | climate science. 12 | 13 | Satsense is based on readily available open source libraries, such as 14 | opencv for machine learning and the rasterio/gdal and netcdf libraries 15 | for data access. 
It has a modular design that makes it easy to add your 16 | own hand-crafted feature or use deep learning instead. 17 | 18 | Detection of deprived neighborhoods is a land-use classification problem 19 | that is traditionally solved using hand crafted features like HoG, 20 | Lacunarity, NDXI, Pantex, Texton, and SIFT, computed from very high 21 | resolution satellite images. One of the goals of Satsense is to 22 | facilitate assessing the performance of these features on practical 23 | applications. To achieve this Satsense provides an easy to use open 24 | source reference implementation for these and other features, as well as 25 | facilities to distribute feature computation over multiple cpu’s. In the 26 | future the library will also provide easy access to metrics for 27 | assessing algorithm performance. 28 | 29 | - satsense - library for analysing satellite images, performance 30 | evaluation, etc. 31 | - notebooks - IPython notebooks for illustrating and testing the usage 32 | of Satsense 33 | 34 | We are using python 3.6/3.7 and jupyter notebook for our code. 35 | 36 | Documentation 37 | ------------- 38 | Can be found on `readthedocs `__. 39 | 40 | Installation 41 | ------------ 42 | 43 | Please see the `installation guide on readthedocs `__. 44 | 45 | Contributing 46 | ------------ 47 | 48 | Contributions are very welcome! Please see 49 | `CONTRIBUTING.md `__ 50 | for our contribution guidelines. 51 | 52 | Citing Satsense 53 | --------------- 54 | 55 | If you use Satsense for scientific research, please cite it. You can 56 | download citation files from 57 | `research-software.nl `__. 58 | 59 | References 60 | ---------- 61 | 62 | The collection of algorithms made available through this package is 63 | inspired by 64 | 65 | J. Graesser, A. Cheriyadat, R. R. Vatsavai, V. Chandola, 66 | J. Long and E.
Bright, "Image Based Characterization of Formal and 67 | Informal Neighborhoods in an Urban Landscape", in IEEE Journal of 68 | Selected Topics in Applied Earth Observations and Remote Sensing, 69 | vol. 5, no. 4, pp. 1164-1176, Aug. 2012. doi: 70 | 10.1109/JSTARS.2012.2190383 71 | 72 | Jordan Graesser himself also maintains `a 73 | library `__ with many of these 74 | algorithms. 75 | 76 | Test Data 77 | ~~~~~~~~~ 78 | 79 | The test data has been extracted from the Copernicus Sentinel data 2018. 80 | 81 | .. |Build Status| image:: https://travis-ci.com/DynaSlum/satsense.svg?branch=master 82 | :target: https://travis-ci.com/DynaSlum/satsense 83 | .. |Codacy Badge| image:: https://api.codacy.com/project/badge/Grade/458c8543cd304b8387b7b114218dc57c 84 | :target: https://www.codacy.com/app/DynaSlum/satsense?utm_source=github.com&utm_medium=referral&utm_content=DynaSlum/satsense&utm_campaign=Badge_Grade 85 | .. |Maintainability| image:: https://api.codeclimate.com/v1/badges/ed3655f6056f89f5e107/maintainability 86 | :target: https://codeclimate.com/github/DynaSlum/satsense/maintainability 87 | .. |Test Coverage| image:: https://api.codeclimate.com/v1/badges/ed3655f6056f89f5e107/test_coverage 88 | :target: https://codeclimate.com/github/DynaSlum/satsense/test_coverage 89 | .. |Documentation Status| image:: https://readthedocs.org/projects/satsense/badge/?version=latest 90 | :target: https://satsense.readthedocs.io/en/latest/?badge=latest 91 | .. |DOI| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.1463015.svg 92 | :target: https://doi.org/10.5281/zenodo.1463015 93 | -------------------------------------------------------------------------------- /doc/api/index.rst: -------------------------------------------------------------------------------- 1 | Python API Reference 2 | ==================== 3 | 4 | .. 
toctree:: 5 | 6 | satsense 7 | -------------------------------------------------------------------------------- /doc/api/satsense.bands.rst: -------------------------------------------------------------------------------- 1 | satsense.bands 2 | ============== 3 | 4 | .. automodule:: satsense.bands 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /doc/api/satsense.extract.rst: -------------------------------------------------------------------------------- 1 | satsense.extract 2 | ================ 3 | 4 | .. automodule:: satsense.extract 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /doc/api/satsense.features.rst: -------------------------------------------------------------------------------- 1 | satsense.features 2 | ================= 3 | 4 | .. automodule:: satsense.features 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/satsense.generators.rst: -------------------------------------------------------------------------------- 1 | satsense.generators 2 | =================== 3 | 4 | .. automodule:: satsense.generators 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /doc/api/satsense.image.rst: -------------------------------------------------------------------------------- 1 | satsense.image 2 | ============== 3 | 4 | .. automodule:: satsense.image 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | :special-members: __getitem__ -------------------------------------------------------------------------------- /doc/api/satsense.performance.rst: -------------------------------------------------------------------------------- 1 | satsense.performance 2 | ==================== 3 | 4 | .. 
automodule:: satsense.performance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/api/satsense.rst: -------------------------------------------------------------------------------- 1 | satsense package 2 | ================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | .. toctree:: 8 | 9 | satsense.image 10 | satsense.bands 11 | satsense.features 12 | satsense.generators 13 | satsense.extract 14 | satsense.performance 15 | satsense.util 16 | 17 | Module contents 18 | --------------- 19 | 20 | .. automodule:: satsense 21 | :members: 22 | :undoc-members: 23 | :show-inheritance: 24 | -------------------------------------------------------------------------------- /doc/api/satsense.util.rst: -------------------------------------------------------------------------------- 1 | satsense.util 2 | ================= 3 | 4 | .. automodule:: satsense.util 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | import os 9 | import sys 10 | 11 | # -- Path setup -------------------------------------------------------------- 12 | 13 | # If extensions (or modules to document with autodoc) are in another directory, 14 | # add these directories to sys.path here. If the directory is relative to the 15 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
16 | # 17 | sys.path.insert(0, os.path.abspath('..')) 18 | 19 | # -- Project information ----------------------------------------------------- 20 | 21 | project = 'satsense' 22 | copyright = '2018, Netherlands eScience Center' 23 | author = 'Netherlands eScience Center' 24 | 25 | # The short X.Y version 26 | version = '' 27 | # The full version, including alpha/beta/rc tags 28 | release = '' 29 | 30 | # -- General configuration --------------------------------------------------- 31 | 32 | # If your documentation needs a minimal Sphinx version, state it here. 33 | # 34 | # needs_sphinx = '1.0' 35 | 36 | # Add any Sphinx extension module names here, as strings. They can be 37 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 38 | # ones. 39 | extensions = [ 40 | 'sphinx.ext.autodoc', 41 | 'sphinx.ext.coverage', 42 | 'sphinx.ext.doctest', 43 | 'sphinx.ext.imgmath', 44 | 'sphinx.ext.intersphinx', 45 | 'sphinx.ext.napoleon', 46 | 'sphinx.ext.viewcode', 47 | ] 48 | 49 | # Add any paths that contain templates here, relative to this directory. 50 | templates_path = [] 51 | 52 | # The suffix(es) of source filenames. 53 | # You can specify multiple suffix as a list of string: 54 | # 55 | # source_suffix = ['.rst', '.md'] 56 | source_suffix = '.rst' 57 | 58 | # The master toctree document. 59 | master_doc = 'index' 60 | 61 | # The language for content autogenerated by Sphinx. Refer to documentation 62 | # for a list of supported languages. 63 | # 64 | # This is also used if you do content translation via gettext catalogs. 65 | # Usually you set "language" from the command line for these cases. 66 | language = None 67 | 68 | # List of patterns, relative to source directory, that match files and 69 | # directories to ignore when looking for source files. 70 | # This pattern also affects html_static_path and html_extra_path. 71 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 72 | 73 | # The name of the Pygments (syntax highlighting) style to use. 
74 | pygments_style = None 75 | 76 | # -- Options for HTML output ------------------------------------------------- 77 | 78 | # The theme to use for HTML and HTML Help pages. See the documentation for 79 | # a list of builtin themes. 80 | # 81 | html_theme = 'sphinx_rtd_theme' 82 | 83 | # Theme options are theme-specific and customize the look and feel of a theme 84 | # further. For a list of options available for each theme, see the 85 | # documentation. 86 | # 87 | # html_theme_options = {} 88 | 89 | # Add any paths that contain custom static files (such as style sheets) here, 90 | # relative to this directory. They are copied after the builtin static files, 91 | # so a file named "default.css" will overwrite the builtin "default.css". 92 | html_static_path = [] 93 | 94 | # Custom sidebar templates, must be a dictionary that maps document names 95 | # to template names. 96 | # 97 | # The default sidebars (for documents that don't match any pattern) are 98 | # defined by theme itself. Builtin themes are using these templates by 99 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 100 | # 'searchbox.html']``. 101 | # 102 | # html_sidebars = {} 103 | 104 | # -- Options for HTMLHelp output --------------------------------------------- 105 | 106 | # Output file base name for HTML help builder. 107 | htmlhelp_basename = 'satsensedoc' 108 | 109 | # -- Options for LaTeX output ------------------------------------------------ 110 | 111 | latex_elements = { 112 | # The paper size ('letterpaper' or 'a4paper'). 113 | # 114 | # 'papersize': 'letterpaper', 115 | 116 | # The font size ('10pt', '11pt' or '12pt'). 117 | # 118 | # 'pointsize': '10pt', 119 | 120 | # Additional stuff for the LaTeX preamble. 121 | # 122 | # 'preamble': '', 123 | 124 | # Latex figure (float) alignment 125 | # 126 | # 'figure_align': 'htbp', 127 | } 128 | 129 | # Grouping the document tree into LaTeX files. 
List of tuples 130 | # (source start file, target name, title, 131 | # author, documentclass [howto, manual, or own class]). 132 | latex_documents = [ 133 | ( 134 | master_doc, 135 | 'satsense.tex', 136 | 'satsense Documentation', 137 | 'Netherlands eScience Center', 138 | 'manual', 139 | ), 140 | ] 141 | 142 | # -- Options for manual page output ------------------------------------------ 143 | 144 | # One entry per manual page. List of tuples 145 | # (source start file, name, description, authors, manual section). 146 | man_pages = [( 147 | master_doc, 148 | 'satsense', 149 | 'satsense Documentation', 150 | [author], 151 | 1, 152 | )] 153 | 154 | # -- Options for Texinfo output ---------------------------------------------- 155 | 156 | # Grouping the document tree into Texinfo files. List of tuples 157 | # (source start file, target name, title, author, 158 | # dir menu entry, description, category) 159 | texinfo_documents = [ 160 | ( 161 | master_doc, 162 | 'satsense', 163 | 'satsense Documentation', 164 | author, 165 | 'satsense', 166 | 'One line description of project.', 167 | 'Miscellaneous', 168 | ), 169 | ] 170 | 171 | # -- Options for Epub output ------------------------------------------------- 172 | 173 | # Bibliographic Dublin Core info. 174 | epub_title = project 175 | 176 | # The unique identifier of the text. This can be a ISBN number 177 | # or the project homepage. 178 | # 179 | # epub_identifier = '' 180 | 181 | # A unique identification for the text. 182 | # 183 | # epub_uid = '' 184 | 185 | # A list of files that should not be packed into the epub file. 186 | epub_exclude_files = ['search.html'] 187 | 188 | # -- Extension configuration ------------------------------------------------- 189 | 190 | # -- Options for intersphinx extension --------------------------------------- 191 | 192 | # Example configuration for intersphinx: refer to the Python standard library. 
193 | intersphinx_mapping = { 194 | 'python': ('https://docs.python.org/3/', None), 195 | 'numpy': ('https://docs.scipy.org/doc/numpy/', None), 196 | 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), 197 | 'rasterio': ('https://rasterio.readthedocs.io/en/latest/', None), 198 | 'skimage': ('http://scikit-image.org/docs/dev/', None), 199 | 'sklearn': ('https://scikit-learn.org/stable/', None), 200 | } 201 | 202 | autodoc_mock_imports = [ 203 | 'cv2', 204 | 'descartes', 205 | 'fiona', 206 | 'netCDF4', 207 | 'numpy', 208 | 'rasterio', 209 | 'sklearn', 210 | 'skimage', 211 | 'scipy', 212 | 'shapely', 213 | 'affine', 214 | ] 215 | 216 | nitpicky = True 217 | 218 | autodoc_member_order = 'bysource' 219 | napoleon_google_docstring = False 220 | napoleon_numpy_docstring = True 221 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to satsense's documentation! 2 | ==================================== 3 | 4 | Satsense is a Python library for land use/cover classification using satellite imagery. 5 | 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | :caption: Contents: 10 | 11 | installation 12 | notebooks/index 13 | api/index 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`modindex` 20 | * :ref:`search` 21 | -------------------------------------------------------------------------------- /doc/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | Installing the dependencies 5 | --------------------------- 6 | Satsense has a few dependencies that cannot be installed from PyPI: 7 | 8 | - the dependencies of the `GDAL `_ Python package 9 | - the dependencies of the `netCDF4 `_ Python package 10 | 11 | **Ubuntu Linux 18.04 and later** 12 | 13 | To install the above mentioned dependencies, run 14 | 15 | .. 
code-block:: bash 16 | 17 | sudo apt install libgdal-dev libnetcdf-dev 18 | 19 | this probably also works for other Debian-based Linux distributions. 20 | 21 | **RPM-based Linux distributions** 22 | 23 | To install the above mentioned dependencies, run 24 | 25 | .. code-block:: bash 26 | 27 | sudo yum install gdal-devel netcdf-devel 28 | 29 | 30 | **Conda** 31 | 32 | Assuming you have `conda `_ installed and have downloaded 33 | the satsense 34 | `environment.yml `_ 35 | file to the current working directory, you can install 36 | all dependencies by running: 37 | 38 | .. code-block:: bash 39 | 40 | conda env create --file environment.yml --name satsense 41 | 42 | or you can install just the minimal dependencies by running 43 | 44 | .. code-block:: bash 45 | 46 | conda create --name satsense libgdal libnetcdf nb_conda 47 | 48 | Make sure to activate the environment after installation: 49 | 50 | .. code-block:: bash 51 | 52 | conda activate satsense 53 | 54 | 55 | Installing Satsense from PyPI 56 | ----------------------------- 57 | 58 | If you did not use conda to install the dependencies, you may still 59 | want to create and activate a virtual environment for satsense, e.g. using 60 | `venv `_ 61 | 62 | .. code-block:: bash 63 | 64 | python3 -m venv ~/venv/satsense 65 | source ~/venv/satsense/bin/activate 66 | 67 | Next, install satsense by running 68 | 69 | .. code-block:: bash 70 | 71 | pip install satsense 72 | 73 | If you are planning on using the :ref:`notebooks`, you can 74 | install the required extra dependencies with 75 | 76 | .. code-block:: bash 77 | 78 | pip install satsense[notebooks] 79 | 80 | Installing Satsense from source for development 81 | ----------------------------------------------- 82 | 83 | Clone the `satsense repository `_, 84 | install the dependencies as described above, go to the directory where 85 | you have checked out satsense and run 86 | 87 | .. code-block:: bash 88 | 89 | pip install -e .[dev] 90 | 91 | or 92 | 93 | .. 
code-block:: bash 94 | 95 | pip install -e .[dev,notebooks] 96 | 97 | if you would also like to use the :ref:`notebooks`. 98 | 99 | Please read our 100 | `contribution guidelines `_ 101 | before starting development. 102 | 103 | Known installation issues 104 | ------------------------- 105 | If you are experiencing 'NetCDF: HDF errors' after installation with pip, 106 | this may be resolved by using the following command to install 107 | 108 | .. code-block:: bash 109 | 110 | pip install --no-binary netcdf4 satsense 111 | 112 | see `this rasterio issue `_ 113 | for more information. 114 | -------------------------------------------------------------------------------- /doc/notebooks/feature_extraction.rst: -------------------------------------------------------------------------------- 1 | 2 | Feature Extraction Example 3 | -------------------------- 4 | 5 | In this example we will extract the Histogram of Gradients (HoG), 6 | Normalized Difference Vegetation Index (NDVI) and the Pantex features 7 | from a test satellite image. 8 | 9 | - The HoG feature captures the distribution of structure orientations. 10 | - The NDVI feature captures the level of vegetation. 11 | - The Pantex feature captures the level of built-up structures. 12 | 13 | The image will be split into blocks, in this example 20 by 20 pixels, 14 | and each feature is calculated for this block using a certain amount of 15 | context information called a window. A feature can be calculated on 16 | multiple windows to allow for context at different scales. 17 | 18 | In this example 19 | ~~~~~~~~~~~~~~~ 20 | 21 | - First we will define the Features we would like to extract and with 22 | which window shapes. 23 | - We will then load the image using the ``Image`` class. 24 | - Then we will split the image into blocks using the ``FullGenerator`` 25 | Class. 26 | - Then we will extract the features using the ``extract_features`` 27 | function.
28 | 29 | Live iPython Notebook 30 | ^^^^^^^^^^^^^^^^^^^^^ 31 | 32 | If you are reading this example on readthedocs.io a notebook of this 33 | example is available `in the 34 | repository `__ 35 | 36 | .. code:: ipython3 37 | 38 | # General imports 39 | import numpy as np 40 | 41 | import matplotlib.pyplot as plt 42 | import matplotlib.gridspec as gridspec 43 | %matplotlib inline 44 | 45 | # Satsense imports 46 | from satsense import Image 47 | from satsense.generators import FullGenerator 48 | from satsense.extract import extract_features 49 | from satsense.features import NirNDVI, HistogramOfGradients, Pantex 50 | 51 | Define the features to calculate 52 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 53 | 54 | First we define a list of windows for each of the features to use. 55 | 56 | Hog and Pantex will be calculated on 2 windows of 25x25 pixels and 23x37 57 | pixels. NDVI will be calculated on one window with 37x37 pixels. 58 | 59 | These window shapes are chosen arbitrarily to show the capabilities of 60 | satsense, for your own feature extraction you should think and 61 | experiment with these window shapes to give you the best results. 62 | 63 | N.B. The NDVI feature here is called NirNDVI because that implementation 64 | uses the near-infrared band of the image, there are several other 65 | implementations of NDVI available in satsense, see `the 66 | documentation `__ 67 | 68 | .. code:: ipython3 69 | 70 | # Multiple windows 71 | two_windows = [(25, 25), (23, 37)] 72 | # Single window 73 | one_window = [(37, 37),] 74 | features = [ 75 | HistogramOfGradients(two_windows), 76 | NirNDVI(one_window), 77 | Pantex(two_windows), 78 | ] 79 | 80 | Load the image 81 | ~~~~~~~~~~~~~~ 82 | 83 | Here we load the image and normalize it to values between 0 and 1. 84 | Normalization by default is performed per band using the 2nd and 98th 85 | percentiles.
86 | 87 | The image class can provide the individual bands, or a number of useful 88 | derivatives such as the RGB image or Grayscale, we call these base 89 | images. More advanced base images are also available, for instance Canny 90 | Edge 91 | 92 | .. code:: ipython3 93 | 94 | image = Image('../../test/data/source/section_2_sentinel.tif', 95 | 'quickbird') 96 | image.precompute_normalization() 97 | 98 | fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 8), sharey=True) 99 | 100 | ax1.axis('off') 101 | ax1.imshow(image['rgb']) 102 | ax1.set_title('RGB image') 103 | 104 | ax2.axis('off') 105 | ax2.imshow(image['grayscale'], cmap="gray") 106 | ax2.set_title('Grayscale image') 107 | 108 | ax3.axis('off') 109 | ax3.imshow(image['canny_edge'], cmap="gray") 110 | ax3.set_title('Canny Edge Image') 111 | 112 | plt.show() 113 | 114 | 115 | 116 | .. image:: feature_extraction_files/feature_extraction_5_0.png 117 | 118 | 119 | Generator 120 | ~~~~~~~~~ 121 | 122 | Next we create a FullGenerator which creates patches of the image in 123 | steps of 20x20 pixels. 124 | 125 | In this cell we also show the images, therefore we load the rgb base 126 | image into the generator. This is only needed here so we can show the 127 | blocks using matplotlib. In the next section we will be using the 128 | ``extract_features`` function to extract features, which will be loading 129 | the correct base images for you based on the features that will be 130 | calculated. 131 | 132 | The patch sizes are determined by the list of window shapes that you 133 | supply the ``load_image`` function. This is normally also provided by 134 | the ``extract_features`` function. 135 | 136 | .. 
code:: ipython3 137 | 138 | generator = FullGenerator(image, (20, 20)) 139 | print("The generator is {} by {}".format(*generator.shape), " blocks") 140 | 141 | # Create a gridspec to show the images 142 | gs = gridspec.GridSpec(*generator.shape) 143 | gs.update(wspace=0.05, hspace=0.05) 144 | 145 | # Load a baseimage into the generator. 146 | # The window is the same as the block size to show the blocks used 147 | generator.load_image('rgb', ((20, 20),)) 148 | 149 | fig = plt.figure(figsize=(8, 8)) 150 | for i, img in enumerate(generator): 151 | ax = plt.subplot(gs[i]) 152 | ax.imshow(img.filled(0.5)) 153 | ax.axis('off') 154 | 155 | 156 | .. parsed-literal:: 157 | 158 | The generator is 8 by 8 blocks 159 | 160 | 161 | 162 | .. image:: feature_extraction_files/feature_extraction_7_1.png 163 | 164 | 165 | Calculate all the features and append them to a vector 166 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 167 | 168 | In this cell we use the ``extract_features`` function from satsense to 169 | extract all features. 170 | 171 | ``extract_features`` returns a python generator that we can loop over. 172 | Each invocation of this generator returns the feature vector for one 173 | feature in the order of the features list. The shape of this vector is 174 | (x, y, w, v) where: 175 | 176 | - x is the number of blocks of the generator in the x direction 177 | - y is the number of blocks of the generator in the y direction 178 | - w is the number of windows the feature is calculated on 179 | - v is the length of the feature per window 180 | 181 | We use a little numpy reshaping to merge these feature vectors into a 182 | single feature vector of shape (x, y, n) where n is the total length of 183 | all features over all windows. 
In this example it will be (8, 8, 13) 184 | because: 185 | 186 | - HoG has 5 numbers per window and 2 windows: 10 187 | - NirNDVI has 1 number per window and 1 window: 1 188 | - Pantex has 1 number per window and 2 windows: 2 189 | - Total: 13 190 | 191 | .. code:: ipython3 192 | 193 | vector = [] 194 | for feature_vector in extract_features(features, generator): 195 | # The shape returned is (x, y, w, v) 196 | # Reshape the resulting vector so it is (x, y, w * v) 197 | # e.g. flattened along the windows and features 198 | data = feature_vector.vector.reshape( 199 | *feature_vector.vector.shape[0:2], -1) 200 | vector.append(data) 201 | # dstack reshapes the vector into an (x, y, n) 202 | # where n is the total length of all features 203 | featureset = np.dstack(vector) 204 | 205 | print("Feature set has shape:", featureset.shape) 206 | 207 | 208 | .. parsed-literal:: 209 | 210 | Feature set has shape: (8, 8, 13) 211 | 212 | 213 | Showing the resulting features 214 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 215 | 216 | Below we show the results for the calculated features. 217 | 218 | In the result images you can see the edges of the feature vector have 219 | been masked as the windows at the edge of the original image contain 220 | masked values. Furthermore, please keep in mind that the value for the 221 | feature in each block depends on an area around the block. 222 | 223 | HoG 224 | ^^^ 225 | 226 | Here is the result of the HoG feature, we display the first value for 227 | each window. 228 | 229 | Histogram of Gradients is a feature that first calculates a histogram of 230 | the gradient orientations in the window. Using this histogram 5 values 231 | are calculated. This first value is the 1st heaved central shift moment. 232 | Heaved central shift moments are a measure of spikiness of a histogram. 
233 | 234 | The other values are: the 2nd heaved central shift moment, the 235 | orientation of the highest and second highest peaks and the sine of the 236 | absolute difference between the highest and second highest peak (this is 237 | 1 for right angles). 238 | 239 | .. code:: ipython3 240 | 241 | fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 8)) 242 | 243 | ax1.axis('off') 244 | ax1.imshow(image['rgb']) 245 | ax1.set_title('Input image') 246 | 247 | ax2.axis('off') 248 | ax2.imshow(featureset[:, :, 0], cmap="PRGn") 249 | ax2.set_title('Hog Feature for window {}'.format(two_windows[0])) 250 | 251 | ax3.axis('off') 252 | ax3.imshow(featureset[:, :, 5], cmap="PRGn") 253 | ax3.set_title('Hog Feature for window {}'.format(two_windows[1])) 254 | 255 | plt.show() 256 | 257 | 258 | 259 | .. image:: feature_extraction_files/feature_extraction_11_0.png 260 | 261 | 262 | Normalized Difference Vegetation Index 263 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 264 | 265 | Here we show the result for the NDVI feature. The NDVI feature captures 266 | the level of vegetation, purple means very little vegetation, green 267 | means a lot of vegetation. 268 | 269 | .. code:: ipython3 270 | 271 | fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8)) 272 | 273 | ax1.axis('off') 274 | ax1.imshow(image['rgb']) 275 | ax1.set_title('Input image') 276 | 277 | ax2.axis('off') 278 | ax2.imshow(featureset[:, :, 10], cmap="PRGn") 279 | ax2.set_title('NirNDVI for window {}'.format(one_window[0])) 280 | 281 | plt.show() 282 | 283 | 284 | 285 | .. image:: feature_extraction_files/feature_extraction_13_0.png 286 | 287 | 288 | Pantex 289 | ^^^^^^ 290 | 291 | Here we show the results for the Pantex feature. The Pantex feature 292 | captures the level of built-up structures, purple means very little 293 | built-up while green means very built-up. 294 | 295 | ..
code:: ipython3 296 | 297 | fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 8)) 298 | 299 | ax1.axis('off') 300 | ax1.imshow(image['rgb']) 301 | ax1.set_title('Input image') 302 | 303 | ax2.axis('off') 304 | ax2.imshow(featureset[:, :, 11], cmap="PRGn") 305 | ax2.set_title('Pantex for window {}'.format(two_windows[0])) 306 | 307 | ax3.axis('off') 308 | ax3.imshow(featureset[:, :, 12], cmap="PRGn") 309 | ax3.set_title('Pantex for window {}'.format(two_windows[1])) 310 | 311 | plt.show() 312 | 313 | 314 | 315 | .. image:: feature_extraction_files/feature_extraction_15_0.png 316 | 317 | 318 | -------------------------------------------------------------------------------- /doc/notebooks/feature_extraction_files/feature_extraction_11_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/doc/notebooks/feature_extraction_files/feature_extraction_11_0.png -------------------------------------------------------------------------------- /doc/notebooks/feature_extraction_files/feature_extraction_13_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/doc/notebooks/feature_extraction_files/feature_extraction_13_0.png -------------------------------------------------------------------------------- /doc/notebooks/feature_extraction_files/feature_extraction_15_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/doc/notebooks/feature_extraction_files/feature_extraction_15_0.png -------------------------------------------------------------------------------- /doc/notebooks/feature_extraction_files/feature_extraction_5_0.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/doc/notebooks/feature_extraction_files/feature_extraction_5_0.png -------------------------------------------------------------------------------- /doc/notebooks/feature_extraction_files/feature_extraction_7_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/doc/notebooks/feature_extraction_files/feature_extraction_7_1.png -------------------------------------------------------------------------------- /doc/notebooks/index.rst: -------------------------------------------------------------------------------- 1 | .. _notebooks: 2 | 3 | Demonstration Jupyter notebooks 4 | =============================== 5 | 6 | There are a number of demonstration `Jupyter Notebooks `_ 7 | available to help you get started with satsense. They can be found in the 8 | `notebooks folder of our github repository `_. 9 | 10 | .. 
toctree:: 11 | :maxdepth: 1 12 | :caption: Contents: 13 | 14 | feature_extraction 15 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | ipython 2 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | name: satsense 4 | channels: 5 | - ioos 6 | 7 | dependencies: 8 | - affine 9 | - descartes 10 | - fiona 11 | - netcdf4 12 | - numpy 13 | - opencv<3.4.3 14 | - python>=3.6 15 | - rasterio 16 | - scikit-image>=0.14.2 17 | - scikit-learn 18 | - scipy 19 | - shapely 20 | # Notebooks 21 | - jupyter 22 | - matplotlib 23 | - nb_conda 24 | -------------------------------------------------------------------------------- /notebooks/Classification/Classification_Example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "# Define training and test data" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": null, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "from pathlib import Path\n", 27 | "\n", 28 | "sampling_step_size = 10, 10\n", 29 | "\n", 30 | "windows = (\n", 31 | " (50, 50),\n", 32 | " (100, 100),\n", 33 | " (200, 200),\n", 34 | ")\n", 35 | "\n", 36 | "home = Path.home()\n", 37 | "data = home / 'DynaSlum' / 'Work'\n", 38 | "\n", 39 | "train_files = (\n", 40 | " data / 'section_1_multiband.tif',\n", 41 | " data / 'section_2_multiband.tif',\n", 42 | ")\n", 43 | "\n", 44 | "test_files = (\n", 45 | " data / 'section_3_multiband.tif',\n", 46 | ")\n", 
47 | "\n", 48 | "ground_truth_file = data / 'slum_approved.shp'\n", 49 | "\n", 50 | "# Path where temporary files are saved\n", 51 | "work = home / 'satsense_notebook'" 52 | ] 53 | }, 54 | { 55 | "cell_type": "markdown", 56 | "metadata": {}, 57 | "source": [ 58 | "# Define the set of features for classification" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "metadata": {}, 65 | "outputs": [], 66 | "source": [ 67 | "from satsense.features import (NirNDVI, HistogramOfGradients, Pantex, Sift,\n", 68 | " Lacunarity, Texton)\n", 69 | "from satsense import Image\n", 70 | "\n", 71 | "train_images = [Image(file, 'worldview3') for file in train_files]\n", 72 | "\n", 73 | "ndvi = NirNDVI(windows)\n", 74 | "hog = HistogramOfGradients(windows)\n", 75 | "pantex = Pantex(windows)\n", 76 | "lacunarity = Lacunarity(windows)\n", 77 | "sift = Sift.from_images(windows, train_images)\n", 78 | "texton = Texton.from_images(windows, train_images)\n", 79 | "\n", 80 | "features = [\n", 81 | " ndvi,\n", 82 | " hog,\n", 83 | " pantex,\n", 84 | " lacunarity,\n", 85 | " sift,\n", 86 | " texton,\n", 87 | "]\n" 88 | ] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "metadata": {}, 93 | "source": [ 94 | "# Compute and save features" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "import os\n", 104 | "from pathlib import Path\n", 105 | "\n", 106 | "from satsense import extract_features\n", 107 | "from satsense.generators import FullGenerator\n", 108 | "\n", 109 | "def compute_features(filenames):\n", 110 | " paths = []\n", 111 | " for filename in filenames:\n", 112 | " image = Image(filename, 'worldview3')\n", 113 | " path = str(work / Path(filename).stem) + os.sep\n", 114 | " paths.append(path) \n", 115 | " if not os.path.exists(path):\n", 116 | " os.makedirs(path)\n", 117 | " generator = FullGenerator(image, sampling_step_size)\n", 118 | " for feature_vector in 
extract_features(features, generator):\n", 119 | " feature_vector.save(path)\n", 120 | " return paths\n", 121 | " \n", 122 | "train_data_paths = compute_features(train_files)" 123 | ] 124 | }, 125 | { 126 | "cell_type": "markdown", 127 | "metadata": {}, 128 | "source": [ 129 | "# Load training data" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "import numpy as np\n", 139 | "\n", 140 | "from satsense import Image, FeatureVector\n", 141 | "from satsense.util.mask import get_ndxi_mask, load_mask_from_shapefile, resample, save_mask2file\n", 142 | "from satsense.features import NirNDVI, WVSI\n", 143 | "from satsense.generators import FullGenerator\n", 144 | "\n", 145 | "def load_feature_vector(features, path):\n", 146 | " \"\"\"Load feature values from file.\"\"\"\n", 147 | " feature_vector = []\n", 148 | " for feature in features:\n", 149 | " vector = FeatureVector.from_file(feature, path).vector\n", 150 | " # flatten window/feature_size dimensions\n", 151 | " vector.shape = (vector.shape[0], vector.shape[1], -1)\n", 152 | " feature_vector.append(vector)\n", 153 | " feature_vector = np.ma.dstack(feature_vector)\n", 154 | " return feature_vector\n", 155 | "\n", 156 | "def load_ground_truth(filename, sampling_step_size, path, shape, crs, transform):\n", 157 | " ground_truth = load_mask_from_shapefile(filename, shape, transform)\n", 158 | " mask_file = path / 'ground_truth_mask.tif'\n", 159 | " ground_truth_mask = save_mask2file(ground_truth, mask_file, crs, transform)\n", 160 | " ground_truth_image = Image(mask_file, 'monochrome', normalization_parameters=False)\n", 161 | " ground_truth = resample(FullGenerator(ground_truth_image, sampling_step_size))\n", 162 | " return ground_truth\n", 163 | "\n", 164 | "labels = {\n", 165 | " 'other': 0,\n", 166 | " 'deprived_neighbourhood': 1,\n", 167 | " 'vegetation': 2,\n", 168 | "}\n", 169 | "\n", 170 | "x_train = []\n", 171 | 
"y_train = []\n", 172 | "\n", 173 | "for path, image in zip(train_data_paths, train_images):\n", 174 | " print(\"Processing\", image.filename)\n", 175 | " # Load feature vector\n", 176 | " feature_vector = load_feature_vector(features, path)\n", 177 | " \n", 178 | " label_vector = np.zeros(feature_vector.shape[:2], dtype=np.uint8)\n", 179 | "\n", 180 | " # Create deprived neighbourhood labels\n", 181 | " ground_truth = load_ground_truth(\n", 182 | " ground_truth_file, sampling_step_size, path, image.shape, image.crs, image.transform)\n", 183 | " label_vector[ground_truth] = labels['deprived_neighbourhood']\n", 184 | "\n", 185 | " # Create vegetation labels\n", 186 | " generator = FullGenerator(image, sampling_step_size)\n", 187 | " vegetation_mask = get_ndxi_mask(generator, NirNDVI)\n", 188 | " label_vector[vegetation_mask] = labels['vegetation']\n", 189 | "\n", 190 | " # Create x_train and y_train\n", 191 | " feature_vector.shape = (-1, feature_vector.shape[2])\n", 192 | " label_vector.shape = (-1, )\n", 193 | "\n", 194 | " x_train.append(feature_vector)\n", 195 | " y_train.append(label_vector)\n", 196 | " \n", 197 | "x_train = np.concatenate(x_train)\n", 198 | "y_train = np.concatenate(y_train)" 199 | ] 200 | }, 201 | { 202 | "cell_type": "markdown", 203 | "metadata": {}, 204 | "source": [ 205 | "# Train a classifier" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": null, 211 | "metadata": {}, 212 | "outputs": [], 213 | "source": [ 214 | "from sklearn.ensemble import GradientBoostingClassifier\n", 215 | "\n", 216 | "classifier = GradientBoostingClassifier(verbose=True)\n", 217 | " \n", 218 | "classifier.fit(x_train, y_train)" 219 | ] 220 | }, 221 | { 222 | "cell_type": "markdown", 223 | "metadata": {}, 224 | "source": [ 225 | "# Load test data and assess performance" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "execution_count": null, 231 | "metadata": {}, 232 | "outputs": [], 233 | "source": [ 234 | "from sklearn.metrics 
import classification_report, matthews_corrcoef, confusion_matrix\n", 235 | "\n", 236 | "test_data_paths = compute_features(test_files)\n", 237 | "\n", 238 | "test_images = [Image(f, 'worldview3') for f in test_files]\n", 239 | "\n", 240 | "for path, image in zip(test_data_paths, test_images):\n", 241 | " print('Performance on', image.filename)\n", 242 | " # Create x_test\n", 243 | " x_test = load_feature_vector(features, path)\n", 244 | " shape = x_test.shape\n", 245 | " x_test.shape = (-1, shape[2])\n", 246 | " \n", 247 | " # Predict the labels\n", 248 | " y_pred = classifier.predict(x_test)\n", 249 | " \n", 250 | " # Create y_test\n", 251 | " y_test = np.zeros(shape[:2], dtype=np.uint8)\n", 252 | " \n", 253 | " # Create deprived neighbourhood labels \n", 254 | " ground_truth = load_ground_truth(\n", 255 | " ground_truth_file, sampling_step_size, path, image.shape, image.crs, image.transform)\n", 256 | " y_test[ground_truth] = labels['deprived_neighbourhood']\n", 257 | "\n", 258 | " # Create vegetation labels\n", 259 | " generator = FullGenerator(image, sampling_step_size)\n", 260 | " vegetation_mask = get_ndxi_mask(generator, NirNDVI)\n", 261 | " y_test[vegetation_mask] = labels['vegetation']\n", 262 | " y_test.shape = (-1, )\n", 263 | " \n", 264 | " # Assess performance\n", 265 | "\n", 266 | " # Label the vegetation as buildings to create more accurate representation of the performance\n", 267 | " # y_pred[y_pred == labels['vegetation']] = labels['other']\n", 268 | " # y_test[y_test == labels['vegetation']] = labels['other']\n", 269 | "\n", 270 | " print(matthews_corrcoef(y_test, y_pred))\n", 271 | " print(classification_report(y_test, y_pred, labels=list(labels.values()), target_names=list(labels.keys())))\n", 272 | " print(confusion_matrix(y_test, y_pred))\n" 273 | ] 274 | } 275 | ], 276 | "metadata": { 277 | "kernelspec": { 278 | "display_name": "Python 3", 279 | "language": "python", 280 | "name": "python3" 281 | }, 282 | "language_info": { 283 | 
"codemirror_mode": { 284 | "name": "ipython", 285 | "version": 3 286 | }, 287 | "file_extension": ".py", 288 | "mimetype": "text/x-python", 289 | "name": "python", 290 | "nbconvert_exporter": "python", 291 | "pygments_lexer": "ipython3", 292 | "version": "3.6.7" 293 | } 294 | }, 295 | "nbformat": 4, 296 | "nbformat_minor": 2 297 | } 298 | -------------------------------------------------------------------------------- /notebooks/Performance/JaccardIndex_Multipolygons.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true 7 | }, 8 | "source": [ 9 | "# Jaccard index between two multi-polygons \n", 10 | "## Simple example " 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "This notebook illustrates the computation of Jaccard similarity index between two simple multi-polygons using `shapely` and `satsense` python libraries." 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | " ### Creating two simple multi-polygons " 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 1, 30 | "metadata": {}, 31 | "outputs": [ 32 | { 33 | "name": "stdout", 34 | "output_type": "stream", 35 | "text": [ 36 | "Multi-polygon 1 valid? True\n", 37 | "Multi-polygon 2 valid? 
True\n" 38 | ] 39 | } 40 | ], 41 | "source": [ 42 | "# Python, shapely and satsense package imports\n", 43 | "from matplotlib import pyplot\n", 44 | "from shapely.geometry import MultiPolygon\n", 45 | "from satsense.util.shapefile import show_multipolygon as shmp # visualization of multipolygons\n", 46 | "\n", 47 | "# define the nodes of valid multi-polygons\n", 48 | "a = [(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]\n", 49 | "b = [(1, 1), (1, 2), (2, 2), (2, 1), (1, 1)]\n", 50 | "c = [(2,3), (4,3), (4,4), (2,4), (2,3)]\n", 51 | "\n", 52 | "multi1 = MultiPolygon([[a, []], [b, []] , [c, []]])\n", 53 | "\n", 54 | "d = [(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]\n", 55 | "e = [(3, 3), (3, 4), (4, 4), (4, 3), (3, 3)]\n", 56 | "\n", 57 | "multi2 = MultiPolygon([[d, []], [e, []]])\n", 58 | "\n", 59 | "print(\"Multi-polygon 1 valid?\", multi1.is_valid)\n", 60 | "print(\"Multi-polygon 2 valid?\", multi2.is_valid)" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "### Visualizing the multi-polygons" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 2, 73 | "metadata": {}, 74 | "outputs": [ 75 | { 76 | "data": { 77 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAcsAAADHCAYAAAB2t3iIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAN1wAADdcBQiibeAAAIABJREFUeJzt3Xt8XHd95//XZ66S7JGcGwYHCEqlUKt0SbmnchaKKW1I\nty1ZCgnpo0CXbllKt2Qp7S9dCoaypQvL0ktoH9sAzfYBOOWSwrIhNWlat7EaLwmXQitDPMSJkzix\nHSeWRiNpZjTz/f3xPZLPjEY6c6SRZsZ+Px8PP+Rz5pzv+c58P3M+c77n8jXnHCIiIrKyRKcrICIi\n0u2ULEVERCIoWYqIiERQshQREYmgZCkiIhJByVJERCSCkqWIiEgEJUsREZEISpYiIj3EzJyZvaPT\n9WgHM9tvZl8ITb/azN7ZZLlbzOy+za1dvVQnNy4iIhLyauB1wB82zP89oH/zq3OGkqWISA8ws37n\n3Fyn69EJzrkfdLoO6oYVEWmRmb3ezL5rZiUze9jM/puZpYLXhoMu0qsb1kma2eNm9sHQvOeZ2e1m\nVgj+fd7Mnh56/RVBWT9lZv/HzGaAm1ao09VmdqeZnTCzaTM7aGavDr0+FpT1iob1tprZjJn9xirv\n90Ez+x9m9v+Z2WNmNmVmHzXvNWb2r0H9v2Rm54XWe3Owza3NylthW3uAdwGXBOs6M7sleE3dsCIi\nvSBIQH8F/CXwbuDf4LsHLwDe5pw7YmZfB14P3B5a9eXAduDWoJwRYAK4D/hF/H7494CvmNlLXP3o\nFp8E/gLfLTm/QtWGga8CHwWqwFXAHWb2b51zE865STM7CLwZ2B9a7xeANPDpiLd+LfB14C3AC4EP\n4g+0/i3wu/ju0ZuADwFviyhrNZ8ARoFXAq8N5p1cR3ltpWQpItKaDwD7nXNvCqb/xswAPmRmH3TO\nPYJPiO8zs6xzrhQs9wbgX51z/xJMvw94HLjKOVcGMLPvAN8DXkN9ov28c+53V6uUc27piNPMEsDf\nAz8C/Ad8UgafdP/QzN7hnJsJ5r0F+Ipz7lTE+54HfsE5Vw3e888Bvw6MOueOBNt9PvAm1pEsnXOP\nmNljQMk5d3Ct5WwUdcOKiEQwsyTwAuDzDS/9FX4/ekUw/TlgEPjpYL0UcE2w3KJXAX8N1MwsFSxz\nBHgQeFFD+bcTwcyeaWb/28weBRaACv5Cmcsa6gn+aBIz+yFgF/6oNcr+IFEuygMPLibK0LyLzCzT\nQnnrtvi5hT6/DadkKSIS7UJ8l+XxhvmL0+cDOOceBQ7gjyYBdgfr3tpQ1m/jk1r436XAs1Yov6ng\nSPL/AD8OvBf4CeDFwB1A3+JyzrkCPpG/JZj1ZvzR7d+sVn7gdMN0eYV5BmxKsmT5Z7fh1A0rIhLt\nCfxO+WkN87cHf58Mzfsr4A/MrB+fNL/lnDscev1J/JHlJ1bYTphrskzYCPBj+C7dpcQXbLvRJ4AD\nZjYK/BLwlw1HjO20eH61MXme17jgGr24TeW0TMlSRCSCc65qZt/Ad2P+Weil1wM14J7QvM8Df4S/\nSOW1+Atfwu7Cn1P8RsPFPGuxmBQXz49iZpcA48B3Gt7DP5nZ94FPAc8GblnntlfzSPB3J8F5UzN7\nKb6LejVlQkfEK3HObfqVseqGFRFpzfuAnzCzvwhu6fhN/FWsNwcX9wDgnDuBv+r0fwDb8N2fYXuA\nHwVuN7PXBbeJXB/cHvGKmHX6Hj4xfTS4heRa4GvAoyss/0n8ucp7nHPfi7mtOL4e1OGPg1tMfhG4\nGZiOWO97wPbg1pMXmdlzNrCOsShZioi0wDn3NfxtFC8CvgK8E3+7RrNHz90KPAM46Jx7sKGc+4GX\nAbPAn+PPL74ff3SYj1mnEv4CogXgC/jk/SHgH1ZY5UvB30/F2U5cwVW+r8UfdX8Bf//kfwKeilj1\nc/gj3g8D9+J/WHQFW38vgIiI9AIzezs+Ee1wzkUd5UmIzlmKi
Jzlgu7My4DfAW5RooxPR5YiIme5\n4LFxb8R3z77eORfVHSoNlCxFREQi6AIfERGRCGdlsjSzPWb2RGj6smDetoblmj4Zf4PrVjfYaa8w\ns4yZfcTM7jazOTM7Z7okFE/tZ2YvDh7RdiSIp++b2fvMLPIeu25mZ9HAzGHBbRy3BO1UWxwNpNPM\nj96ybESVhmWea2Z/Zmb3m9msmT1gZn/U+P2NclYmyyYuw98j1fjh3I5/puPspteo9wwAb8V/Vv/U\n4bp0muJp/d6AHy3j9/GjZHwc+C/AZzpZKVnROP7+zHvxj8nrJT+J/17ehH9Q/QfxD5f4WvC4wJac\n01fDOudO0kVDwHQz59xpMzvfObf4y/mVna5Tt1E8xfIHzrnwo932m9k88L/M7BLn3EOdqpg09SfO\nuT8C6PS4kmuwF/h46GlJ+83sEWAfcCUr35NapyNHlosDeQZPnJgMDo1vN7PzzWzEzP7ezIrBMv8m\ntN5zgkPun2lW3grbegX+BmKAI8H6DwavtdRtFqrvz5vZ98xs3swOmNlYw3IDZvbH5gd6nTezey00\nCGuTclselNXM3mF+sNmi+YFWdzeu28r2F7vtzOyNZpY3P1jsHWb2zNU+A4A2PJprQyielpbvmXhq\nSJSLvhX83bHauu1mZwY4/t3gvc6Y2WfMbKhhueHgs5o2P+DxV8yPTblSuW8PymocAHlxYOfnB9NZ\n892Ep83slPnTHe+0hlMdrWw/KPc3zOz3zeyk+QGhP25m2dAy28zsE2Z2LGjXo2Z282qfkXOuFv1J\nNv0MftjMbg1ibdb8YNHvtNARXejzeIX5QbBnzHeVvr1JeW8Pxe1X8A9+WJVz7lSTfVfsWOtkN+yz\n8ePDvQf4j/in5v85/skXtwKvwx/53mrmB41bo28Cvxn8/xr84fhrV158RZcA/xP/hIw3AkPAPqs/\nx3Iz/qn+/y3YxsP4R1rtalagc24SWByUNaxuUFYzey3wJ/jRBV6Lf+bjJ5sU2er2X4p/6si78J/9\nC/CffS9TPPV+PF2Bf+LLD9aw7npdhx8661fw3cFXE3rQeZBs7sI/6/RX8J/xMPAPZnb+CmV+Fkji\nYy/sLcA3nXP/HEx/OCjv/cD1+Fh+V3iFmNt/Fz4J/CLwEeBXgd8Ivf4/8V2qNwA/hb/3cqN+CF8M\nHMbHx2vwMfV+/KgrjW4G/hkfa/uBj5vZSxZfND+O5seB/4v/7n2XtT+JaHFItftbXsM5t+n/8I8z\nWgB+KDTvw/gG+6XQvNcE83YG088Jpn+mSXn3hab3AE+Epn8mWO85Deu9OZi/tYX6OuDHQ/MuCd7D\n24Lpnfgv+ptCyySAfwH2hebtB74Qmn4rMBOuA/CPDcvcC9zeUKc/Der0ijVsfwo4LzTvnUFZ/S22\n3zsIDjS74Z/iqbfjKVjn6cAJ/A3zmx0/D+JHAgl/ZtcH738xVt4WtM+loWWeiX/w942heQ54R2j6\n08A/hKa3Bu3zjmD6AmAOeHdoGQP+Nfwdi7n9f2x4f1/CP3ZvcfpfgF9fx+d131raKXhfKXxyfiA0\n/xVBvT8QmpfGn9L4g9C8rwN3NJR5czhuW6zHAHAIP05ny/Xv5JHlg8658C/IxWci/l2TeRdvRoXM\nLGn1g4qGj0BOOOeWLmxx/pzKN4DFXz4vxgfD50PL1ILppkcCgVUHZTU/sOmP4Y8Cwhqn42z/Xld/\nU/Jk8HdTPucNonjyei6ezA8Y/Dl8ErmhlXU2wJ3OuZnQ9F/j3//iUFAvwR8NPrC4gPMPT59g9fb4\nJHClmV0aTL8enzA+G0z/KH6UjaXP3/k9+leoF2f7X2uYnsQn1kXfBt4ddGlexgYysz4ze7+Z5fHP\nvq3geyqGbfmgzUv1ds5V8EekzwzKSeF7LL7csM5tMetj+DZ5GvDLcdbtZLJsNnho4/zFeZt1Ofld\n1A8o+vLQayeaLH+CM33mz
wBmnHONV0IeBwbC5wzCXPSgrBfiu3IaLxxpnI6z/ZU++16+bF/xRO/F\nU7Dz+kv8kFWvcZ17skxdewTve4b69mg2EPNxgoGfV7AfeIAzXeNvAb7snFsc//Lpwd9W2qPV7Tdr\nj3BbvAN/tPle4Ptmdtj8aCUb4b/jT1v8Ob5n58X4q1FheXysVu/FuG383jT7HkXV57XAz4d/eLSi\n166G3egBRX8VyIWmvx/6f+Ogr4vz/jX4/2PAVjMbaNjBbAdmnR8dYCWrDcr6BFAFLmpYp3F6Pds/\nVymezuhUPP0h8HPAT7qNHTIqSl17mNkAvsv0sWDWY/iE3mg79QM/13HOOTP7FPAfzezT+KPAq0KL\nLN6GcVFDOc3aI/b2V6jTaeA/A//Z/AVvvwV8xsy+4/x573b6BfyVtB9enGFmV6+hnMW4bfzeNPse\nNWVmN+AT97XOubvjVqDX7rM8gf+FvnNxRnCl2Y9HrNfSL13n3Pedc/eF/hVCLz/NzJa2Y2bPxncL\nfD2YdS++7/x1oWUsmD4Qsd1/wu9Ilw3K6pxbwF+59XMNq/1sw/Sat38OUzydsenxZGY34o9yftE5\n1+kY/Umrv2r1tfj3v3hV9P8DXmhmw4sLmNnF+FiJqvst+O7ET+LHeLwz9Np38T/altoj+Jz/XUMZ\n69n+ipxz3wHejc8FP7zWclbRT/3A1En8MGexrBK317Syvpldjx9O7b845xrHF21JTx1ZOudqZvZl\n4AYzewh/2P4u/Any1Sz+ov9VM7sV/8v4uzE3/wTwaTN7T7C99xNckBDU7ZCZ7QVuMrMc/oq+X8EH\n4H9qofxP4q9cazYo64eAL5rZTfhzG+P4q/XAX4TQju1HMrOrgC3A5cH04o70XteD98UpnjoXT2b2\nRvwDCW4BHjWzl4Ve/oHz96xupjn8lb4fwXd5fgT469CR1i34KzjvMLP34o9y3odvx/+1WsHOuWNm\n9jf4z/hDoaN8nHOnzN+28X4zq+AvPHkLMEj9Fapr3n4jMzuAPyf7L8E2fgUocuaHWrN1LuLMaYTz\ngEsWv//OudWeIHUn8GvBOcsngV8Dmp5CaMHvA7eZ2Z8F9X858NNRK5nZy/Hn7L8GHGyItUdcaODu\nVcW9oqkd/2i42jCY92YariSkydWK+G6HL+NH3H4If6l6XXk0XL0YzHtXsPwC/mKQpttcrb74XzH3\n438pTQDPa1huAH9J/vFgmfuAn2pYZj+hKxND80eCurx1hTr8On5E9Fngq/juDQdcvt7tc+ZqtOdF\nfA4PBss1/ntzJ+JI8dS78cSZK4I7Hk9BXH80aOfj+MSxF9jWsNyl+HN9Bfz5zP8LjDYsU3c1bGj+\nW4PXRpu81gf8Gf6q4qeAPw7qcrod22+MX/wPge8G5ZwG/h64MuIzWmzTZf8i1tuOT2zTwWf7YXxy\nXvqerBQvK8TWOxri9tVEXA0bvP+VYm1Pq3GiUUdaYP45iM9zzr1oA7cRa1DW4IjkvwLnO+eijoSk\niyieuov5h0p8wTn3m1HLrmMbnwOe4Zy7ssXl/xZIO+deHrmwbIqe6oY9G1kLg7IGXSA34n8BzuIf\n0fTbwCfPtR2brE7x1F3M7EeBF+F7EZqeqzOzn8A/2OGb+PsL3wDsJrj9R7qDkmXn7eHMoKy/u8Iy\nZfy5ol/CP+nlMeCPVllezl17UDx1k6/gb3v4U7fyub0Z4OfxP2D68PcXvnmV5aUD1A0rIiISoddu\nHREREdl0SpYiIiIRlCxFREQibNoFPtYwNpuc3Zxz6xkGa1WKpXOP4knaZa2xtKlXw+pionODrWu4\nyNYols4diidpl/XEkrphRUREIihZioiIRFCyFBERiaBkKSIiEkHJUkREJIKSpYiISAQlSxERkQhK\nliIiIhGULEVERCIoWYqIiERQshQREYmgZCkiIhJByVJERCSCkqWIiEgEJUsREZEIaxrP0sz
6ge8C\nFzrntrW3Sr3NOUc+n2dycpJCoUAul2NsbIyRkZFNGZevFd1UR8XS5mt3+yueekcvtH2tVmNiYoJ8\nPk+lUiGdTjMyMsL4+DiJROeO72wtg56a2UeAFwAvbDUgzcyd7QOsOue46667OHLkSN1gsmbG8PAw\nu3fv7njC3Iw6mlnLo5ErljZXu9tf8dQ7eqHta7Uae/fupVgsLntty5YtXHfddetKmHFiqVHsrZrZ\nC4GfBv77WjZ4Nsvn88sCB3xQHTlyhHw+36GandFNdVQsbb52t7/iqXf0QttPTEw0TZQAxWKRiYmJ\n2GW2S6xuWDNLATcDv4bOdy4zOTlZFzjpuTnSpdLS9AMHDjC6ZUsnqlZXh/6nnlqarmSzVPr7AR/k\nk5OTjI6Obng9FEudsSxGU3Okk6EYPXyA0Utbj9EHDh+gPxOKp2qWyoLiqRs1tn2qVCJZqSxNHz54\nkNHzz2+5vMMHD5IpFM6UlyyRTC4sTT/4jTsZPS9eHY9PTjAYqlM50c98anBpOp/Pc+WVV8YrtE3i\nnrN8N/At59w/mtkrVlvQzPYA71tjvXpSIRQ46bk5rv7Yx+gPzTMz2LOnAzU7Y7xYrPvCzOVy3H7D\nDUsJM/weNphiqQPqYjQ1x9VXfIz+TEOMntrTcnnjP1zEPTcUT+Uct99zw1LCVDx1j3BbpEolxvfu\nZWBqamleIpmEz3ym5fKef+wYtWoVAOurse3XHyN5/plkaRh8872x6nj1M+brpsuFAb489VtLCbMS\nSqSbreVkaWYjwNuAH2tleefcHmBPaP2z/qRALpdjdnYWgHSpRH+hQCWTwQV97Kl0GgYHVytiw1XN\nWAgCzmo1+gsF0qXSUrLM5XIbXgfFUufUxWiyRH+mQKWawdVCMWqtx2jVheIpUaM/UyCdLC0lS8VT\n9wi3fbJSYWBqitLAALWUTwN9fX2wfXvL5VVLJebnfXJLbK1gT4PKTBa34GMpkUjSl4jXk1aZSeDw\nzZFIVsnkZsk8Ncc8PibT6XSs8topzpHlLmA7cH9w0jYN5MzsCeBq59z/24D69ZSxsTFOnDhRd+Tm\nEgkfjGZkcznIZDpYQ8jmcpQLBXCOxMJC3WtmxtjY2GZUQ7HUIU1jtJag5lKAkc3mwFqP0Ww2R7lc\nAByJ2gIkz7ymeOouzdq+lkpRDRLQ0Pbt0NfXcnlD27dTPHbMT6TBmVGrpXBVnyz7sgPg4u3vksl+\nyuXyiq+PjIzEKq+d4vTtfw4YAS4P/r0VKAT//1b7q9Z7RkZGGB4eXn4FmBnZTIZsNtuZioVks1my\nmQw01HHxCrZNCkbFUoesGKMY2Wz8GM1ms2SzGUDx1O1WbnvIDQ4yGLPXa3BwkNwK66TTGTJrODAY\nGBggYc3T0pYtWxgfH49dZru0fGTpnJsFZhenzeykn+0e2YiK9SIzY/fu3eTzeR44cAAzI5VOk83l\nyGazdMNdlob/YmRKJUqFAlYqcdFFF3Hprl2bdl+cYqlz6mL0cChGs0GMxmx+M8jlBslkSpRKBcyC\neBpVPHWbcNsfPniQRDJJX18fQ9u3Mzg4GLutzIyLd+xgeutWpsrHMUuQTCTJDPSTyWTWtL8zYHBo\nkNnZWarB+dBUOs3OnTt78z7LNW3oHLmXacmxY3DFFTA01PGu1xWVyzA1BffcAzt2tK3Y9dzL1GL5\n51YsbZTqMTh1BdhQrK7XFbkyuCm44B5IKp662qlTcP31/hxljK7XFfXPwzV/BzP9UE1GL98KK0Nm\nCl5wD1zYniuqN/U+SxERkXONkqWIiEgEJUsREZEISpYiIiIRlCxFREQiKFmKiIhEULIUERGJoGQp\nIiISQclSREQkgpKliIhIhLjjWUqEWq3GxMQEj957L1fPzFCpVklv3crWrVu74tmwAA4oBc+GTRaL\nTOzbt6nPhpXOWorRh+/l6pfOUFmoks4EMbqG5ncuiKd
SgaQVmbhv36Y+G1Zat9j2D3/721z58MPM\nFwpsveACnv70p6+prZxzPP7448zUTvGM4gzV6QopyzIwMLDm/Z2D4Nmwc6S3znP7F7/I9rHxjj8b\nVsmyjWq1Gnv37qVYLDIQDH/lnGN+bo5yucz555/f8YTpgML0NKVymUSlQsI5Tp48ydH9+3nooYfY\nvXu3dnBnsboYzYZidD4UozGa3zkoFKYplcokrEIiHcTTo4qnbhNu+2wwBqmr1Th9+jQzMzOxf9w4\n58jn8ywsLJDcWvPzcJTLZRYqCwwODcbe3zlgemqamquRSPrn9S5UKhw6dIijR49y3XXXdSxhqhu2\njSYmJigWi01fq1WrzMzMbHKNliuVSpTKZb+XC3HOceTIEfL5fIdqJpth1RitxY9Rf0RZBhRP3W61\ntl9YWODxxx+PVd7jjz/OQsOYuItqrrY00HQcs7Oz1Fyt6WvFYpGJiYnYZbaLjizbqNmOIVGrQRBQ\nlZmZjo9AUioUSCyObF+rD0rnHJOTk4yOtucJ/9J9msZooga1IEbLM7EG7C2VCiQsiKeE4qmbNWv7\nZLAvAJg5dQrOO6/l8mZOnSIZ7EMSlQrmHInkAq7mj8Gq1SpYvBRTrc4tHVEmktWm7+HKK6+MVWa7\nKFm2USUUeJW+PhYyGVKNo35PTW1yreoli0USoaPKuVyOSmjA30Kh0IlqySapi9GFPhaqGVLJMoRH\nVXKtx2jSiiTSoXgq56hUFU/dKNz2C5kM5YEBMrOzMDd3ZqHjx1sur+/06aX/W6kGpx3prSUIjx+e\naX6UuJL01vm66XJhgHKif2k6/B42m5JlG6XT6aXGrPT18aUbbyQ9f6bxU6kU1157baeqB8DEvn2c\nPHlyabqSzVLpPxOMuVyuE9WSTVIXo9U+vnT3jaRToRhNp7j2Da3H6MR9DfFUzVJZUDx1o3DbVzMZ\n9r/pTXU/5tPpNG984xtbLu/uz362LnklT5ZJPXlmOpVO87p//+9j1fH2L36RhVCZ5UQ/86nBujp2\nipJlG42MjHDo0KGl6UpfH5XQwKo7d+5s6yDLa3Hprl0c3b+fZoPdmhljY2MdqJVslmUxWu2jUg3F\n6KU7Yw3cfOnoLo4+qnjqBY1tX81kqIZOC126cydccEHL5T3r8svrymu0c+fO2IM2bx8bX7XMkZGR\nWOW1ky7waaPx8XG2bNnS9LUtW7YwPj6+yTVabmRkhOHh4WVXvZkZw8PDHQ1G2XjtjlHFU+9od9tv\nxP6um/eh1uwX4YZsyMxt1rY6afE+pnw+T6VSIZ1OMzIy0vF7hMIWL/menJykUCiQy+UYGxtr231x\nZoZzbsPuFzhXYmmjtDtGFU+9o91tvxH7u43ch64nlpQspe20c5N2UjxJu6wnlrrjUEdERKSLKVmK\niIhEULIUERGJoGQpIiISQclSREQkgpKliIhIBCVLERGRCEqWIiIiEZQsRUREIihZioiIRFCyFBER\nidDyEF1mlgVuAl4FXAg8CnzYOfepDarbpmj3Q6A3+qHSZ4tuiKdeaKteqGOndUMsQW/sS3phoIdu\n1fKD1M1sC/DbwP8GHgBeCtwBvME597UW1u+6hxU757jrrrs4cuRI3Xh8i8ML7d69O1ZQtru8XtXK\nw4rXE0/tiKVeaKteqONmiIqnbtg39cK+pFarsXfvXorF4rLXtmzZwnXXXXfWJ8xNeZC6c67onHuv\nc+4HzjsI/D2way0b7gb5fH5ZMIIP1CNHjpDP5zta3tms0/HUC23VC3XsBp2OJeiNfcnExETTRAlQ\nLBaZmJiIXea5pOVu2EZm1ge8BPhs+6qzuSYnJ+uCMT03R7pUWpp+4MABRlcYiLSZBw4coP+pp5am\nK9kslf5+wAf55OQko6PxRg4/V2x2PC1r+9Qc6WSo7Q8fYPTS1tt+Izxw+AD9mVA8VbNUFhRPUTqx\nb2qMp1SyRDJRWZo
+fP9BRn/o/JbLO3z/QTKpwpnyymWSlTPlPfiNOxk9L14dj09OMBgqo5zoZz41\nuDSdz+e58sor4xV6DllTsjR//P8J4DBw2wrL7AHet+aabYJC4UwwpufmuPpjH6M/NM/MYM+elssb\nLxbrvjBzuRy333DDUsIMb0/OiIqnjYilurZPzXH1FR+jP9PQ9qf2tHOTsY3/cBH33FA8lXPcfs8N\nSwlT8bRcp/ZN4bZIJUuMP28vA31TS/MSySQ89ZmWy3v+JceoPbMKgNVqbHvsMZILC0uvGwbffG+s\nOl79jPm66XJhgC9P/dZSwqyEEqksFztZBsH4p8BzgVc552rNlnPO7QH2hNbrrhOWQC6XY3Z2FoB0\nqUR/oUAlk8EF/fapdBoGB1crok7VjIUg4KxWo79QIF0qLSXLXC7X5nfQ+1qJp42Ipbq2T5bozxSo\nVDO4WqjtrfW23whVF4qnRI3+TIF0srSULBVP9Tq5bwrHUzJRYaBvilJlgFrN72L7+vogub3l8qq1\nEvMln9ysUmHbCUcllV3aNyUSSfoS8Xo+KjMJHP6tJpJVMrlZMk/NMY+P83Q6Hau8c02sZBkE48fx\nJ9B3O+emIlbpamNjY5w4caLuaNAlEtRSKTAjm8tBJtNyedlcjnKhAM6RCP0KBH+kMjY21ra6nw06\nGU9N276WoOZSgJHN5sBab/uNkM3mKJcLgCNRW4DkmdcUT/U6vW9qFk+1WopqzSegoW3bwfpaLm9o\n23aKx44BkKgBlQQ1S+GcT5Z92QFw8eIzmeynXC6v+PrIyEis8s41cS99ugkYB37SOfdU1MLdbmRk\nhOHh4eVXlZmRzWTIZrOxystms2QzGWgob/EKNgXjMh2LpxXbHiObjd/2GyGbzZLNZgDFUws6um9a\nOZ4glxtkMEYPFcDg4CC5XPN10ukMmRg/4hcNDAyQsOa7/C1btjA+Ph67zHNJnPssLwHeDpSAh0JB\n8Wnn3Ns2oG4bzszYvXs3+XyeBw4cwMxIpdNkczmy2Sxxry82IDc4SKZUolQoYKUSF110EZfu2qX7\n4hp0Op7q2v5wqO2zQdt3QVOZ+R1tJlOiVCpgFsTTqOIprNOxFNRhKZ4O33+QRDJJX18fQ9u2Mzg4\nGLutzIyLL97B9PRWpo6fwCxBMpkk099PJpOJvW8Cv38aHBpkdnaWatWfD02l0+zcuVP3Wbag5fss\n172hLrzPss6xY3DFFTA0FKvrdUXlMkxNwT33wI4d6y+vh6znXqYWy29vLFWPwakrwIY63vW6IlcG\nNwUX3AP0RUMVAAAOBUlEQVRJxVOby29vPNVOwVPX+3OUMbpeVzQ/D3/3d9DfD8lk9PKtsDJkpuAF\n98CF584V1Ztyn6WIiMi5SslSREQkgpKliIhIBCVLERGRCEqWIiIiEZQsRUREIihZioiIRFCyFBER\niaBkKSIiEkHJUkREJMKaB3/uBOcc+XyeyclJCoUCuVyOsbGxdT0ns1qtcscddzB16BA/OzNDuVol\n2d/P0NDQmp6/6IBS8GzYZLHIxL59ejasyDlgcf90+P6DPP+SY1RrpTU/GxagVnM8/PBRSlNT7JiZ\nYaFcJpnJsHXr1jXtm8Dvn8rlMuVKkfRAibv/9k6e80K0f2pBzyRL5xx33XUXR44cWRoGZ3Z2lhMn\nTvDQQw+xe/fu2I1drVa55ZZbqFarDCw+G9I5KuUyTzzxBBdeeGGsoHRAYXqaUrlMolIh4RwnT57k\n6P79a66jiHS/8P4pkypQe2aV+dI8xWPHKBRmuPjiHbG++7Wa4/77v49zrq77b2FhgdOnT7Nt27bY\nCdMBxeIslUqZRLKKw3HqiVM8qP1TS3qmGzafz9clykXOOY4cOUI+n49d5h133LH09P1lnGNqKt6Q\neKVSiVK5DG2so4h0v5X2TwCFwjTT09Oxynv44aNNy1o0MzMTu47lcplKZfl4lto/t
aZnjiwnJyfr\ngic9N0e6VFqafuDAAUa3xBs5fOrQoaUjyv7pacw5ErUaBAM3V6tV/6T/FpUKBRKLI9vX6gdpd84x\nOTnJ6Oi584R/kXNF4/7JajWsUvEDNwNTx08wFGOM1NLU1NKRTLKyAK5+n1Itl2GlH/orKM/NLZVh\npv1TXD2TLAuFwtL/03NzXP2xj9EfmmdmsGdPrDJ/NvzrzDn6gvJcuCsixtFlslgkEfrCzOVyVEJf\nkPB7EJGzR/i7nSqX2fbYY2w74aDiU55ZAvKHWy5vR92+CTLzc5Tprx8HfG4uVh0T8yXMBUkyXaOa\nSlFNp5u+B1muZ5JlLpdjdnYWgHSpRH+hQCWTwQUDlqbSaYg5Gnm5Wl3qMl38xfW1t7+duaAcM+P6\n669vubyJffs4efLk0nQlm6USOjLN5XKx6icivSG8f0pWKiQXFqikstTM72KTyWSsXqqF8pnuUqvV\nKNPP4yMjVNO+PAMuu+yyWHU8dfRh5uZ9gk0kKvQPFFk4noHKmfcgK+uZZDk2NsaJEyfqujpcIkEt\nlQIzsrlc7EGbk/39VIKgTCwsgBlzg4PMbtsGwI4dO2IN3Hzprl0c3b+/6bkGM2NsbCxW/USkN6y0\nf3LO/5jPxBy4OZnJsBCcDgLAoJpOUQuOBAcGBqAv3sDSQ9ufRvHYMV9cgqUDDdD+qRU9c4HPyMgI\nw8PDy6/WMiObyZCNcT5g0dDQEKxw9VcymeSqq65qSx3NjOHhYUZGRmLXUUS634r7JyCdzpCJ+UN+\n69atK75mZjzrWc+OXcfBwUFyueW9b9o/taZnjizNjN27d5PP53ngwAHMjFQ6TTaXI5vNrum+IwMu\nvPBCpqamlq6KNTN27NjBVVdd5btO1ljHdt4LKiLdLfzdf/Abd2IYiUSSvuwAmUwm9v7JgG3btjEz\nM+Mv5gnmDQwM8KxnPZtEIv6+xMy4+OIdTE9vZer0cRKJIhc97SJGL3uZ9k8tsNUuT27rhsxc27Z1\n7BhccQUMDcXuel1Ruewv5rnnnlhdr7KcmeGc27BvXltjCaB6DE5dATYE1qZ4ajdXBjcFF9wDyXMr\nPnsunp44DN+8AspD4NoQT9Wqv5jnla+M3fW6IjcP1eNw3mcgcUF7yuwB64mlnumGFRER6RQlSxER\nkQhKliIiIhGULEVERCIoWYqIiERQshQREYmgZCkiIhJByVJERCSCkqWIiEgEJUsREZEISpYiIiIR\nYiVLM0ub2U1m9pSZPWlmf2Jmm/Yw9lqtxt13382tt95KYWaGJ598ksLMDOt5qqMD5kslpqanmSkW\n2bdvH4cPH246zJa0V9fE01/dSqEQxFNhhm5qeudgfj6IzxnF50o6HUsACwsLfO5zn+PWW29lbn6e\nqelppqan1xxPDiiVyxRmZigWZzl69GGmpqbW1fbOOaampjh69CiPPnaMr97xVcVTi+IeWb4H2AWM\nAT8CXAn8Trsr1UytVmPv3r0cOnRoaZw35xzzc3M8+eSTa0qYDihMT1MoFFioVHDOcfLkSfbv389d\nd92lANp43RFPlVA8zQfx1AVN7xwUCorPFnUslsAnyk996lOcPn26bn6tVuP01OnY8eSAYnGW2dlZ\nqtUqztWYm5/j2LFjPProsTW1vXOORx89xrFjx5ifn6dWrXLyhOKpVXGT5S8DH3TOPeacewz4IPAf\n2l+t5SYmJigWi01fq1WrzMzMxC6zVCpRKpdpjGTnHEeOHCGfz6+prtKy7oyn2triqd1KpRKlUhlQ\nfLagY7EEcNttt636+nRhOlZ55XKZSqXc9LVCYZrp6XjlAUxPT1NoUg/FU2ta7qYws/OAZwLfDs3+\nNvBsMxtyzk21u3JhzRoyUatBcJRZmZmJPVxXqVAgUakAYLVa3WvOOSYnJxkdHV1jjWU1XRlPiRrU\ngngqz7RneKV1KJUKJCyIz4TicyWdjiVg2RElQ
CJZrZ9hzZNfM+VKcWl9sxqkayQSFSw4vJk6fZyh\nwXgD3k+dPk4y4eMpkVioe03xFC1On/7i0N3hqFgMwlzo/wCY2R7gfWuuWYNKkNQAKn19LGQypMoN\nwTcV7zuRLBZJhI4q53I5KtkzAVgoFNZWWWlFy/HU7liChnha6GOhmiGVLEN4vO+N38euKmlFEulQ\nfJZzVKqKzyY6um9qNJ/cwsJchlR/w/4p03o3Z3qghAv1KFRTKfoHiriEz5aJRBGq8ZJlMnGK/uyZ\nBD47P0S1ll6aVjytLk6yXOyXGgKeCP6/Lfi77FN2zu0B9ixOm9m6OsTT6fTSDq7S18eXbryR9Pz8\n0uupVIprr702VpkT+/Zx8uTJpelKNkulv39pOpfLrafKsrqW46ndsQQN8VTt40t330g6FYqndIpr\n3xAvntpt4r6G+KxmqSwoPpvo6L6pUTm5lS8++R76qvXd/HH2T3f/7Z2ceuLU0nQ1nWbh+Jmejoue\ndhEXj70mVr3++eBXOXniTDxVa2kWQglX8bS6lpOlc+4pM3sEuBz4QTD7cuDhzejmGBkZ4dChQ0vT\nlb4+KqFRw3fu3Ak74o0gf+muXRzdv7/piW0zY2xsbO0VllV1XTxV+6hUQ/F06U5Ixoundrt0dBdH\nH1V8Rul0LAFs27atriu2nNxKObm17nUubL2L8zkvhAcb901BZ4iZMXrZyyBxQaw6jl72Mh49pnha\nq7gX+PwF8F/N7Olm9gz81WafaH+1lhsfH2fLli1NX9uyZQvj4+OxyxwZGWF4eBgzq5tvZgwPDzMy\nMrKmukrLzqp4ajfFZywdiyWAa665Zl2vN9qItlc8rY/FuVzYzNLAHwJvDGZ9GrjBObew8lpL67r1\nXppcq9WYmJggn89TqVRIp9OMjIwwPj5OIrG25ys458jn80xOTlIoFMjlcoyNjTEyMrIsqKQ1ZoZz\nLvLDW2s8tSOWYGPiqd0Un63FU6f3TeBvH7ntttvqjjC3bdvGNddcQyoV/5bPjWj7cz2eWt03NV13\ns+6taVdASvdbT0C2WL5i6RyieJJ2WU8sdcfPZxERkS6mZCkiIhJByVJERCSCkqWIiEgEJUsREZEI\nSpYiIiIRlCxFREQiKFmKiIhEULIUERGJoGQpIiISQclSREQkgpKliIhIBCVLERGRCEqWIiIiEeIP\nsrYO58J4abI5FEvSToonibJp41m2WzAGXVdHuOrYO3rhc1Ade0cvfA6qYzzqhhUREYmgZCkiIhKh\nl5Pl+ztdgRaojr2jFz4H1bF39MLnoDrG0LPnLEVERDZLLx9ZioiIbAolSxERkQhKliIiIhGULEVE\nRCIoWYqIiETouWRpZmkzu8nMnjKzJ83sT8xsUx/btxozy5rZzWZ2xMwKZvY9M/vlTtdrJWbWb2Z5\nMzvd6bp0guKpvRRPiqd26bZY6rlkCbwH2AWMAT8CXAn8TkdrVC8FPAa8ChgE3gx81Mxe3clKreID\nwEOdrkQHKZ7aS/GkeGqX7ool51xP/QMeBl4Xmn4d8FCn6xVR59uAD3S6Hk3q9ULgu8CrgdOdrk+H\nPgPFU/vqpXhSPLWrTl0XSz11ZGlm5wHPBL4dmv1t4NlmNtSZWq3OzPqAlwDf6XRdwoKuoZuBXwPK\nHa5ORyie2kfxpHhql26NpZ5KlsDW4G+4D3sq+Jvb5LpEMj/uzyeAw/hfb93k3cC3nHP/2OmKdJDi\nqX0UT4qndunKWOqaE88tmgn+DgFPBP/fFvwtbH51VhYE4p8CzwVe5ZyrdbhKS8xsBHgb8GOdrkuH\nKZ7aQPG0RPG0Tt0cSz11ZOmcewp4BLg8NPty4GHn3FTztTZfEIgfB14KvLqb6hbYBWwH7jezJ4Av\nA4Nm9oSZvbSzVds8iqe2UTyheGqTro2lnnuQupl9APgZ4DWAAV8F/to594GOVizEzD6Ob/RXOudO\ndbo+jcxsA
Dg/NOsKfHfMjwAnnHNdc55goyme1k/xdIbiaX26OZZ6rRsW4PeAC4BDwfSngd/vXHXq\nmdklwNuBEvCQ/xEHwKedc2/rWMVCnHOzwOzitJmd9LPdI52rVccontZJ8VRH8bQO3RxLPXdkKSIi\nstl66pyliIhIJyhZioiIRFCyFBERiaBkKSIiEkHJUkREJIKSpYiISAQlSxERkQhKliIiIhGULEVE\nRCIoWYqIiET4/wFj0fzK9eCSpwAAAABJRU5ErkJggg==\n", 78 | "text/plain": [ 79 | "" 80 | ] 81 | }, 82 | "metadata": {}, 83 | "output_type": "display_data" 84 | } 85 | ], 86 | "source": [ 87 | "# Visualization parameters\n", 88 | "RED = '#FF0000'\n", 89 | "YOLK = '#FFE600'\n", 90 | "al = 0.8\n", 91 | "al_over = al - 0.2\n", 92 | "show_verticies = True\n", 93 | "\n", 94 | "extent = [-1, -1, 5, 5] # format of extent is [xmin, ymin, xmax, ymax]\n", 95 | "\n", 96 | "# Visualize the multi-polygons\n", 97 | "fig = pyplot.figure(1, dpi=90)\n", 98 | "\n", 99 | "ax = fig.add_subplot(131)\n", 100 | "shmp(multi1, ax, show_verticies, extent, RED, al, 'multi-polygon 1')\n", 101 | "ax = fig.add_subplot(132)\n", 102 | "shmp(multi2, ax, show_verticies, extent, YOLK, al, 'multi-polygon 2')\n", 103 | "ax = fig.add_subplot(133)\n", 104 | "shmp(multi1, ax, show_verticies, extent, RED, al_over, '') \n", 105 | "shmp(multi2, ax, show_verticies, extent, YOLK, al_over, 'overlay multi-\\n polygons 1 and 2') \n", 106 | " \n", 107 | "pyplot.show()" 108 | ] 109 | }, 110 | { 111 | "cell_type": "markdown", 112 | "metadata": { 113 | "collapsed": true 114 | }, 115 | "source": [ 116 | "### Jaccard Index between the two multi-polygons" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": 3, 122 | "metadata": {}, 123 | "outputs": [ 124 | { 125 | "name": "stdout", 126 | "output_type": "stream", 127 | "text": [ 128 | "The area of the intersection between the 2 multi-polygons is 3.0\n", 129 | "The area of the uinion between the 2 multi-polygons is 6.0\n", 130 | "The Jaccard index between the 2 multi-polygons is 0.5\n" 131 | ] 132 | } 133 | ], 134 | "source": [ 135 | "# Satsense package import\n", 136 | "from satsense.performance.jaccard_similarity import 
jaccard_index_multipolygons as jim # jaccard index computation\n", 137 | "\n", 138 | "# intersections between the multi-polygons\n", 139 | "intersec = multi1.intersection(multi2).area\n", 140 | "print(\"The area of the intersection between the 2 multi-polygons is \",intersec)\n", 141 | "# union\n", 142 | "union = multi1.union(multi2).area\n", 143 | "print(\"The area of the uinion between the 2 multi-polygons is \",union)\n", 144 | " \n", 145 | "# compute the Jaccard index (defined as intersection/union)\n", 146 | "print(\"The Jaccard index between the 2 multi-polygons is \", jim(multi1, multi2))\n" 147 | ] 148 | } 149 | ], 150 | "metadata": { 151 | "anaconda-cloud": {}, 152 | "kernelspec": { 153 | "display_name": "Python [conda env:satelite]", 154 | "language": "python", 155 | "name": "conda-env-satelite-py" 156 | }, 157 | "language_info": { 158 | "codemirror_mode": { 159 | "name": "ipython", 160 | "version": 3 161 | }, 162 | "file_extension": ".py", 163 | "mimetype": "text/x-python", 164 | "name": "python", 165 | "nbconvert_exporter": "python", 166 | "pygments_lexer": "ipython3", 167 | "version": "3.5.3" 168 | } 169 | }, 170 | "nbformat": 4, 171 | "nbformat_minor": 2 172 | } 173 | -------------------------------------------------------------------------------- /notebooks/README.md: -------------------------------------------------------------------------------- 1 | # Notebooks accompanying the satsense Python package 2 | 3 | ## FeatureExtraction 4 | Notebooks demonstrating how to extract features from satellite images using 5 | satsense. 6 | 7 | ## Performance 8 | Notebooks demonstrating how to use the performance metrics with satsense and 9 | the utility functions to convert ground truth data to and from mask files and 10 | shapefiles. 11 | 12 | ## Classification 13 | 14 | Notebooks demonstrating classification using features calculated with Satsense. 
15 | -------------------------------------------------------------------------------- /satsense/__init__.py: -------------------------------------------------------------------------------- 1 | """Satsense package.""" 2 | from ._version import __version__ 3 | from .bands import BANDS 4 | from .extract import extract_features 5 | from .image import FeatureVector, Image 6 | 7 | __all__ = [ 8 | '__version__', 9 | 'Image', 10 | 'BANDS', 11 | 'extract_features', 12 | 'FeatureVector', 13 | ] 14 | -------------------------------------------------------------------------------- /satsense/_version.py: -------------------------------------------------------------------------------- 1 | from pkg_resources import get_distribution, DistributionNotFound 2 | 3 | try: 4 | __version__ = get_distribution('satsense').version 5 | except DistributionNotFound: 6 | # package is not installed 7 | __version__ = 'unknown' 8 | -------------------------------------------------------------------------------- /satsense/bands.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mappings for satelite image bands 3 | 4 | 0-index based for python, when using gdal add 1 5 | 6 | Notes 7 | ===== 8 | Known satellites are 9 | * worldview2 - 7 bands 10 | * worldview3 - 7 bands 11 | * quickbird - 4 bands 12 | * pleiades - 5 bands 13 | * rgb - 3 bands 14 | * monochrome - 1 band 15 | 16 | Example 17 | ======= 18 | if you need direct access to the bands of the image 19 | you can find them using this package:: 20 | 21 | >>> from satsense.bands import BANDS 22 | >>> print(BANDS['worldview3']) 23 | {'coastal': 0, 'blue': 1, 'green': 2, 'yellow': 3, 24 | 'red': 4, 'red-edge': 5, 'nir-1': 6, 'nir-2': 7} 25 | """ 26 | BANDS = { 27 | 'worldview2': { 28 | 'coastal': 0, 29 | 'blue': 1, 30 | 'green': 2, 31 | 'yellow': 3, 32 | 'red': 4, 33 | 'red-edge': 5, 34 | 'nir-1': 6, 35 | 'nir-2': 7, 36 | }, 37 | 'worldview3': { 38 | 'coastal': 0, 39 | 'blue': 1, 40 | 'green': 2, 41 | 
'yellow': 3, 42 | 'red': 4, 43 | 'red-edge': 5, 44 | 'nir-1': 6, 45 | 'nir-2': 7, 46 | }, 47 | 'quickbird': { 48 | 'blue': 0, 49 | 'green': 1, 50 | 'red': 2, 51 | 'nir-1': 3, 52 | }, 53 | 'rgb': { 54 | 'red': 0, 55 | 'green': 1, 56 | 'blue': 2, 57 | }, 58 | 'monochrome': { 59 | 'pan': 0, 60 | }, 61 | 'pleiades': { 62 | 'pan': 0, 63 | 'blue': 1, 64 | 'green': 2, 65 | 'red': 3, 66 | 'nir-1': 4, 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /satsense/extract.py: -------------------------------------------------------------------------------- 1 | """Module for computing features.""" 2 | import logging 3 | from concurrent.futures import ProcessPoolExecutor 4 | from functools import partial 5 | from itertools import groupby 6 | from os import cpu_count 7 | from typing import Iterator 8 | 9 | import numpy as np 10 | 11 | from .features import Feature 12 | from .generators import FullGenerator 13 | from .image import FeatureVector 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | 18 | def extract_features(features: Iterator[Feature], 19 | generator: FullGenerator, 20 | n_jobs: int = -1): 21 | """Compute features. 22 | 23 | Parameters 24 | ---------- 25 | features: 26 | Iterable of features. 27 | generator: 28 | Generator providing the required windows on the image. 29 | n_jobs: 30 | The maximum number of processes to use. The default is to use the 31 | value returned by :func:`os.cpu_count`. 32 | 33 | Yields 34 | ------ 35 | :obj:`satsense.FeatureVector` 36 | The requested feature vectors. 
def extract_features(features: Iterator[Feature],
                     generator: FullGenerator,
                     n_jobs: int = -1):
    """Compute feature vectors for all requested features.

    Parameters
    ----------
    features:
        Iterable of features to compute.
    generator:
        Generator providing the required windows on the image.
    n_jobs:
        The maximum number of processes to use. The default (-1) uses the
        value returned by :func:`os.cpu_count`. A value of 1 disables
        multiprocessing entirely.

    Yields
    ------
    :obj:`satsense.FeatureVector`
        One feature vector per feature, of shape (x, y, w, v) where x and y
        are the number of chunks in each direction, w the number of windows
        of the feature and v the feature length per window.
    """
    if n_jobs == 1:
        # Sequential path: no process pool overhead.
        yield from _extract_features(features, generator)
    else:
        yield from _extract_features_parallel(features, generator, n_jobs)


def _extract_features_parallel(features, generator, n_jobs=-1):
    """Fan the feature extraction out over a pool of worker processes."""
    if n_jobs < 1:
        n_jobs = cpu_count()
    logger.info("Extracting features using at most %s processes", n_jobs)
    # Normalization is computed once up front so workers can reuse it.
    generator.image.precompute_normalization()

    # Each worker processes its own slice of the image.
    chunks = tuple(generator.split(n_chunks=n_jobs))

    with ProcessPoolExecutor(max_workers=n_jobs) as pool:
        for feature in features:
            worker = partial(extract_feature, feature)
            parts = tuple(pool.map(worker, chunks))
            combined = np.ma.vstack(parts)
            yield FeatureVector(feature, combined, generator.crs,
                                generator.transform)


def _extract_features(features, generator):
    """Compute features sequentially, grouped by their base image."""
    generator.image.precompute_normalization()

    # Group consecutive features sharing a base image so it is loaded once.
    for image_type, feature_group in groupby(features,
                                             lambda f: f.base_image):
        feature_group = list(feature_group)
        logger.info("Loading base image %s", image_type)
        shapes = {
            shape
            for feature in feature_group for shape in feature.windows
        }
        generator.load_image(image_type, shapes)
        for feature in feature_group:
            values = extract_feature(feature, generator)
            yield FeatureVector(feature, values, generator.crs,
                                generator.transform)


def extract_feature(feature, generator):
    """Compute a single feature vector.

    Parameters
    ----------
    feature : Feature
        The feature to calculate.
    generator:
        Generator providing the required windows on the image.

    Returns
    -------
    numpy.ma.MaskedArray
        Array of shape generator.shape + (n_windows, feature.size); rows
        whose window contains masked pixels are masked instead of computed.
    """
    logger.info("Computing feature %s with windows %s and arguments %s",
                feature.__class__.__name__, feature.windows, feature.kwargs)
    if generator.loaded_itype != feature.base_image:
        logger.info("Loading base image %s", feature.base_image)
        generator.load_image(feature.base_image, feature.windows)

    shape = generator.shape + (len(feature.windows), feature.size)
    n_rows = int(np.prod(shape[:-1]))
    values = np.ma.zeros((n_rows, feature.size), dtype=np.float32)
    values.mask = np.zeros_like(values, dtype=bool)

    step = n_rows // 10 or 1
    index = 0
    for window in generator:
        # The generator may yield windows for other shapes too; skip those.
        if window.shape[:2] not in feature.windows:
            continue
        if index % step == 0:
            logger.info("%s%% ready", 100 * index // n_rows)
        if window.mask.any():
            # Any masked pixel invalidates the whole window.
            values.mask[index] = True
        else:
            values[index] = feature(window)
        index += 1

    values.shape = shape
    return values
class Feature(ABC):
    """Abstract base class for window features.

    Parameters
    ----------
    window_shapes : list[tuple]
        Window shapes the feature is calculated on.
    **kwargs : dict
        Keyword arguments forwarded to :meth:`compute`.
    """

    # Name of the base image this feature is calculated on.
    # Must be set by implementing classes.
    base_image = None

    # Length of the feature vector produced per window.
    # Must be set by implementing classes.
    size = None

    def __init__(self, window_shapes, **kwargs):
        self._indices = {}
        self._length = 0
        # Windows are kept sorted from largest to smallest.
        self._windows = tuple(sorted(window_shapes, reverse=True))
        self.kwargs = kwargs
        self.name = type(self).__name__

    def __call__(self, window):
        # Delegate to the static compute function with the stored arguments.
        return self.compute(window, **self.kwargs)

    @staticmethod
    @abstractmethod
    def compute(window, **kwargs):
        """Compute the feature on the window.

        Implementing classes set this via
        ``compute = staticmethod(my_feature_calculation)``.

        Parameters
        ----------
        window : tuple[int]
            The shape of the window
        **kwargs: dict
            The keyword arguments for the computation
        """

    @property
    def windows(self):
        """tuple[tuple[int]]: windows this feature uses for calculation."""
        return self._windows

    @windows.setter
    def windows(self, value):
        self._windows = tuple(sorted(value, reverse=True))

    @property
    def indices(self):
        """The indices for this feature in a feature set.

        See Also
        --------
        FeatureSet
        """
        return self._indices

    @indices.setter
    def indices(self, value):
        self._indices = value


class FeatureSet():
    """Bundle of features computed together.

    The set assigns each contained feature a slice of indices within the
    single 3-dimensional matrix into which all features are stacked.
    """

    def __init__(self):
        self._features = {}
        self._cur_index = 0

    def __iter__(self):
        return iter(self._features)

    @property
    def items(self):
        return self._features.items()

    def add(self, feature, name=None):
        """Add a feature to the set.

        Parameters
        ----------
        feature : Feature
            The feature to add to the set.
        name : str
            The name to give the feature in the set. If falsy, the feature's
            class name and the running count are used.

        Returns
        -------
        tuple
            The (name, feature) pair that was stored.
        """
        if not name:
            name = "{0}-{1}".format(feature.__class__.__name__,
                                    len(self._features) + 1)
        feature.name = name
        self._features[name] = feature
        self._recalculate_feature_indices()
        return name, feature

    def remove(self, name):
        """Remove the feature from the set.

        Parameters
        ----------
        name : str
            The name of the feature to remove.

        Returns
        -------
        bool
            Whether the feature was successfully removed.
        """
        try:
            del self._features[name]
        except KeyError:
            return False
        self._recalculate_feature_indices()
        return True

    @property
    def index_size(self):
        """int: total length of the combined feature index."""
        return self._cur_index

    def _recalculate_feature_indices(self):
        # Reassign contiguous index slices in insertion order.
        offset = 0
        for feature in self._features.values():
            length = feature.size * len(feature.windows)
            feature.indices = slice(offset, offset + length)
            offset += length
        self._cur_index = offset

    @property
    def base_images(self):
        """set[str]: base images used to calculate these features."""
        return {feature.base_image for feature in self._features.values()}
(2003, June). Man-made structure detection in 14 | natural images using a causal multiscale random field. In Computer vision 15 | and pattern recognition, 2003. proceedings. 2003 ieee computer society 16 | conference on (Vol. 1, pp. I-I). IEEE. 17 | 18 | Parameters 19 | ---------- 20 | histogram : numpy.ndarray 21 | The histogram to calculate the moments over. 22 | order : int 23 | The order of the moment to calculate, a number between [0, inf). 24 | 25 | Returns 26 | ------- 27 | float 28 | The heaved central shift moment. 29 | 30 | """ 31 | if len(histogram.shape) > 1: 32 | raise ValueError("Can only calculate moments on a 1d array histogram, " 33 | "but shape is: {}".format(histogram.shape)) 34 | 35 | if order < 0: 36 | raise ValueError("Order cannot be below 0") 37 | 38 | mean = np.mean(histogram) 39 | 40 | # Moment 0 is just the mean 41 | if order == 0: 42 | return mean 43 | 44 | # In the paper they say: sum over all bins 45 | # The difference of the bin with the mean of the histogram (v0) 46 | # and multiply with a step function which is 1 when the difference is > 0 47 | diff = histogram - mean 48 | # The step function is thus a selection method, which is more easily 49 | # written like this. 50 | positive_diff = diff[diff > 0] 51 | 52 | power = order + 1 53 | numerator = np.sum(np.power(positive_diff, power)) 54 | denominator = np.sum(positive_diff) 55 | 56 | if denominator == 0: 57 | moment = 0 58 | else: 59 | moment = numerator / denominator 60 | 61 | return moment 62 | 63 | 64 | # @inproceedings('kumar2003man', { 65 | # 'title': ('Man-made structure detection in natural images using a ' 66 | # 'causal multiscale random field'), 67 | # 'author': 'Kumar, Sanjiv and Hebert, Martial', 68 | # 'booktitle': ('Computer vision and pattern recognition, 2003. ' 69 | # 'proceedings. 
2003 ieee computer society conference on'), 70 | # 'volume': '1', 71 | # 'pages': 'I--I', 72 | # 'year': '2003', 73 | # 'organization': 'IEEE' 74 | # }) 75 | def smoothe_histogram(histogram, kernel, bandwidth): 76 | """Vectorized histogram smoothing implementation. 77 | 78 | Implementation is based on: 79 | Kumar, S., & Hebert, M. (2003, June). Man-made structure detection in 80 | natural images using a causal multiscale random field. In Computer vision 81 | and pattern recognition, 2003. proceedings. 2003 ieee computer society 82 | conference on (Vol. 1, pp. I-I). IEEE. 83 | 84 | Parameters 85 | ---------- 86 | histogram : numpy.ndarray 87 | The histogram to smoothe. 88 | kernel : function or callable object 89 | The kernel to use for the smoothing. 90 | For instance :obj:`scipy.stats.norm().pdf`. 91 | bandwidth : int 92 | The bandwidth of the smoothing. 93 | 94 | Returns 95 | ------- 96 | numpy.ndarray 97 | The smoothed histogram. 98 | """ 99 | if len(histogram.shape) > 1: 100 | raise ValueError("Can only smooth a 1d array histogram") 101 | 102 | bins = histogram.shape[0] 103 | 104 | # Make a bins x bins matrix with the inter-bin distances 105 | # Equivalent to: 106 | # for i in bins: 107 | # for j in bins 108 | # matrix[i, j] = (i - j) / bandwidth 109 | matrix = np.array([i - np.arange(bins) 110 | for i in np.arange(bins)]) / bandwidth 111 | smoothing_matrix = kernel(matrix) 112 | 113 | smoothing_factor_totals = np.sum(smoothing_matrix, axis=1) 114 | pre_smooth_histogram = np.sum(smoothing_matrix * histogram, axis=1) 115 | 116 | smoothed_histogram = pre_smooth_histogram / smoothing_factor_totals 117 | 118 | return smoothed_histogram 119 | 120 | 121 | # My own binning algorithm. It does not do edge case detection because we're 122 | # using the full 360 degrees the edge between 360 and 0 is there. I'm assuming 123 | # no values over 360 exist. 124 | def orientation_histogram(angles, magnitudes, number_of_orientations): 125 | """Create a histogram of orientations. 
126 | 127 | Bins are created in the full 360 degrees.abs 128 | 129 | Parameters 130 | ---------- 131 | angles : numpy.ndarray 132 | Angles of the orientations in degrees. 133 | magnitudes : numpy.ndarray 134 | Magnitude of the orientations. 135 | number_of_orientations: int 136 | The number of bins to use. 137 | 138 | Returns 139 | ------- 140 | histogram: numpy.ndarray 141 | The histogram of orientations of shape number_of_orientations. 142 | bin_centers: numpy.ndarray 143 | The centers of the created bins with angles in degrees. 144 | 145 | """ 146 | if len(angles.shape) > 2: 147 | raise ValueError("Only 2d windows are supported") 148 | 149 | if angles.shape != magnitudes.shape: 150 | raise ValueError( 151 | "Angle and magnitude arrays do not match shape: {} vs. {}".format( 152 | angles.shape, magnitudes.shape)) 153 | 154 | number_of_orientations_per_360 = 360. / number_of_orientations 155 | 156 | histogram = np.zeros(number_of_orientations) 157 | bin_centers = np.zeros(number_of_orientations) 158 | 159 | for i in range(number_of_orientations): 160 | orientation_end = number_of_orientations_per_360 * (i + 1) 161 | orientation_start = number_of_orientations_per_360 * i 162 | bin_centers[i] = (orientation_end + orientation_start) / 2. 163 | select = (orientation_start <= angles) & (angles < orientation_end) 164 | histogram[i] = np.sum(magnitudes[select]) 165 | 166 | return histogram, bin_centers 167 | 168 | 169 | def hog_features(window, bins=50, kernel=None, bandwidth=0.7): 170 | """Calculate the hog features on the window. 171 | 172 | Features are the 1st and 2nd order heaved central shift moments, 173 | the angle of the two highest peaks in the histogram, 174 | the absolute sine difference between the two highest peaks. 175 | 176 | Parameters 177 | ---------- 178 | window : numpy.ndarray 179 | The window to calculate the features on (grayscale). 180 | bands : dict 181 | A discription of the bands used in the window. 
def hog_features(window, bins=50, kernel=None, bandwidth=0.7):
    """Calculate the HoG features on the window.

    Features are the 1st and 2nd order heaved central shift moments,
    the angle of the two highest peaks in the histogram, and
    the absolute sine difference between the two highest peaks.

    Parameters
    ----------
    window : numpy.ndarray
        The window to calculate the features on (grayscale).
    bins : int
        The number of bins to use.
    kernel : :obj:`typing.Callable`
        The function to use for smoothing. The default is
        :obj:`scipy.stats.norm().pdf`.
    bandwidth : float
        The bandwidth for the smoothing.

    Returns
    -------
    :obj:`numpy.ndarray`
        The 5 HoG feature values.
    """
    if kernel is None:
        kernel = scipy.stats.norm().pdf

    # Gradient magnitudes and orientations (in degrees) via Sobel filters.
    gradient_x = cv2.Sobel(window, cv2.CV_64F, 1, 0, ksize=3)
    gradient_y = cv2.Sobel(window, cv2.CV_64F, 0, 1, ksize=3)
    magnitude, orientation = cv2.cartToPolar(gradient_x, gradient_y,
                                             angleInDegrees=True)

    histogram, bin_centers = orientation_histogram(orientation, magnitude,
                                                   bins)
    histogram = smoothe_histogram(histogram, kernel, bandwidth)

    # Features 1 and 2: first and second order heaved central shift moments.
    moment1 = heaved_central_shift_moment(histogram, 1)
    moment2 = heaved_central_shift_moment(histogram, 2)

    # Locate the two highest peaks; they drive the remaining features.
    peaks = np.argsort(histogram)[::-1][0:2]

    # Features 3 and 4: the 'location' of each of the two highest peaks,
    # expressed as the center orientation of its bin in degrees (choosing
    # the bin center over the bin number yields two interpretable values).
    delta1 = bin_centers[peaks[0]]
    delta2 = bin_centers[peaks[1]]

    # Feature 5: absolute sine of the angle between the two highest peaks;
    # equals 1 when the peaks are 90 degrees from each other.
    centers_rad = np.deg2rad(bin_centers)
    beta = np.abs(np.sin(centers_rad[peaks[0]] - centers_rad[peaks[1]]))

    return np.array([moment1, moment2, delta1, delta2, beta])


class HistogramOfGradients(Feature):
    """Histogram of Oriented Gradients feature calculator.

    The compute method calculates the feature on a particular window and
    returns the 1st and 2nd heaved central shift moments, the orientation
    of the first and second highest peaks and the absolute sine difference
    between the orientations of the highest peaks.

    Parameters
    ----------
    window_shapes : list[tuple]
        The window shapes to calculate the feature on.
    bins : int
        The number of bins to use. The default is 50.
    kernel : :obj:`typing.Callable`
        The function to use for smoothing. The default is
        :obj:`scipy.stats.norm().pdf`.
    bandwidth : float
        The bandwidth for the smoothing. The default is 0.7.

    Attributes
    ----------
    size : int
        The size of the feature vector returned by this feature.
    base_image : str
        The name of the base image used to calculate the feature.
    """
    base_image = 'grayscale'
    size = 5
    compute = staticmethod(hog_features)
def lacunarity(edged_image, box_size):
    """
    Calculate the lacunarity value over an image.

    Lacunarity is a measure of 'gappiness': the variance of the number
    of edge pixels inside a ``box_size`` x ``box_size`` sliding box,
    divided by the squared mean of that count, plus one.

    The calculation is performed following these papers:

    Kit, Oleksandr, and Matthias Luedeke. "Automated detection of slum area
    change in Hyderabad, India using multitemporal satellite imagery."
    ISPRS journal of photogrammetry and remote sensing 83 (2013): 130-137.

    Kit, Oleksandr, Matthias Luedeke, and Diana Reckien. "Texture-based
    identification of urban slums in Hyderabad, India using remote sensing
    data." Applied Geography 32.2 (2012): 660-667.

    Parameters
    ----------
    edged_image : numpy.ndarray
        Binary (edge) image to compute the lacunarity of.
    box_size : int
        Size of the sliding box; must be smaller than the image.

    Returns
    -------
    float
        The lacunarity value, or 0.0 when the image contains no edges.
    """
    # Import the submodule explicitly: the module-level `import scipy`
    # does not guarantee that the `scipy.signal` attribute is populated
    # on all scipy versions.
    from scipy.signal import convolve2d

    # Count the edge pixels inside every box position in one convolution.
    kernel = np.ones((box_size, box_size))
    accumulator = convolve2d(edged_image, kernel, mode='valid')
    mean_sqrd = np.mean(accumulator)**2
    # A zero mean implies an image without any edges; avoid division by 0.
    if mean_sqrd == 0:
        return 0.0

    return np.var(accumulator) / mean_sqrd + 1


def lacunarities(canny_edge_image, box_sizes):
    """Calculate the lacunarities for all box_sizes.

    Returns a list with one lacunarity value per entry of ``box_sizes``.
    """
    return [lacunarity(canny_edge_image, box_size) for box_size in box_sizes]
NDXI_TYPES = {
    'nir_ndvi': ('nir-1', 'red'),
    'rg_ndvi': ('red', 'green'),
    'rb_ndvi': ('red', 'blue'),
    'ndsi': ('nir-1', 'green'),
    'ndwi': ('coastal', 'nir-2'),
    'wvsi': ('green', 'yellow'),
}


def ndxi_image(image: 'Image', ndxi_type):
    """Calculates the feature according to the ndxi option provided.

    The two band names are looked up in ``NDXI_TYPES`` and the index is
    computed as ``(band_0 - band_1) / (band_0 + band_1)``.

    Parameters
    ----------
    image : Image
        The image to read the two bands from.
    ndxi_type : str
        One of the keys of ``NDXI_TYPES``.

    Returns
    -------
    numpy.ndarray
        The index values; positions where both bands are zero are set
        to 0.
    """
    band_0_name, band_1_name = NDXI_TYPES[ndxi_type]
    band_0 = image[band_0_name]
    band_1 = image[band_1_name]

    band_mix = band_0 + band_1
    # Ignore divide: this division may complain about division by 0.
    # This usually happens at the edge, which is alright by us. Using
    # errstate (rather than a seterr/seterr pair) restores the previous
    # error settings even if an exception is raised mid-way.
    with np.errstate(divide='ignore', invalid='ignore'):
        ndxi = np.divide(band_0 - band_1, band_mix)
    ndxi[band_mix == 0] = 0

    return ndxi
class RbNDVI(NDXI):
    """
    The red-blue normalized difference vegetation index.

    Computed as (red - blue) / (red + blue).

    For more information see [4]_.

    Parameters
    ----------
    window_shapes: list
        The window shapes to calculate the feature on.

    Notes
    -----
    .. [4] Tanaka, S., Goto, S., Maki, M., Akiyama, T., Muramoto, Y. and
        Yoshida, K., 2007. "Estimation of leaf chlorophyll concentration in
        winter wheat [Triticum aestivum] before maturing stage by a newly
        developed vegetation index-RBNDVI". Journal of the Japanese
        Agricultural Systems Society (Japan).
    """
    base_image = 'rb_ndvi'


class NDSI(NDXI):
    """
    The snow cover index.

    Computed as (nir-1 - green) / (nir-1 + green).

    Parameters
    ----------
    window_shapes: list
        The window shapes to calculate the feature on.
    """
    base_image = 'ndsi'


class NDWI(NDXI):
    """
    The water cover index.

    Computed as (coastal - nir-2) / (coastal + nir-2).

    Parameters
    ----------
    window_shapes: list
        The window shapes to calculate the feature on.
    """
    base_image = 'ndwi'


class WVSI(NDXI):
    """
    The soil cover index.

    Computed as (green - yellow) / (green + yellow).

    Parameters
    ----------
    window_shapes: list
        The window shapes to calculate the feature on.
    """
    base_image = 'wvsi'
"Image based characterization of formal and 13 | informal neighborhoods in an urban landscape." IEEE Journal of Selected 14 | Topics in Applied Earth Observations and Remote Sensing 5.4 (2012): 15 | 1164-1176. 16 | """ 17 | # Result caching 18 | if not hasattr(get_rii_dist_angles, "offsets"): 19 | pixels_dist_14 = np.array([[1, -1], [1, 1]]) 20 | pixels_dist_1 = np.array([[0, 1], [1, 0]]) 21 | pixels_dist_2 = np.array([[2, 0], [0, 2]]) 22 | pixels_dist_223 = np.array([[1, -2], [2, -1], [2, 1], [1, 2]]) 23 | pixels_dist_3 = np.array([[2, 2], [2, -2]]) 24 | 25 | (distances_1, angles_1) = _get_rii_dist_angle(pixels_dist_1) 26 | (distances_14, angles_14) = _get_rii_dist_angle(pixels_dist_14) 27 | (distances_2, angles_2) = _get_rii_dist_angle(pixels_dist_2) 28 | (distances_223, angles_223) = _get_rii_dist_angle(pixels_dist_223) 29 | (distances_3, angles_3) = _get_rii_dist_angle(pixels_dist_3) 30 | 31 | offsets_1 = np.stack((distances_1, angles_1), axis=1) 32 | offsets_14 = np.stack((distances_14, angles_14), axis=1) 33 | offsets_2 = np.stack((distances_2, angles_2), axis=1) 34 | offsets_223 = np.stack((distances_223, angles_223), axis=1) 35 | offsets_3 = np.stack((distances_3, angles_3), axis=1) 36 | offsets = np.concatenate((offsets_1, offsets_14, offsets_2, 37 | offsets_223, offsets_3)) 38 | 39 | # Cache the results, this function is called often! 
40 | get_rii_dist_angles.offsets = offsets 41 | 42 | # print(offsets.shape) 43 | # for i in range(len(offsets)): 44 | # print("Distance: {}, angle: {}".format(offsets[i][0], offsets[i][1])) 45 | 46 | return get_rii_dist_angles.offsets 47 | 48 | 49 | def _get_rii_dist_angle(pixels_dist): 50 | """Return angles and distances of the pixels.""" 51 | angle = np.arctan2(pixels_dist[:, 0], pixels_dist[:, 1]) 52 | distance = [ 53 | sp.spatial.distance.euclidean([0, 0], 54 | [pixels_dist[i, 0], pixels_dist[i, 1]]) 55 | for i in range(len(pixels_dist[:, 0])) 56 | ] 57 | return (distance, angle) 58 | 59 | 60 | def pantex(window, maximum=255): 61 | """Calculate the pantex feature on the given grayscale window. 62 | 63 | Parameters 64 | ---------- 65 | window: numpy.ndarray 66 | A window on an image. 67 | maximum: int 68 | The maximum value in the image. 69 | 70 | Returns 71 | ------- 72 | float 73 | Pantex feature value. 74 | 75 | """ 76 | offsets = get_rii_dist_angles() 77 | 78 | pan = np.zeros(len(offsets)) 79 | for i, offset in enumerate(offsets): 80 | glcm = greycomatrix( 81 | window, [offset[0]], [offset[1]], 82 | symmetric=True, 83 | normed=True, 84 | levels=maximum + 1) 85 | pan[i] = greycoprops(glcm, 'contrast') 86 | 87 | return pan.min() 88 | 89 | 90 | class Pantex(Feature): 91 | """ 92 | Pantext Feature Calculator 93 | 94 | The compute method calculates the feature on a particular 95 | window this returns the minimum of the grey level co-occurence 96 | matrix contrast property 97 | 98 | Parameters 99 | ---------- 100 | window_shapes: list 101 | The window shapes to calculate the feature on. 102 | maximum: int 103 | The maximum value in the image. 
104 | 105 | 106 | Attributes 107 | ---------- 108 | size: int 109 | The size of the feature vector returned by this feature 110 | base_image: str 111 | The name of the base image used to calculate the feature 112 | 113 | 114 | Example 115 | ------- 116 | Calculating the Pantex on an image using a generator:: 117 | 118 | from satsense import Image 119 | from satsense.generators import FullGenerator 120 | from satsense.extract import extract_feature 121 | from satsense.features import Pantex 122 | 123 | windows = ((50, 50), ) 124 | pantex = Pantex(windows) 125 | 126 | image = Image('test/data/source/section_2_sentinel.tif', 127 | 'quickbird') 128 | image.precompute_normalization() 129 | generator = FullGenerator(image, (10, 10)) 130 | 131 | feature_vector = extract_feature(pantex, generator) 132 | """ 133 | base_image = 'gray_ubyte' 134 | size = 1 135 | compute = staticmethod(pantex) 136 | -------------------------------------------------------------------------------- /satsense/features/sift.py: -------------------------------------------------------------------------------- 1 | """Sift feature.""" 2 | from typing import Iterator 3 | 4 | import cv2 5 | import numpy as np 6 | from sklearn.cluster import MiniBatchKMeans 7 | 8 | from ..generators import FullGenerator 9 | from ..image import Image 10 | from .feature import Feature 11 | 12 | 13 | def sift_cluster(images: Iterator[Image], 14 | n_clusters=32, 15 | max_samples=100000, 16 | sample_window=(8192, 8192)) -> MiniBatchKMeans: 17 | """Create the clusters needed to compute the sift feature.""" 18 | nfeatures = int(max_samples / len(images)) 19 | descriptors = [] 20 | for image in images: 21 | image.precompute_normalization() 22 | 23 | chunk = np.minimum(image.shape, sample_window) 24 | 25 | generator = FullGenerator(image, chunk) 26 | generator.load_image('gray_ubyte', (chunk, )) 27 | 28 | max_features_per_window = int(nfeatures / np.prod(generator.shape)) 29 | sift_object = 
def sift(window_gray_ubyte, kmeans: MiniBatchKMeans, normalized=True):
    """
    Calculate the Scale-Invariant Feature Transform feature.

    OpenCV SIFT descriptors are computed on the window, every descriptor
    is mapped to the nearest cluster center (codeword) of the previously
    trained clustering, and the histogram of those codewords is
    returned.

    Parameters
    ----------
    window_gray_ubyte : ndarray
        The window to calculate the feature on
    kmeans : sklearn.cluster.MiniBatchKMeans
        The trained KMeans clustering from opencv, see `from_images`
    normalized : bool
        If True normalize the feature by the total number of clusters

    Returns
    -------
    ndarray
        The histogram of sift feature codewords
    """
    n_clusters = kmeans.n_clusters

    descriptors = SIFT.detectAndCompute(window_gray_ubyte, None)[1]
    if descriptors is None:
        # No descriptors are found, e.g. on a window with 0 input range.
        return np.zeros(n_clusters)

    histogram = np.bincount(kmeans.predict(descriptors),
                            minlength=n_clusters)

    return histogram / n_clusters if normalized else histogram
| windows, 145 | images: Iterator[Image], 146 | n_clusters=32, 147 | max_samples=100000, 148 | sample_window=(8192, 8192), 149 | normalized=True): 150 | """ 151 | Create a codebook of SIFT features from the suplied images. 152 | 153 | Using the images `max_samples` SIFT features are extracted 154 | evenly from all images. These features are then clustered into 155 | `n_clusters` clusters. This codebook can then be used to 156 | calculate a histogram of this codebook. 157 | 158 | Parameters 159 | ---------- 160 | windows : list[tuple] 161 | The window shapes to calculate the feature on. 162 | images : Iterator[satsense.Image] 163 | Iterable for the images to calculate the codebook no 164 | n_cluster : int 165 | The number of clusters to create for the codebook 166 | max_samples : int 167 | The maximum number of samples to use for creating the codebook 168 | normalized : bool 169 | Wether or not to normalize the resulting feature with regards to 170 | the number of clusters 171 | """ 172 | kmeans = sift_cluster( 173 | images, n_clusters, max_samples, sample_window=sample_window) 174 | return cls(windows, kmeans, normalized) 175 | -------------------------------------------------------------------------------- /satsense/features/texton.py: -------------------------------------------------------------------------------- 1 | """Texton feature implementation.""" 2 | import logging 3 | from typing import Iterator 4 | 5 | import numpy as np 6 | from scipy.signal import convolve 7 | from skimage.filters import gabor_kernel, gaussian 8 | from sklearn.cluster import MiniBatchKMeans 9 | 10 | from ..generators import FullGenerator 11 | from ..image import Image 12 | from .feature import Feature 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | def create_texton_kernels(): 18 | """Create filter bank kernels.""" 19 | kernels = [] 20 | angles = 8 21 | thetas = np.linspace(0, np.pi, angles) 22 | for theta in thetas: 23 | for sigma in (1, ): 24 | for frequency in (0.05, 
def get_texton_descriptors(image: Image):
    """Compute texton descriptors for every pixel of the image.

    The descriptors are the responses of the grayscale image to the
    Gabor filter bank from ``create_texton_kernels``, plus one
    difference-of-Gaussians response, stacked along a new last axis.

    Parameters
    ----------
    image : Image
        The image to compute the descriptors on; its 'grayscale' base
        image is used.

    Returns
    -------
    numpy.ma.MaskedArray
        Array of shape (height, width, n_kernels + 1) carrying the mask
        of the grayscale image on every descriptor band.
    """
    logger.debug("Computing texton descriptors")
    kernels = create_texton_kernels()

    # Prepare input image: keep the mask aside and fill masked pixels
    # with 0 so the convolutions operate on a plain, fully-valid array.
    array = image['grayscale']
    mask = array.mask
    array = array.filled(fill_value=0)

    # Create result image: one band per kernel plus one extra band for
    # the difference-of-Gaussians response.
    shape = array.shape + (len(kernels) + 1, )
    result = np.ma.empty(shape, dtype=array.dtype)
    result.mask = np.zeros(result.shape, dtype=bool)

    for k, kernel in enumerate(kernels):
        result[:, :, k] = convolve(array, kernel, mode='same')
        result.mask[:, :, k] = mask

    # Last band: difference of Gaussians (a band-pass response).
    # NOTE(review): result has the grayscale dtype; if that is an integer
    # type the float gaussian difference would be truncated -- confirm
    # 'grayscale' is always floating point.
    result[:, :, -1] = gaussian(array, sigma=1) - gaussian(array, sigma=3)
    result.mask[:, :, -1] = mask

    logger.debug("Done computing texton descriptors")
    return result
def texton(descriptors, kmeans: MiniBatchKMeans, normalized=True):
    """Calculate the texton feature on the given window.

    Every pixel descriptor is assigned to its nearest cluster center
    (codeword) of the trained clustering and the histogram of codewords
    over the window is returned, optionally divided by the number of
    clusters.
    """
    n_clusters = kmeans.n_clusters

    # Flatten the (rows, cols, bands) window into one descriptor per row.
    rows, cols, bands = descriptors.shape
    flattened = descriptors.reshape(rows * cols, bands)

    codewords = kmeans.predict(flattened)
    histogram = np.bincount(codewords, minlength=n_clusters)

    # Perform normalization
    if normalized:
        return histogram / n_clusters
    return histogram
130 | kmeans : sklearn.cluster.MiniBatchKMeans 131 | The trained KMeans clustering from opencv 132 | normalized : bool 133 | If True normalize the feature by the total number of clusters 134 | 135 | Example 136 | ------- 137 | Calculating the Texton feature on an image using a generator:: 138 | 139 | from satsense import Image 140 | from satsense.generators import FullGenerator 141 | from satsense.extract import extract_feature 142 | from satsense.features import Texton 143 | 144 | windows = ((50, 50), ) 145 | 146 | image = Image('test/data/source/section_2_sentinel.tif', 'quickbird') 147 | image.precompute_normalization() 148 | 149 | texton = Texton.from_images(windows, [image]) 150 | 151 | generator = FullGenerator(image, (10, 10)) 152 | 153 | feature_vector = extract_feature(texton, generator) 154 | print(feature_vector.shape) 155 | 156 | Notes 157 | ----- 158 | .. [1] Arbelaez, Pablo, et al., "Contour detection and hierarchical 159 | image segmentation," IEEE transactions on pattern analysis and 160 | machine intelligence (2011), vol. 33 no. 5, pp. 898-916. 161 | """ 162 | 163 | base_image = 'texton_descriptors' 164 | compute = staticmethod(texton) 165 | 166 | def __init__(self, windows, kmeans: MiniBatchKMeans, normalized=True): 167 | """Create Texton feature.""" 168 | super().__init__(windows, kmeans=kmeans, normalized=normalized) 169 | self.size = kmeans.n_clusters 170 | 171 | @classmethod 172 | def from_images(cls, 173 | windows, 174 | images: Iterator[Image], 175 | n_clusters=32, 176 | max_samples=100000, 177 | sample_window=(8192, 8192), 178 | normalized=True): 179 | """ 180 | Create a codebook of Texton features from the suplied images. 181 | 182 | Using the images `max_samples` Texton features are extracted 183 | evenly from all images. These features are then clustered into 184 | `n_clusters` clusters. This codebook can then be used to 185 | calculate a histogram of this codebook. 
186 | 187 | Parameters 188 | ---------- 189 | windows : list[tuple] 190 | The window shapes to calculate the feature on. 191 | images : Iterator[satsense.Image] 192 | Iterable for the images to calculate the codebook no 193 | n_cluster : int 194 | The number of clusters to create for the codebook 195 | max_samples : int 196 | The maximum number of samples to use for creating the codebook 197 | normalized : bool 198 | Wether or not to normalize the resulting feature with regards to 199 | the number of clusters 200 | """ 201 | kmeans = texton_cluster( 202 | images, n_clusters, max_samples, sample_window=sample_window) 203 | return cls(windows, kmeans, normalized) 204 | -------------------------------------------------------------------------------- /satsense/generators.py: -------------------------------------------------------------------------------- 1 | """Module providing a generator to iterate over the image.""" 2 | import logging 3 | import math 4 | 5 | from .image import Image 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class BalancedGenerator(): 11 | """ 12 | Balanced window generator. 13 | 14 | Parameters 15 | ---------- 16 | image : Image 17 | Satellite image 18 | masks : 1-D array-like 19 | List of masks, one for each class, to use for generating patches 20 | A mask should have a positive value for the array positions that 21 | are included in the class 22 | p : 1-D array-like, optional 23 | The probabilities associated with each entry in masks. 24 | If not given the sample assumes a uniform distribution 25 | over all entries in a. 
class FullGenerator():
    """Window generator that covers the full image.

    Parameters
    ----------
    image: Image
        Satellite image
    step_size: tuple(int, int)
        Size of the steps to use to iterate over the image (in pixels)
    offset: tuple(int, int)
        Offset from the (0, 0) point (in number of steps).
    shape: tuple(int, int)
        Shape of the generator (in number of steps)

    """

    def __init__(self,
                 image: Image,
                 step_size: tuple,
                 offset=(0, 0),
                 shape=None):
        self.image = image

        self.step_size = step_size
        self.offset = offset

        # By default cover the entire image, rounding up so a partially
        # filled step at the border is still visited.
        if not shape:
            shape = tuple(
                math.ceil(image.shape[i] / step_size[i]) for i in range(2))
        self.shape = shape

        self.crs = image.crs
        self.transform = image.scaled_transform(step_size)

        # set using load_image
        self.loaded_itype = None
        self._image_cache = None
        self._windows = None
        self._padding = None

    def load_image(self, itype, windows):
        """
        Load image with sufficient additional data to cover windows.

        Parameters
        ----------
        itype: str
            Image type
        windows: list[tuple]
            The list of tuples of window shapes that will be used
            with this generator
        """
        # Largest windows first, so iteration yields them first.
        self._windows = tuple(sorted(windows, reverse=True))
        # Half of the largest window extent per axis: windows are centered
        # on their step, so this much extra data is needed on every side.
        self._padding = tuple(
            max(math.ceil(0.5 * w[i]) for w in windows) for i in range(2))

        block = self._get_blocks()
        image = self.image.copy_block(block)
        self._image_cache = image[itype]
        self.loaded_itype = itype

    def _get_blocks(self):
        """
        Calculate the size of the subset needed to include enough
        data for the calculations of windows for this generator
        """
        block = []
        for i in range(2):
            # Convert the offset from steps to pixels, then pad both ends.
            offset = self.offset[i] * self.step_size[i]
            start = offset - self._padding[i]
            end = (offset + self._padding[i] +
                   (self.shape[i] * self.step_size[i]))
            # NOTE(review): start can be negative and end can exceed the
            # image size; Image.copy_block is assumed to handle
            # out-of-range blocks -- confirm.
            block.append((start, end))

        return tuple(block)

    def _get_slices(self, index, window):
        """
        Calculate the array slices needed to retrieve the window from the image
        at the provided index

        Parameters
        ----------
        index: 1-D array-like
            The x and y coordinates for the slice in steps
        window: 1-D array-like
            The x and y size of the window

        Returns
        -------
        tuple[tuple]
            The x-range and y-range slices for the index and
            window both with and without the padding included
        """
        slices = []

        for i in range(2):
            # Center pixel of the step cell, in the coordinates of the
            # padded image cache.
            mid = self._padding[i] + math.floor(
                (index[i] + .5) * self.step_size[i])
            # The window is centered on the step cell.
            start = mid - math.floor(.5 * window[i])
            end = start + window[i]
            slices.append(slice(start, end))

        return slices

    def __iter__(self):
        """
        Iterate over the x and y coordinates of the generator and windows

        While iterating it will return for each x and y coordinate as defined
        by the step_size the part of the image as defined by the window.

        Consecutive calls will first return each window and then move to the
        next coordinates

        Returns
        -------
        collections.Iterable[numpy.ndarray]
        """
        if self._image_cache is None:
            raise RuntimeError("Please load an image first using load_image.")
        for i in range(self.shape[0]):
            for j in range(self.shape[1]):
                for window in self._windows:
                    yield self[i, j, window]

    def __getitem__(self, index):
        """
        Extract item from image.

        Parameters
        ----------
        index: 1-D array-like
            An array which specifies the x and y coordinates
            and the window shape to get from the generator

        Examples:
        ---------
        >>> generator[0, 0, (100, 100)]
        """
        window = index[2]

        slices = self._get_slices(index, window)

        return self._image_cache[slices[0], slices[1]]

    def split(self, n_chunks):
        """
        Split processing into chunks.

        Each chunk is a new FullGenerator covering a horizontal band of
        rows; the last chunks may be shorter (or skipped) when the rows
        do not divide evenly.

        Parameters
        ----------
        n_chunks: int
            Number of chunks to split the image into
        """
        chunk_size = math.ceil(self.shape[0] / n_chunks)
        for job in range(n_chunks):
            row_offset = self.offset[0] + job * chunk_size
            # NOTE(review): row_length subtracts the absolute row_offset
            # (which includes self.offset[0]) from the relative shape[0];
            # for a generator with a nonzero initial row offset this looks
            # off by offset[0] -- confirm.
            row_length = min(chunk_size, self.shape[0] - row_offset)
            if row_length <= 0:
                break
            yield FullGenerator(
                image=self.image,
                step_size=self.step_size,
                offset=(row_offset, self.offset[1]),
                shape=(row_length, self.shape[1]))
6 | """ 7 | 8 | from sklearn.metrics import jaccard_similarity_score as jss 9 | 10 | 11 | # JI between 2 binary masks 12 | def jaccard_index_binary_masks(truth_mask, predicted_mask): 13 | return jss(truth_mask, predicted_mask, normalize=True) 14 | 15 | 16 | # JI between 2 multipolygons 17 | def jaccard_index_multipolygons(truth_multi, predicted_multi): 18 | if not (truth_multi.is_valid): 19 | raise ('The truth multipolygon is not valid!') 20 | if not (predicted_multi.is_valid): 21 | raise ('The predicted multipolygon is not valid!') 22 | 23 | # intersection 24 | intersec = truth_multi.intersection(predicted_multi).area 25 | # union 26 | union = truth_multi.union(predicted_multi).area 27 | 28 | # Jaccard index is intersection over union 29 | return intersec / union 30 | -------------------------------------------------------------------------------- /satsense/util/__init__.py: -------------------------------------------------------------------------------- 1 | from .conversions import mask2multipolygon, multipolygon2mask 2 | from .mask import load_mask_from_file, save_mask2file 3 | from .shapefile import (load_shapefile2multipolygon, 4 | save_multipolygon2shapefile, show_multipolygon) 5 | 6 | __all__ = [ 7 | 'save_mask2file', 8 | 'load_mask_from_file', 9 | 'show_multipolygon', 10 | 'load_shapefile2multipolygon', 11 | 'save_multipolygon2shapefile', 12 | 'multipolygon2mask', 13 | 'mask2multipolygon', 14 | ] 15 | -------------------------------------------------------------------------------- /satsense/util/conversions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Methods for conversion between shapely multipolygons and binary masks. 
def mask2multipolygon(mask_data, mask, transform=IDENTITY, connectivity=4):
    """Convert from binary mask to shapely multipolygon.

    Parameters
    ----------
    mask_data : numpy.ndarray
        Raster data to vectorize.
    mask : numpy.ndarray
        Boolean mask selecting the valid region of ``mask_data``.
    transform : affine.Affine
        Transform mapping pixel to world coordinates (identity by default).
    connectivity : int
        Pixel connectivity (4 or 8) used when tracing shapes.

    Returns
    -------
    shapely.geometry.MultiPolygon
        The vectorized (and, if needed, buffer-repaired) multipolygon.
    """
    # The original code built intermediate GeoJSON-like property dicts and
    # an unused enumerate index, then immediately threw them away; build
    # the polygons directly from the (geometry, value) pairs instead.
    multi = MultiPolygon(
        shape(geometry)
        for geometry, _ in shapes(
            mask_data,
            mask=mask,
            connectivity=connectivity,
            transform=transform))

    if not multi.is_valid:
        print('Not a valid polygon, using its buffer!')
        # buffer(0) is the standard shapely trick to repair invalid
        # self-intersecting geometry.
        multi = multi.buffer(0)

    return multi
def load_mask_from_file(filename):
    """Load a binary mask from filename into a numpy array.

    @returns mask The mask image loaded as a numpy array
    """
    # NOTE(review): scipy.misc.imread is deprecated and removed from
    # scipy >= 1.2/1.3; setup.py pins scipy<1.3 so this still works, but
    # consider rasterio or imageio when unpinning scipy.
    mask = imread(filename)

    return mask


def load_mask_from_shapefile(filename, shape, transform):
    """Load a mask from a shapefile.

    Parameters
    ----------
    filename : str
        Path to the shapefile.
    shape : tuple of int
        Output mask shape (height, width).
    transform : affine.Affine
        Transform mapping pixel to world coordinates.

    Returns
    -------
    numpy.ndarray
        Boolean mask rasterized from the shapefile's multipolygon.
    """
    multipolygon, _ = load_shapefile2multipolygon(filename)
    mask = multipolygon2mask(multipolygon, shape, transform)
    return mask


class _MeanFeature(Feature):
    """Helper feature for ``resample``: mean of the 'pan' base image."""

    # Base image and output size expected by the Feature machinery.
    base_image = 'pan'
    size = 1
    compute = staticmethod(np.mean)


def resample(generator, threshold=0.8):
    """Extract the mask points generated by generator.

    The image is averaged over windows of ``generator.step_size``; cells
    whose mean exceeds ``threshold`` become True in the returned 2D mask.
    """
    windows = (generator.step_size, )
    feature = _MeanFeature(windows)
    values = next(extract_features([feature], generator, n_jobs=1)).vector
    # Drop the trailing (window, feature-size) axes; keep the 2D cell grid.
    values.shape = (values.shape[0], values.shape[1])
    return values > threshold


def get_ndxi_mask(generator, feature=NirNDVI):
    """Compute a mask based on an NDXI feature.

    Valid cells whose NDXI value falls below an Otsu threshold are set to
    True; cells that were masked (invalid) in the input remain True.
    """
    windows = (generator.step_size, )
    values = next(extract_features([feature(windows)], generator)).vector
    # Drop the trailing (window, feature-size) axes; keep the 2D cell grid.
    values.shape = (values.shape[0], values.shape[1])
    mask = np.array(values.mask)
    # Threshold only the valid (unmasked) values.
    unmasked_values = np.array(values[~values.mask])
    mask[~mask] = unmasked_values < threshold_otsu(unmasked_values)
    return mask
# visualization
def plot_coords(ax, ob):
    """Plot the coordinates of a shapely geometry as grey dots on ax."""
    x, y = ob.xy
    ax.plot(x, y, 'o', color='#999999', zorder=1)


def show_multipolygon(multipolygon, axis, show_coords, extent, color, alpha,
                      title):
    """Visualize multipolygon in plot.

    Parameters
    ----------
    multipolygon : shapely.geometry.MultiPolygon
        The shape to draw.
    axis : matplotlib axis
        Axis to draw on; returned for chaining.
    show_coords : bool
        If True, also plot the exterior coordinates of each polygon.
    extent : tuple
        (xmin, ymin, xmax, ymax) plot limits.
    color : matplotlib color
        Face and edge color of the patches.
    alpha : float
        Patch transparency.
    title : str
        Axis title.
    """
    for polygon in multipolygon:
        if show_coords:
            plot_coords(axis, polygon.exterior)
        patch = PolygonPatch(
            polygon, facecolor=color, edgecolor=color, alpha=alpha, zorder=2)
        axis.add_patch(patch)

    xmin, ymin, xmax, ymax = extent
    xrange = [xmin, xmax]
    yrange = [ymin, ymax]
    axis.set_xlim(*xrange)
    # axis.set_xticks(range(*xrange))
    axis.set_ylim(*yrange)
    # axis.set_yticks(range(*yrange))
    axis.set_aspect(1)

    axis.set_title(title)

    return axis


def load_shapefile2multipolygon(filename):
    """Load a shapefile as a MultiPolygon.

    Returns
    -------
    tuple
        (MultiPolygon of all geometries in the file, file bounds).
    """
    with fiona.open(filename) as file:
        multipolygon = MultiPolygon(shape(p['geometry']) for p in file)
        bounds = file.bounds

    return multipolygon, bounds


def save_multipolygon2shapefile(multipolygon, shapefilename):
    """Save a MultiPolygon to a shapefile.

    Each polygon is written as a separate record with a 1-based integer
    'id' property.
    """
    # define the schema
    schema = {
        'geometry': 'Polygon',
        'properties': {
            'id': 'int'
        },
    }

    # write to a shapefile
    with fiona.open(shapefilename, 'w', 'ESRI Shapefile', schema) as file:
        for i, poly in enumerate(multipolygon, start=1):
            file.write({
                'geometry': mapping(poly),
                'properties': {
                    'id': i
                },
            })
def read(filename):
    """Read the contents of `filename` and return them as a string."""
    # Explicit encoding: README.rst and CITATION.cff are UTF-8 and may
    # contain non-ASCII author names; without it, open() falls back to
    # the platform default encoding (e.g. cp1252 on Windows) and can
    # raise UnicodeDecodeError during installation.
    with open(filename, encoding='utf-8') as file:
        return file.read()


def read_authors(citation_file):
    """Read the list of authors from .cff file.

    Returns a comma-separated string of 'given family' names, in the
    order the authors appear in the file.
    """
    authors = re.findall(
        r'family-names: (.*)$\s*given-names: (.*)',
        read(citation_file),
        re.MULTILINE,
    )
    # Each match is (family, given); reverse to 'given family'.
    return ', '.join(' '.join(author[::-1]) for author in authors)
@pytest.fixture
def image():
    """
    Load the test scene as a satsense Image (pytest fixture).
    """
    # The source data is actually Sentinel imagery, but the bands were
    # stored in quickbird band order, so the 'quickbird' profile is used.
    return Image('test/data/source/section_2_sentinel.tif', 'quickbird')
def generate_grayscale(img, prefix):
    """Write the grayscale base image to <prefix>grayscale.tif."""
    # Add a leading band axis: write_target expects (bands, height, width).
    grayscale = img['grayscale'][np.newaxis, :, :]

    write_target(grayscale, prefix + 'grayscale.tif', img.crs, img.transform)


def generate_canny_edge(img, prefix):
    """Write the canny-edge base image to <prefix>canny_edge.tif."""
    canny = img['canny_edge'][np.newaxis, :, :].astype(np.uint8)
    # 255 is used as the nodata fill value when the masked array is
    # filled on write (edge values themselves are 0/1 after the cast).
    canny.set_fill_value(255)
    write_target(canny, prefix + 'canny_edge.tif', img.crs, img.transform)


def generate_gray_ubyte(img, prefix):
    """Write the gray-ubyte base image to <prefix>gray_ubyte.tif."""
    gray_ubyte = img['gray_ubyte'][np.newaxis, :, :]
    # Clamp valid 255 values to 254 so that 255 can serve unambiguously
    # as the nodata fill value below.
    gray_ubyte[gray_ubyte == 255] = 254

    gray_ubyte.set_fill_value(255)

    write_target(gray_ubyte, prefix + 'gray_ubyte.tif', img.crs, img.transform)


if __name__ == "__main__":
    # Regenerate the base images used as fixtures by the test suite.
    image = Image('../source/section_2_sentinel.tif', 'quickbird')
    image.precompute_normalization()

    generate_grayscale(image, '../baseimage/section_2_sentinel_')
    generate_canny_edge(image, '../baseimage/section_2_sentinel_')
    generate_gray_ubyte(image, '../baseimage/section_2_sentinel_')
def write_target(array, name, fname, w):
    """Write a 1-D feature target array and its source window to netCDF.

    Parameters
    ----------
    array : sequence of float
        Feature values to store.
    name : str
        Output netCDF file name.
    fname : str
        Name of the netCDF variable that holds the feature values.
    w : tuple of slice
        (row, column) window the features were computed from; its
        start/stop/step values are stored in the 'window' variable.
    """
    # Use the Dataset context manager (as the tests that read these files
    # do) so the file is properly closed even if a write fails.
    with Dataset(name, 'w', format="NETCDF4") as dataset:
        dataset.createDimension('window_len', 6)
        variable = dataset.createVariable(
            'window', 'i4', dimensions=('window_len'))
        variable[:] = [
            w[0].start, w[0].stop, w[0].step, w[1].start, w[1].stop, w[1].step
        ]

        dataset.createDimension('length', len(array))
        variable = dataset.createVariable(fname, 'f8', dimensions=('length'))
        variable[:] = array
75 | 76 | window = (slice(100, 125, 1), slice(100, 125, 1)) 77 | ubyte = image['gray_ubyte'] 78 | features = sift(ubyte[window], clusters) 79 | 80 | write_target(features, '../target/sift.nc', 'sift', window) 81 | 82 | 83 | def texton_target(): 84 | image = Image('../source/section_2_sentinel.tif', 'quickbird') 85 | image.precompute_normalization() 86 | 87 | clusters = texton_cluster([image]) 88 | descriptors = get_texton_descriptors(image) 89 | 90 | window = (slice(100, 125, 1), slice(100, 125, 1)) 91 | 92 | win = descriptors[window] 93 | features = texton(win, clusters) 94 | 95 | write_target(features, '../target/texton.nc', 'texton', window) 96 | 97 | 98 | if __name__ == '__main__': 99 | hog_target() 100 | lacunarity_target() 101 | pantex_target() 102 | sift_target() 103 | texton_target() 104 | -------------------------------------------------------------------------------- /test/data/source/section_2_sentinel.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/source/section_2_sentinel.tif -------------------------------------------------------------------------------- /test/data/source/section_2_sentinel_l1c.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/source/section_2_sentinel_l1c.tif -------------------------------------------------------------------------------- /test/data/source/section_2_sentinel_rect.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/source/section_2_sentinel_rect.tif -------------------------------------------------------------------------------- /test/data/source/section_2_sentinel_rect_wgs84.tif: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/source/section_2_sentinel_rect_wgs84.tif -------------------------------------------------------------------------------- /test/data/target/hog.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/hog.nc -------------------------------------------------------------------------------- /test/data/target/lacunarity.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/lacunarity.nc -------------------------------------------------------------------------------- /test/data/target/ndsi.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/ndsi.tif -------------------------------------------------------------------------------- /test/data/target/ndsi.tif.msk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/ndsi.tif.msk -------------------------------------------------------------------------------- /test/data/target/ndvi.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/ndvi.tif -------------------------------------------------------------------------------- /test/data/target/ndvi.tif.msk: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/ndvi.tif.msk -------------------------------------------------------------------------------- /test/data/target/pantex.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/pantex.nc -------------------------------------------------------------------------------- /test/data/target/rb_ndvi.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/rb_ndvi.tif -------------------------------------------------------------------------------- /test/data/target/rb_ndvi.tif.msk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/rb_ndvi.tif.msk -------------------------------------------------------------------------------- /test/data/target/rg_ndvi.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/rg_ndvi.tif -------------------------------------------------------------------------------- /test/data/target/rg_ndvi.tif.msk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DynaSlum/satsense/b0fa650193995a30328f26a36ebab2437c0e37ef/test/data/target/rg_ndvi.tif.msk -------------------------------------------------------------------------------- /test/data/target/sift.nc: -------------------------------------------------------------------------------- 
def normalized_image():
    """Get the test image and normalize it.

    Each band is independently stretched to [0, 1] between its 2nd and
    98th percentiles (computed over valid pixels only) and clipped.

    Returns
    -------
    tuple
        (normalized float32 image, dataset CRS, dataset transform).
    """
    filename = Path(__file__).parent / 'data' / 'section_2_sentinel.tif'
    with rasterio.open(filename) as dataset:
        image_in = dataset.read(masked=True).astype('float32')
        image = np.empty_like(image_in)

        # Normalization
        percentiles = [2, 98]
        for i in range(dataset.count):
            band = image_in[i]
            # Use only valid (unmasked) pixels for the percentile stretch.
            data = band[~band.mask]

            lower, upper = np.nanpercentile(data, percentiles)
            band -= lower
            band /= upper - lower
            np.clip(band, a_min=0, a_max=1, out=band)

            image[i] = band

        return image, dataset.crs, dataset.transform
def ndvi_target():
    """Generate the nir_ndvi reference target.

    Band indices index the normalized test image; presumably quickbird
    order (blue=0, green=1, red=2, nir=3) -- TODO confirm against
    satsense.bands.
    """
    ndxi_target(3, 2, 'ndvi')


def rg_ndvi_target():
    """Generate the red/green NDVI reference target (bands 2 and 1)."""
    ndxi_target(2, 1, 'rg_ndvi')


def rb_ndvi_target():
    """Generate the red/blue NDVI reference target (bands 2 and 0)."""
    ndxi_target(2, 0, 'rb_ndvi')


def ndsi_target():
    """Generate the NDSI reference target (bands 3 and 1)."""
    ndxi_target(3, 1, 'ndsi')


if __name__ == '__main__':
    # Regenerate all NDXI reference targets used by the test suite.
    ndvi_target()
    rg_ndvi_target()
    rb_ndvi_target()
    ndsi_target()
def test_rg_ndvi(image):
    """
    Test rg_ndvi
    """
    with rasterio.open('test/data/target/rg_ndvi.tif') as dataset:
        target = dataset.read(masked=True)

    result = ndxi_image(image, 'rg_ndvi')

    same = result == target

    assert same.all()


def test_rb_ndvi(image):
    """
    Test rb_ndvi
    """
    with rasterio.open('test/data/target/rb_ndvi.tif') as dataset:
        target = dataset.read(masked=True)

    result = ndxi_image(image, 'rb_ndvi')

    same = result == target

    assert same.all()


def test_ndsi(image):
    """
    Test ndsi
    """
    with rasterio.open('test/data/target/ndsi.tif') as dataset:
        target = dataset.read(masked=True)

    result = ndxi_image(image, 'ndsi')

    same = result == target

    assert same.all()


def test_hog(image=None):
    """
    Test hog
    """
    with Dataset("test/data/target/hog.nc", "r", format="NETCDF4") as dataset:
        target = dataset.variables['hog'][:]
        slices = dataset.variables['window'][:]

    # Rebuild the (row, column) window slices stored alongside the target.
    window = slice(*slices[0:3]), slice(*slices[3:6])

    with rasterio.open(
            'test/data/baseimage/section_2_sentinel_grayscale.tif') as file:
        source = file.read(1, masked=True)

    features = hog_features(source[window])

    assert np.allclose(features, target)
def test_pantex():
    """
    Test Pantex Feature
    """
    with Dataset(
            "test/data/target/pantex.nc", "r", format="NETCDF4") as dataset:
        target = dataset.variables['pantex'][:]
        slices = dataset.variables['window'][:]

    # Rebuild the (row, column) window slices stored alongside the target.
    window = slice(*slices[0:3]), slice(*slices[3:6])

    with rasterio.open(
            'test/data/baseimage/section_2_sentinel_gray_ubyte.tif') as file:
        source = file.read(1, masked=True)

    features = [pantex(source[window])]

    same = target == features

    assert same.all()


def test_sift(image):
    """Sift feature test."""
    with Dataset("test/data/target/sift.nc", "r", format="NETCDF4") as dataset:
        target = dataset.variables['sift'][:]
        slices = dataset.variables['window'][:]

    window = slice(*slices[0:3]), slice(*slices[3:6])

    # NOTE(review): exact equality with the stored target requires the
    # clustering to be deterministic for this input -- verify if this
    # test ever becomes flaky.
    clusters = sift_cluster([image, image])

    win = image['gray_ubyte'][window]
    features = sift(win, clusters)

    same = target == features

    assert same.all()


def test_texton(image):
    """Texton feature test."""
    with Dataset(
            "test/data/target/texton.nc", "r", format="NETCDF4") as dataset:
        target = dataset.variables['texton'][:]
        slices = dataset.variables['window'][:]

    window = slice(*slices[0:3]), slice(*slices[3:6])

    clusters = texton_cluster([image, image], max_samples=1000)
    descriptors = get_texton_descriptors(image)

    win = descriptors[window]
    features = texton(win, clusters)

    same = target == features

    assert same.all()
# Hypothesis strategy generating numpy dtypes that rasterio can write;
# check_dtype filters out dtypes rasterio does not support.
st_rasterio_dtypes = st.one_of(
    integer_dtypes(),
    unsigned_integer_dtypes(),
    floating_dtypes(),
).filter(check_dtype)
def test_extract_features(generator):
    """Test that features can be computed."""
    requested = [
        RGBFeature(window_shapes=((3, 2), )),
        GrayUbyteFeature(window_shapes=((3, 2), (4, 4))),
        GrayUbyteFeature(window_shapes=((4, 4), (2, 5))),
    ]

    computed = tuple(extract_features(requested, generator, n_jobs=1))

    # One result per requested feature, in the same order.
    assert len(computed) == 3
    for item, feat in zip(computed, requested):
        assert item.feature == feat
        assert item.vector.any()
        # Result vector is (rows, cols, n_windows, feature_size).
        expected_shape = generator.shape + (len(feat.windows), feat.size)
        assert item.vector.shape == expected_shape
-------------------------------------------------------------------------------- 1 | import hypothesis.strategies as st 2 | import numpy as np 3 | import pytest 4 | import rasterio 5 | from hypothesis import given, settings 6 | from hypothesis.extra.numpy import arrays 7 | from rasterio.transform import from_origin 8 | 9 | from satsense.bands import BANDS 10 | from satsense.generators import FullGenerator 11 | from satsense.image import Image 12 | 13 | from .strategies import st_rasterio_dtypes 14 | 15 | 16 | def create_test_file(filename, array): 17 | """Write an array of shape (bands, width, height) to file.""" 18 | array = np.ma.asanyarray(array) 19 | crs = rasterio.crs.CRS(init='epsg:4326') 20 | transform = from_origin(52, 4, 10, 10) 21 | with rasterio.open( 22 | filename, 23 | mode='w', 24 | driver='GTiff', 25 | count=array.shape[0], 26 | height=array.shape[1], 27 | width=array.shape[2], 28 | dtype=array.dtype, 29 | crs=crs, 30 | transform=transform) as dataset: 31 | for band, data in enumerate(array, start=1): 32 | dataset.write(data, band) 33 | 34 | 35 | def create_test_image(dirname, array, normalization=None): 36 | """Create a test Image instance.""" 37 | filename = str(dirname / 'tmp.tif') 38 | create_test_file(filename, array) 39 | satellite = 'quickbird' 40 | image = Image(filename, satellite, normalization_parameters=normalization) 41 | return image 42 | 43 | 44 | def create_mono_image(dirname, array, normalization=None): 45 | """Create a test Image instance.""" 46 | filename = str(dirname / 'tmp.tif') 47 | create_test_file(filename, array) 48 | satellite = 'monochrome' 49 | image = Image(filename, satellite, normalization_parameters=normalization) 50 | return image 51 | 52 | 53 | WINDOW_TEST_DATA = [ 54 | { 55 | 'image_shape': (3, 3), 56 | 'step_size': (2, 1), 57 | 'window_shape': (3, 4), 58 | 'window_ref_arrays': [ 59 | np.ma.array( 60 | [ 61 | [0, 0, 0, 1], 62 | [0, 0, 3, 4], 63 | [0, 0, 6, 7], 64 | ], 65 | mask=[ 66 | [True, True, False, False], 67 
| [True, True, False, False], 68 | [True, True, False, False], 69 | ] 70 | ), 71 | np.ma.array( 72 | [ 73 | [0, 0, 1, 2], 74 | [0, 3, 4, 5], 75 | [0, 6, 7, 8], 76 | ], 77 | mask=[ 78 | [True, False, False, False], 79 | [True, False, False, False], 80 | [True, False, False, False], 81 | ] 82 | ), 83 | np.ma.array( 84 | [ 85 | [0, 1, 2, 0], 86 | [3, 4, 5, 0], 87 | [6, 7, 8, 0], 88 | ], 89 | mask=[ 90 | [False, False, False, True], 91 | [False, False, False, True], 92 | [False, False, False, True], 93 | ] 94 | ), 95 | np.ma.array( 96 | [ 97 | [0, 0, 6, 7], 98 | [0, 0, 0, 0], 99 | [0, 0, 0, 0], 100 | ], 101 | mask=[ 102 | [True, True, False, False], 103 | [True, True, True, True], 104 | [True, True, True, True], 105 | ] 106 | ), 107 | np.ma.array( 108 | [ 109 | [0, 6, 7, 8], 110 | [0, 0, 0, 0], 111 | [0, 0, 0, 0], 112 | ], 113 | mask=[ 114 | [True, False, False, False], 115 | [True, True, True, True], 116 | [True, True, True, True], 117 | ] 118 | ), 119 | np.ma.array( 120 | [ 121 | [6, 7, 8, 0], 122 | [0, 0, 0, 0], 123 | [0, 0, 0, 0], 124 | ], 125 | mask=[ 126 | [False, False, False, True], 127 | [True, True, True, True], 128 | [True, True, True, True], 129 | ] 130 | ), 131 | ] 132 | }, 133 | { 134 | 'image_shape': (6, 6), 135 | 'step_size': (2, 3), 136 | 'window_shape': (5, 5), 137 | 'window_ref_arrays': [ 138 | np.ma.array( 139 | [ 140 | [0, 0, 0, 0, 0], 141 | [0, 0, 1, 2, 3], 142 | [0, 6, 7, 8, 9], 143 | [0, 12, 13, 14, 15], 144 | [0, 18, 19, 20, 21], 145 | ], 146 | mask=[ 147 | [True, True, True, True, True], 148 | [True, False, False, False, False], 149 | [True, False, False, False, False], 150 | [True, False, False, False, False], 151 | [True, False, False, False, False], 152 | ] 153 | ), 154 | np.ma.array( 155 | [ 156 | [0, 0, 0, 0, 0], 157 | [2, 3, 4, 5, 0], 158 | [8, 9, 10, 11, 0], 159 | [14, 15, 16, 17, 0], 160 | [20, 21, 22, 23, 0], 161 | ], 162 | mask=[ 163 | [True, True, True, True, True], 164 | [False, False, False, False, True], 165 | [False, False, 
False, False, True], 166 | [False, False, False, False, True], 167 | [False, False, False, False, True], 168 | ] 169 | ), 170 | np.ma.array( 171 | [ 172 | [0, 6, 7, 8, 9], 173 | [0, 12, 13, 14, 15], 174 | [0, 18, 19, 20, 21], 175 | [0, 24, 25, 26, 27], 176 | [0, 30, 31, 32, 33], 177 | ], 178 | mask=[ 179 | [True, False, False, False, False], 180 | [True, False, False, False, False], 181 | [True, False, False, False, False], 182 | [True, False, False, False, False], 183 | [True, False, False, False, False], 184 | ] 185 | ), 186 | np.ma.array( 187 | [ 188 | [8, 9, 10, 11, 0], 189 | [14, 15, 16, 17, 0], 190 | [20, 21, 22, 23, 0], 191 | [26, 27, 28, 29, 0], 192 | [32, 33, 34, 35, 0], 193 | ], 194 | mask=[ 195 | [False, False, False, False, True], 196 | [False, False, False, False, True], 197 | [False, False, False, False, True], 198 | [False, False, False, False, True], 199 | [False, False, False, False, True], 200 | ] 201 | ), 202 | np.ma.array( 203 | [ 204 | [0, 18, 19, 20, 21], 205 | [0, 24, 25, 26, 27], 206 | [0, 30, 31, 32, 33], 207 | [0, 0, 0, 0, 0], 208 | [0, 0, 0, 0, 0], 209 | ], 210 | mask=[ 211 | [True, False, False, False, False], 212 | [True, False, False, False, False], 213 | [True, False, False, False, False], 214 | [True, True, True, True, True], 215 | [True, True, True, True, True], 216 | ] 217 | ), 218 | np.ma.array( 219 | [ 220 | [20, 21, 22, 23, 0], 221 | [26, 27, 28, 29, 0], 222 | [32, 33, 34, 35, 0], 223 | [0, 0, 0, 0, 0], 224 | [0, 0, 0, 0, 0], 225 | ], 226 | mask=[ 227 | [False, False, False, False, True], 228 | [False, False, False, False, True], 229 | [False, False, False, False, True], 230 | [True, True, True, True, True], 231 | [True, True, True, True, True], 232 | ] 233 | ) 234 | ], 235 | }, 236 | { 237 | 'image_shape': (4, 6), 238 | 'step_size': (2, 3), 239 | 'window_shape': (5, 2), 240 | 'window_ref_arrays': [ 241 | np.ma.array( 242 | [ 243 | [0, 0], 244 | [0, 1], 245 | [6, 7], 246 | [12, 13], 247 | [18, 19], 248 | ], 249 | mask=[ 250 | 
[True, True], 251 | [False, False], 252 | [False, False], 253 | [False, False], 254 | [False, False], 255 | ] 256 | ), 257 | np.ma.array( 258 | [ 259 | [0, 0], 260 | [3, 4], 261 | [9, 10], 262 | [15, 16], 263 | [21, 22], 264 | ], 265 | mask=[ 266 | [True, True], 267 | [False, False], 268 | [False, False], 269 | [False, False], 270 | [False, False], 271 | ] 272 | ), 273 | np.ma.array( 274 | [ 275 | [6, 7], 276 | [12, 13], 277 | [18, 19], 278 | [0, 0], 279 | [0, 0], 280 | ], 281 | mask=[ 282 | [False, False], 283 | [False, False], 284 | [False, False], 285 | [True, True], 286 | [True, True], 287 | ] 288 | ), 289 | np.ma.array( 290 | [ 291 | [9, 10], 292 | [15, 16], 293 | [21, 22], 294 | [0, 0], 295 | [0, 0], 296 | ], 297 | mask=[ 298 | [False, False], 299 | [False, False], 300 | [False, False], 301 | [True, True], 302 | [True, True], 303 | ] 304 | ) 305 | ] 306 | } 307 | ] 308 | 309 | 310 | @pytest.mark.parametrize(list(WINDOW_TEST_DATA[0]), 311 | [tuple(d.values()) for d in WINDOW_TEST_DATA]) 312 | def test_windows(tmpdir, image_shape, step_size, window_shape, 313 | window_ref_arrays): 314 | satellite = 'monochrome' 315 | itype = 'pan' 316 | 317 | n_bands = len(BANDS[satellite]) 318 | shape = (n_bands, ) + image_shape 319 | image_array = np.array(range(np.prod(shape)), dtype=float) 320 | image_array.shape = shape 321 | print('image_array= ', image_array) 322 | image = create_mono_image(tmpdir, image_array, normalization=False) 323 | generator = FullGenerator(image, step_size) 324 | generator.load_image(itype, (window_shape, )) 325 | windows = [window for window in generator] 326 | print('generator._image_cache:\n', generator._image_cache) 327 | 328 | assert len(window_ref_arrays) == len(windows) 329 | for window, reference in zip(windows, window_ref_arrays): 330 | np.testing.assert_array_equal(window, reference) 331 | assert np.all(window.mask == reference.mask) 332 | 333 | 334 | def test_full_generator_windows(tmpdir): 335 | image_shape = (5, 5) 336 | window_shapes 
= ((5, 5), ) 337 | step_size = (3, 3) 338 | satellite = 'quickbird' 339 | itype = 'grayscale' 340 | 341 | n_bands = len(BANDS[satellite]) 342 | shape = (n_bands, ) + image_shape 343 | array = np.array(range(np.prod(shape)), dtype=float) 344 | array.shape = shape 345 | 346 | image = create_test_image(tmpdir, array, normalization=False) 347 | generator = FullGenerator(image, step_size) 348 | generator.load_image(itype, window_shapes) 349 | 350 | print('generator._image_cache:\n', generator._image_cache) 351 | 352 | assert generator.offset == (0, 0) 353 | assert generator.shape == (2, 2) 354 | 355 | windows = [window for window in generator] 356 | assert len(windows) == 4 357 | 358 | for i, window in enumerate(windows): 359 | print('window', i, '\n', window) 360 | 361 | # window center pixels are correct 362 | image._block = None 363 | original_image = image[itype] 364 | print('original image:\n', original_image) 365 | assert windows[0][2][2] == original_image[1][1] 366 | assert windows[1][2][2] == original_image[1][4] 367 | assert windows[2][2][2] == original_image[4][1] 368 | assert windows[3][2][2] == original_image[4][4] 369 | 370 | # horizontal edges are masked 371 | assert np.all(windows[0].mask[0]) 372 | assert np.all(windows[1].mask[0]) 373 | assert np.all(windows[2].mask[-1]) 374 | assert np.all(windows[3].mask[-1]) 375 | 376 | # vertical edges are masked 377 | assert np.all(windows[0].mask[:, 0]) 378 | assert np.all(windows[1].mask[:, 3:]) 379 | assert np.all(windows[2].mask[:, 0]) 380 | assert np.all(windows[3].mask[:, 3:]) 381 | 382 | # data is not masked 383 | assert not np.any(windows[0].mask[1:, 1:]) 384 | assert not np.any(windows[1].mask[1:, :3]) 385 | assert not np.any(windows[2].mask[:3, 1:]) 386 | assert not np.any(windows[3].mask[:3, :3]) 387 | 388 | 389 | st_window_shape = st.tuples( 390 | st.integers(min_value=1, max_value=10), 391 | st.integers(min_value=1, max_value=10)) 392 | 393 | st_window_shapes = st.lists( 394 | st_window_shape, 
min_size=1, max_size=10, unique=True) 395 | 396 | 397 | def create_step_and_image_strategy(limit, dtype): 398 | step_size = st.tuples( 399 | st.integers(min_value=1, max_value=limit[0]), 400 | st.integers(min_value=1, max_value=limit[1]), 401 | ) 402 | 403 | image_array = arrays( 404 | dtype=dtype, 405 | shape=st.tuples( 406 | st.just(len(BANDS['quickbird'])), 407 | st.integers(min_value=limit[0], max_value=10), 408 | st.integers(min_value=limit[1], max_value=10), 409 | ), 410 | ) 411 | 412 | return st.tuples(step_size, image_array) 413 | 414 | 415 | st_step_and_image = st.tuples( 416 | st.tuples( 417 | st.integers(min_value=1, max_value=10), 418 | st.integers(min_value=1, max_value=10), 419 | ), 420 | st_rasterio_dtypes, 421 | ).flatmap(lambda args: create_step_and_image_strategy(*args)) 422 | 423 | 424 | @given(st_window_shapes, st_step_and_image) 425 | def test_full_generator(tmpdir, window_shapes, step_and_image): 426 | step_size, image_array = step_and_image 427 | 428 | image = create_test_image(tmpdir, image_array, normalization=False) 429 | generator = FullGenerator(image, step_size) 430 | itype = 'grayscale' 431 | generator.load_image(itype, window_shapes) 432 | assert generator.loaded_itype == itype 433 | 434 | windows = [] 435 | for window in generator: 436 | assert window.shape in window_shapes 437 | windows.append(window) 438 | assert np.prod(generator.shape) == len(windows) // len(window_shapes) 439 | 440 | 441 | @given(st_window_shapes, st_step_and_image, 442 | st.integers(min_value=1, max_value=5)) 443 | @settings(deadline=1000) 444 | def test_full_generator_split(tmpdir, window_shapes, step_and_image, n_chunks): 445 | step_size, image_array = step_and_image 446 | 447 | image = create_test_image(tmpdir, image_array, normalization=False) 448 | generator = FullGenerator(image, step_size) 449 | itype = 'grayscale' 450 | generator.load_image(itype, window_shapes) 451 | reference = list(generator) 452 | 453 | windows = [] 454 | for gen in 
generator.split(n_chunks): 455 | gen.load_image(itype, window_shapes) 456 | windows.extend(gen) 457 | 458 | for i, window in enumerate(windows): 459 | np.testing.assert_array_equal(reference[i].mask, window.mask) 460 | np.testing.assert_array_equal(reference[i][~reference[i].mask], 461 | window[~window.mask]) 462 | 463 | assert len(reference) == len(windows) 464 | -------------------------------------------------------------------------------- /test/test_image.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import pytest 5 | import rasterio 6 | from hypothesis import given 7 | from netCDF4 import Dataset 8 | 9 | from satsense.image import FeatureVector 10 | 11 | from .test_extract import RGBFeature 12 | from .test_generators import st_window_shape as st_image_shape 13 | from .test_generators import st_window_shapes 14 | 15 | 16 | def create_featurevector(image_shape, window_shapes): 17 | """Create a FeatureVector instance for testing.""" 18 | feature = RGBFeature(window_shapes, test=True) 19 | vector = np.ma.empty((*image_shape, len(feature.windows), feature.size)) 20 | vector.mask = np.zeros_like(vector, dtype=bool) 21 | for i in range(len(feature.windows)): 22 | for j in range(feature.size): 23 | vector[:, :, i, j] = 100 * i + j 24 | vector.mask[0, 0, i, j] = True 25 | crs = rasterio.crs.CRS(init='epsg:4326') 26 | transform = rasterio.transform.from_origin(52, 4, 10, 10) 27 | featurevector = FeatureVector(feature, vector, crs, transform) 28 | return feature, vector, featurevector 29 | 30 | 31 | @given(st_image_shape, st_window_shapes) 32 | @pytest.mark.parametrize('extension', ['nc', 'tif']) 33 | def test_save_load_roundtrip(tmpdir, extension, image_shape, window_shapes): 34 | """Test that saving and loading does not modify a FeatureVector.""" 35 | feature, _, feature_vector = create_featurevector(image_shape, 36 | window_shapes) 37 | 38 | prefix = os.path.join(tmpdir, 'test') 39 | 
feature_vector.save(prefix, extension) 40 | restored_vector = feature_vector.from_file(feature, prefix) 41 | 42 | np.testing.assert_array_equal(feature_vector.vector.mask, 43 | restored_vector.vector.mask) 44 | np.testing.assert_array_almost_equal_nulp( 45 | feature_vector.vector.compressed(), 46 | restored_vector.vector.compressed()) 47 | 48 | 49 | @given(st_image_shape, st_window_shapes) 50 | def test_save_as_netcdf(tmpdir, image_shape, window_shapes): 51 | feature, vector, featurevector = create_featurevector( 52 | image_shape, window_shapes) 53 | filenames = featurevector.save(tmpdir, extension='nc') 54 | 55 | assert len(filenames) == len(feature.windows) 56 | for i, (filename, window_shape) in enumerate( 57 | zip(filenames, feature.windows)): 58 | with Dataset(filename) as dataset: 59 | assert tuple(dataset.window) == window_shape 60 | assert dataset.arguments == repr(feature.kwargs) 61 | data = dataset.variables[feature.name][:] 62 | assert data.shape == (feature.size, ) + vector.shape[:2] 63 | for j in range(feature.size): 64 | try: 65 | np.testing.assert_equal(data.mask[j], 66 | vector.mask[..., i, j]) 67 | np.testing.assert_array_almost_equal_nulp( 68 | data[j].compressed(), vector[..., i, j].compressed()) 69 | except AssertionError: 70 | print("Error in window", i, "feature element", j) 71 | print('saved:\n', data[j]) 72 | print('reference:\n', vector[..., i, j]) 73 | raise 74 | 75 | 76 | @given(st_image_shape, st_window_shapes) 77 | def test_save_as_geotiff(tmpdir, image_shape, window_shapes): 78 | feature, vector, featurevector = create_featurevector( 79 | image_shape, window_shapes) 80 | filenames = featurevector.save(tmpdir, extension='tif') 81 | 82 | assert len(filenames) == len(feature.windows) 83 | for i, (filename, window_shape) in enumerate( 84 | zip(filenames, feature.windows)): 85 | with rasterio.open(filename) as dataset: 86 | assert dataset.tags()['window'] == repr(window_shape) 87 | assert dataset.tags()['arguments'] == repr(feature.kwargs) 
88 | assert dataset.shape == vector.shape[:2] 89 | assert dataset.count == feature.size 90 | for j in range(feature.size): 91 | band = j + 1 92 | data = dataset.read(band, masked=True) 93 | try: 94 | np.testing.assert_equal(data.mask, vector.mask[..., i, j]) 95 | np.testing.assert_array_almost_equal_nulp( 96 | data.compressed(), vector[..., i, j].compressed()) 97 | except AssertionError: 98 | print("Error in window", i, "feature element", j) 99 | print('saved:\n', data) 100 | print('reference:\n', vector[..., i, j]) 101 | raise 102 | 103 | 104 | def test_normalization_partial_fails(image): 105 | """Check that computing the normalization on part of an image fails.""" 106 | block = [(0, 50), (100, 200)] 107 | partial_image = image.copy_block(block) 108 | 109 | with pytest.raises(ValueError) as exc: 110 | partial_image.precompute_normalization() 111 | msg = str(exc.value) 112 | assert "Unable to compute normalization on part of the image." in msg 113 | --------------------------------------------------------------------------------