├── .gitignore
├── .pylintrc
├── LICENSE
├── README.rst
├── pysemseg
│   ├── __init__.py
│   ├── datasets
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── camvid
│   │   │   ├── __init__.py
│   │   │   └── camvid.py
│   │   ├── pascal_voc
│   │   │   ├── __init__.py
│   │   │   ├── pascal.py
│   │   │   └── prepare_dataset.py
│   │   └── transformer.py
│   ├── evaluate.py
│   ├── loggers
│   │   ├── __init__.py
│   │   ├── console_logger.py
│   │   ├── tensorboard_logger.py
│   │   └── visdom_logger.py
│   ├── losses
│   │   ├── __init__.py
│   │   ├── binary.py
│   │   └── focal.py
│   ├── lr_schedulers
│   │   ├── __init__.py
│   │   ├── constant.py
│   │   └── poly.py
│   ├── metrics.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── deeplab.py
│   │   ├── densenet.py
│   │   ├── fcn.py
│   │   ├── resnet.py
│   │   └── unet.py
│   ├── train.py
│   ├── transforms
│   │   ├── __init__.py
│   │   ├── convert.py
│   │   ├── loaders.py
│   │   └── transforms.py
│   └── utils.py
├── requirements.txt
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.swp
2 | *.pyc
3 | pysemseg.egg-info/
4 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [MASTER]
2 |
3 | # A comma-separated list of package or module names from where C extensions may
4 | # be loaded. Extensions are loaded into the active Python interpreter and may
5 | # run arbitrary code.
6 | extension-pkg-whitelist=cv2,scipy.io
7 |
8 | # Add files or directories to the blacklist. They should be base names, not
9 | # paths.
10 | ignore=CVS
11 |
12 | # Add files or directories matching the regex patterns to the blacklist. The
13 | # regex matches against base names, not paths.
14 | ignore-patterns=
15 |
16 | # Python code to execute, usually for sys.path manipulation such as
17 | # pygtk.require().
18 | #init-hook=
19 |
20 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
21 | # number of processors available to use.
22 | jobs=1
23 |
24 | # Control the amount of potential inferred values when inferring a single
25 | # object. This can help the performance when dealing with large functions or
26 | # complex, nested conditions.
27 | limit-inference-results=100
28 |
29 | # List of plugins (as comma separated values of python modules names) to load,
30 | # usually to register additional checkers.
31 | load-plugins=
32 |
33 | # Pickle collected data for later comparisons.
34 | persistent=yes
35 |
36 | # Specify a configuration file.
37 | #rcfile=
38 |
39 | # When enabled, pylint would attempt to guess common misconfiguration and emit
40 | # user-friendly hints instead of false-positive error messages.
41 | suggestion-mode=yes
42 |
43 | # Allow loading of arbitrary C extensions. Extensions are imported into the
44 | # active Python interpreter and may run arbitrary code.
45 | unsafe-load-any-extension=no
46 |
47 |
48 | [MESSAGES CONTROL]
49 |
50 | # Only show warnings with the listed confidence levels. Leave empty to show
51 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
52 | confidence=
53 |
54 | # Disable the message, report, category or checker with the given id(s). You
55 | # can either give multiple identifiers separated by comma (,) or put this
56 | # option multiple times (only on the command line, not in the configuration
57 | # file where it should appear only once). You can also use "--disable=all" to
58 | # disable everything first and then reenable specific checks. For example, if
59 | # you want to run only the similarities checker, you can use "--disable=all
60 | # --enable=similarities". If you want to run only the classes checker, but have
61 | # no Warning level messages displayed, use "--disable=all --enable=classes
62 | # --disable=W".
63 | disable=print-statement,
64 | parameter-unpacking,
65 | unpacking-in-except,
66 | old-raise-syntax,
67 | backtick,
68 | long-suffix,
69 | old-ne-operator,
70 | old-octal-literal,
71 | import-star-module-level,
72 | non-ascii-bytes-literal,
73 | raw-checker-failed,
74 | bad-inline-option,
75 | locally-disabled,
76 | locally-enabled,
77 | file-ignored,
78 | suppressed-message,
79 | useless-suppression,
80 | deprecated-pragma,
81 | use-symbolic-message-instead,
82 | apply-builtin,
83 | basestring-builtin,
84 | buffer-builtin,
85 | cmp-builtin,
86 | coerce-builtin,
87 | execfile-builtin,
88 | file-builtin,
89 | long-builtin,
90 | raw_input-builtin,
91 | reduce-builtin,
92 | standarderror-builtin,
93 | unicode-builtin,
94 | xrange-builtin,
95 | coerce-method,
96 | delslice-method,
97 | getslice-method,
98 | setslice-method,
99 | no-absolute-import,
100 | old-division,
101 | dict-iter-method,
102 | dict-view-method,
103 | next-method-called,
104 | metaclass-assignment,
105 | indexing-exception,
106 | raising-string,
107 | reload-builtin,
108 | oct-method,
109 | hex-method,
110 | nonzero-method,
111 | cmp-method,
112 | input-builtin,
113 | round-builtin,
114 | intern-builtin,
115 | unichr-builtin,
116 | map-builtin-not-iterating,
117 | zip-builtin-not-iterating,
118 | range-builtin-not-iterating,
119 | filter-builtin-not-iterating,
120 | using-cmp-argument,
121 | eq-without-hash,
122 | div-method,
123 | idiv-method,
124 | rdiv-method,
125 | exception-message-attribute,
126 | invalid-str-codec,
127 | sys-max-int,
128 | bad-python3-import,
129 | deprecated-string-function,
130 | deprecated-str-translate-call,
131 | deprecated-itertools-function,
132 | deprecated-types-field,
133 | next-method-defined,
134 | dict-items-not-iterating,
135 | dict-keys-not-iterating,
136 | dict-values-not-iterating,
137 | deprecated-operator-function,
138 | deprecated-urllib-function,
139 | xreadlines-attribute,
140 | deprecated-sys-function,
141 | exception-escape,
142 | comprehension-escape,
143 | C0111, # missing docstring
144 | C0414, # Import alias does not rename original package (useless-import-alias)
145 | too-few-public-methods,
146 |
147 | # Enable the message, report, category or checker with the given id(s). You can
148 | # either give multiple identifiers separated by comma (,) or put this option
149 | # multiple times (only on the command line, not in the configuration file where
150 | # it should appear only once). See also the "--disable" option for examples.
151 | enable=c-extension-no-member
152 |
153 |
154 | [REPORTS]
155 |
156 | # Python expression which should return a note less than 10 (10 is the highest
157 | # note). You have access to the variables errors, warning, statement which
158 | # respectively contain the number of errors / warnings messages and the total
159 | # number of statements analyzed. This is used by the global evaluation report
160 | # (RP0004).
161 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
162 |
163 | # Template used to display messages. This is a python new-style format string
164 | # used to format the message information. See doc for all details.
165 | #msg-template=
166 |
167 | # Set the output format. Available formats are text, parseable, colorized, json
168 | # and msvs (visual studio). You can also give a reporter class, e.g.
169 | # mypackage.mymodule.MyReporterClass.
170 | output-format=text
171 |
172 | # Tells whether to display a full report or only the messages.
173 | reports=no
174 |
175 | # Activate the evaluation score.
176 | score=yes
177 |
178 |
179 | [REFACTORING]
180 |
181 | # Maximum number of nested blocks for function / method body
182 | max-nested-blocks=5
183 |
184 | # Complete name of functions that never returns. When checking for
185 | # inconsistent-return-statements if a never returning function is called then
186 | # it will be considered as an explicit return statement and no message will be
187 | # printed.
188 | never-returning-functions=sys.exit
189 |
190 |
191 | [FORMAT]
192 |
193 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
194 | expected-line-ending-format=
195 |
196 | # Regexp for a line that is allowed to be longer than the limit.
197 | ignore-long-lines=^\s*(# )?<?https?://\S+>?$
198 |
199 | # Number of spaces of indent required inside a hanging or continued line.
200 | indent-after-paren=4
201 |
202 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
203 | # tab).
204 | indent-string=' '
205 |
206 | # Maximum number of characters on a single line.
207 | max-line-length=100
208 |
209 | # Maximum number of lines in a module.
210 | max-module-lines=1000
211 |
212 | # List of optional constructs for which whitespace checking is disabled. `dict-
213 | # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
214 | # `trailing-comma` allows a space between comma and closing bracket: (a, ).
215 | # `empty-line` allows space-only lines.
216 | no-space-check=trailing-comma,
217 | dict-separator
218 |
219 | # Allow the body of a class to be on the same line as the declaration if body
220 | # contains single statement.
221 | single-line-class-stmt=no
222 |
223 | # Allow the body of an if to be on the same line as the test if there is no
224 | # else.
225 | single-line-if-stmt=no
226 |
227 |
228 | [TYPECHECK]
229 |
230 | # List of decorators that produce context managers, such as
231 | # contextlib.contextmanager. Add to this list to register other decorators that
232 | # produce valid context managers.
233 | contextmanager-decorators=contextlib.contextmanager
234 |
235 | # List of members which are set dynamically and missed by pylint inference
236 | # system, and so shouldn't trigger E1101 when accessed. Python regular
237 | # expressions are accepted.
238 | generated-members=
239 |
240 | # Tells whether missing members accessed in mixin class should be ignored. A
241 | # mixin class is detected if its name ends with "mixin" (case insensitive).
242 | ignore-mixin-members=yes
243 |
244 | # Tells whether to warn about missing members when the owner of the attribute
245 | # is inferred to be None.
246 | ignore-none=yes
247 |
248 | # This flag controls whether pylint should warn about no-member and similar
249 | # checks whenever an opaque object is returned when inferring. The inference
250 | # can return multiple potential results while evaluating a Python object, but
251 | # some branches might not be evaluated, which results in partial inference. In
252 | # that case, it might be useful to still emit no-member and other checks for
253 | # the rest of the inferred objects.
254 | ignore-on-opaque-inference=yes
255 |
256 | # List of class names for which member attributes should not be checked (useful
257 | # for classes with dynamically set attributes). This supports the use of
258 | # qualified names.
259 | ignored-classes=optparse.Values,thread._local,_thread._local
260 |
261 | # List of module names for which member attributes should not be checked
262 | # (useful for modules/projects where namespaces are manipulated during runtime
263 | # and thus existing member attributes cannot be deduced by static analysis. It
264 | # supports qualified module names, as well as Unix pattern matching.
265 | ignored-modules=
266 |
267 | # Show a hint with possible names when a member name was not found. The aspect
268 | # of finding the hint is based on edit distance.
269 | missing-member-hint=yes
270 |
271 | # The minimum edit distance a name should have in order to be considered a
272 | # similar match for a missing member name.
273 | missing-member-hint-distance=1
274 |
275 | # The total number of similar names that should be taken in consideration when
276 | # showing a hint for a missing member.
277 | missing-member-max-choices=1
278 |
279 |
280 | [BASIC]
281 |
282 | # Naming style matching correct argument names.
283 | argument-naming-style=snake_case
284 |
285 | # Regular expression matching correct argument names. Overrides argument-
286 | # naming-style.
287 | #argument-rgx=
288 |
289 | # Naming style matching correct attribute names.
290 | attr-naming-style=snake_case
291 |
292 | # Regular expression matching correct attribute names. Overrides attr-naming-
293 | # style.
294 | #attr-rgx=
295 |
296 | # Bad variable names which should always be refused, separated by a comma.
297 | bad-names=foo,
298 | bar,
299 | baz,
300 | toto,
301 | tutu,
302 | tata
303 |
304 | # Naming style matching correct class attribute names.
305 | class-attribute-naming-style=any
306 |
307 | # Regular expression matching correct class attribute names. Overrides class-
308 | # attribute-naming-style.
309 | #class-attribute-rgx=
310 |
311 | # Naming style matching correct class names.
312 | class-naming-style=PascalCase
313 |
314 | # Regular expression matching correct class names. Overrides class-naming-
315 | # style.
316 | #class-rgx=
317 |
318 | # Naming style matching correct constant names.
319 | const-naming-style=UPPER_CASE
320 |
321 | # Regular expression matching correct constant names. Overrides const-naming-
322 | # style.
323 | #const-rgx=
324 |
325 | # Minimum line length for functions/classes that require docstrings, shorter
326 | # ones are exempt.
327 | docstring-min-length=-1
328 |
329 | # Naming style matching correct function names.
330 | function-naming-style=snake_case
331 |
332 | # Regular expression matching correct function names. Overrides function-
333 | # naming-style.
334 | #function-rgx=
335 |
336 | # Good variable names which should always be accepted, separated by a comma.
337 | good-names=i,
338 | j,
339 | k,
340 | ex,
341 | Run,
342 | _
343 |
344 | # Include a hint for the correct naming format with invalid-name.
345 | include-naming-hint=no
346 |
347 | # Naming style matching correct inline iteration names.
348 | inlinevar-naming-style=any
349 |
350 | # Regular expression matching correct inline iteration names. Overrides
351 | # inlinevar-naming-style.
352 | #inlinevar-rgx=
353 |
354 | # Naming style matching correct method names.
355 | method-naming-style=snake_case
356 |
357 | # Regular expression matching correct method names. Overrides method-naming-
358 | # style.
359 | #method-rgx=
360 |
361 | # Naming style matching correct module names.
362 | module-naming-style=snake_case
363 |
364 | # Regular expression matching correct module names. Overrides module-naming-
365 | # style.
366 | #module-rgx=
367 |
368 | # Colon-delimited sets of names that determine each other's naming style when
369 | # the name regexes allow several styles.
370 | name-group=
371 |
372 | # Regular expression which should only match function or class names that do
373 | # not require a docstring.
374 | no-docstring-rgx=^_
375 |
376 | # List of decorators that produce properties, such as abc.abstractproperty. Add
377 | # to this list to register other decorators that produce valid properties.
378 | # These decorators are taken in consideration only for invalid-name.
379 | property-classes=abc.abstractproperty
380 |
381 | # Naming style matching correct variable names.
382 | variable-naming-style=snake_case
383 |
384 | # Regular expression matching correct variable names. Overrides variable-
385 | # naming-style.
386 | #variable-rgx=
387 |
388 |
389 | [VARIABLES]
390 |
391 | # List of additional names supposed to be defined in builtins. Remember that
392 | # you should avoid to define new builtins when possible.
393 | additional-builtins=
394 |
395 | # Tells whether unused global variables should be treated as a violation.
396 | allow-global-unused-variables=yes
397 |
398 | # List of strings which can identify a callback function by name. A callback
399 | # name must start or end with one of those strings.
400 | callbacks=cb_,
401 | _cb
402 |
403 | # A regular expression matching the name of dummy variables (i.e. expected to
404 | # not be used).
405 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
406 |
407 | # Argument names that match this expression will be ignored. Default to name
408 | # with leading underscore.
409 | ignored-argument-names=_.*|^ignored_|^unused_
410 |
411 | # Tells whether we should check for unused import in __init__ files.
412 | init-import=no
413 |
414 | # List of qualified module names which can have objects that can redefine
415 | # builtins.
416 | redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
417 |
418 |
419 | [SPELLING]
420 |
421 | # Limits count of emitted suggestions for spelling mistakes.
422 | max-spelling-suggestions=4
423 |
424 | # Spelling dictionary name. Available dictionaries: none. To make it work,
425 | # install the python-enchant package.
426 | spelling-dict=
427 |
428 | # List of comma separated words that should not be checked.
429 | spelling-ignore-words=
430 |
431 | # A path to a file that contains private dictionary; one word per line.
432 | spelling-private-dict-file=
433 |
434 | # Tells whether to store unknown words to indicated private dictionary in
435 | # --spelling-private-dict-file option instead of raising a message.
436 | spelling-store-unknown-words=no
437 |
438 |
439 | [LOGGING]
440 |
441 | # Logging modules to check that the string format arguments are in logging
442 | # function parameter format.
443 | logging-modules=logging
444 |
445 |
446 | [SIMILARITIES]
447 |
448 | # Ignore comments when computing similarities.
449 | ignore-comments=yes
450 |
451 | # Ignore docstrings when computing similarities.
452 | ignore-docstrings=yes
453 |
454 | # Ignore imports when computing similarities.
455 | ignore-imports=no
456 |
457 | # Minimum lines number of a similarity.
458 | min-similarity-lines=4
459 |
460 |
461 | [MISCELLANEOUS]
462 |
463 | # List of note tags to take in consideration, separated by a comma.
464 | notes=FIXME,
465 | XXX,
466 | TODO
467 |
468 |
469 | [IMPORTS]
470 |
471 | # Allow wildcard imports from modules that define __all__.
472 | allow-wildcard-with-all=no
473 |
474 | # Analyse import fallback blocks. This can be used to support both Python 2 and
475 | # 3 compatible code, which means that the block might have code that exists
476 | # only in one or another interpreter, leading to false positives when analysed.
477 | analyse-fallback-blocks=no
478 |
479 | # Deprecated modules which should not be used, separated by a comma.
480 | deprecated-modules=optparse,tkinter.tix
481 |
482 | # Create a graph of external dependencies in the given file (report RP0402 must
483 | # not be disabled).
484 | ext-import-graph=
485 |
486 | # Create a graph of every (i.e. internal and external) dependencies in the
487 | # given file (report RP0402 must not be disabled).
488 | import-graph=
489 |
490 | # Create a graph of internal dependencies in the given file (report RP0402 must
491 | # not be disabled).
492 | int-import-graph=
493 |
494 | # Force import order to recognize a module as part of the standard
495 | # compatibility libraries.
496 | known-standard-library=
497 |
498 | # Force import order to recognize a module as part of a third party library.
499 | known-third-party=enchant
500 |
501 |
502 | [DESIGN]
503 |
504 | # Maximum number of arguments for function / method.
505 | max-args=5
506 |
507 | # Maximum number of attributes for a class (see R0902).
508 | max-attributes=7
509 |
510 | # Maximum number of boolean expressions in an if statement.
511 | max-bool-expr=5
512 |
513 | # Maximum number of branch for function / method body.
514 | max-branches=12
515 |
516 | # Maximum number of locals for function / method body.
517 | max-locals=15
518 |
519 | # Maximum number of parents for a class (see R0901).
520 | max-parents=7
521 |
522 | # Maximum number of public methods for a class (see R0904).
523 | max-public-methods=20
524 |
525 | # Maximum number of return / yield for function / method body.
526 | max-returns=6
527 |
528 | # Maximum number of statements in function / method body.
529 | max-statements=50
530 |
531 | # Minimum number of public methods for a class (see R0903).
532 | min-public-methods=2
533 |
534 |
535 | [CLASSES]
536 |
537 | # List of method names used to declare (i.e. assign) instance attributes.
538 | defining-attr-methods=__init__,
539 | __new__,
540 | setUp
541 |
542 | # List of member names, which should be excluded from the protected access
543 | # warning.
544 | exclude-protected=_asdict,
545 | _fields,
546 | _replace,
547 | _source,
548 | _make
549 |
550 | # List of valid names for the first argument in a class method.
551 | valid-classmethod-first-arg=cls
552 |
553 | # List of valid names for the first argument in a metaclass class method.
554 | valid-metaclass-classmethod-first-arg=cls
555 |
556 |
557 | [EXCEPTIONS]
558 |
559 | # Exceptions that will emit a warning when being caught. Defaults to
560 | # "Exception".
561 | overgeneral-exceptions=Exception
562 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |     <one line to give the program's name and a brief idea of what it does.>
635 |     Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | PySemSeg
2 | ========
3 |
4 | PySemSeg is a library for training Deep Learning Models for Semantic Segmentation in PyTorch.
5 | The goal of the library is to provide implementations of SOTA segmentation models, with pretrained versions
6 | on popular datasets, as well as an easy-to-use training loop for new models and datasets. Most Semantic Segmentation datasets
7 | with fine-grained annotations are small, so Transfer Learning is crucial for success and is a core capability of the library. PySemSeg can use visdom or tensorboardX for training summary visualization.
8 |
9 |
10 | Installation
11 | =============
12 |
13 | Using pip:
14 |
15 | .. code:: bash
16 |
17 | pip install git+https://github.com/petko-nikolov/pysemseg
18 |
19 |
20 | Models
21 | ======
22 |
23 | - FCN [`paper <https://arxiv.org/abs/1411.4038>`_] - FCN32, FCN16, FCN8 with pre-trained VGG16
24 | - UNet [`paper <https://arxiv.org/abs/1505.04597>`_]
25 | - Tiramisu (FC DenseNets) [`paper <https://arxiv.org/abs/1611.09326>`_] - FC DenseNet 56, FC DenseNet 67, FC DenseNet 103 with efficient checkpointing
26 | - DeepLab V3 [`paper <https://arxiv.org/abs/1706.05587>`_] - Multi-grid, ASPP and BatchNorm fine-tuning with pre-trained ResNet backbones
27 | - DeepLab V3+ [`paper <https://arxiv.org/abs/1802.02611>`_]
28 | - RefineNet [`paper <https://arxiv.org/abs/1611.06612>`_] - [Upcoming ...]
29 | - PSPNet [`paper <https://arxiv.org/abs/1612.01105>`_] - [Upcoming ...]
30 |
31 |
32 | Datasets
33 | ========
34 | - `Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/voc2012/>`_
35 | - `CamVid <http://mi.eng.cam.ac.uk/research/projects/VideoRec/CamVid/>`_
36 | - Cityscapes [Upcoming ...]
37 | - ADE20K [Upcoming ...]
38 |
39 |
40 | Train a model from command line
41 | ===============================
42 |
43 | The following is an example command to train a VGGFCN8 model on the Pascal VOC 2012 dataset. In addition to the dataset and the model, a transformer class should be passed (:code:`PascalVOCTransform` in this case) - a callable implementing all input image and mask augmentations and tensor transforms. Run :code:`pysemseg-train -h` for a full list of options.
44 |
45 | .. code:: bash
46 |
47 | pysemseg-train \
48 | --model VGGFCN8 \
49 | --model-dir ~/models/vgg8_pascal_model/ \
50 | --dataset PascalVOCSegmentation \
51 | --data-dir ~/datasets/PascalVOC/ \
52 | --batch-size 4 \
53 | --test-batch-size 1 \
54 | --epochs 40 \
55 | --lr 0.001 \
56 | --optimizer SGD \
57 | --optimizer-args '{"weight_decay": 0.0005, "momentum": 0.9}' \
58 | --transformer PascalVOCTransform \
59 | --lr-scheduler PolyLR \
60 | --lr-scheduler-args '{"max_epochs": 40, "gamma": 0.8}'
61 |
62 |
63 | or pass a YAML config:
64 |
65 |
66 |
67 | .. code:: bash
68 |
69 | pysemseg-train --config config.yaml
70 |
71 |
72 | .. code:: yaml
73 |
74 | model: VGGFCN32
75 | model-dir: models/vgg8_pascal_model/
76 | dataset: PascalVOCSegmentation
77 | data-dir: datasets/PascalVOC/
78 | batch-size: 4
79 | test-batch-size: 1
80 | epochs: 40
81 | lr: 0.001
82 | optimizer: SGD
83 | optimizer-args:
84 | weight_decay: 0.0005
85 | momentum: 0.9
86 | transformer: PascalVOCTransform
87 | no-cuda: true
88 | lr-scheduler: PolyLR
89 | lr-scheduler-args:
90 | max_epochs: 40
91 | gamma: 0.8
92 |
93 | Load and predict with a trained model
94 | =====================================
95 |
96 | To use a checkpoint for inference, call :code:`load_model` with the checkpoint path, the model class, and the transformer class used during training.
97 |
98 | .. code:: python
99 |
100 | import torch
101 | import torch.nn.functional as F
102 | from pysemseg.transforms import CV2ImageLoader
103 | from pysemseg.utils import load_model
104 | from pysemseg.models import VGGFCN32
105 | from pysemseg.datasets import PascalVOCTransform
106 |
107 | model = load_model(
108 | './checkpoint_path',
109 | VGGFCN32,
110 | PascalVOCTransform
111 | )
112 |
113 | image = CV2ImageLoader()('./image_path')
114 | logits = model(image)
115 | probabilities = F.softmax(logits, dim=1)
116 | predictions = torch.argmax(logits, dim=1)
117 |
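118 | The model outputs raw logits; :code:`argmax` over the class dimension gives the per-pixel class map. To turn that map into a color image, one option (a sketch - it assumes the Pascal VOC data is prepared locally, so the dataset object can be constructed) is to reuse the dataset's color palette, as the built-in loggers do:
119 |
120 | .. code:: python
121 |
122 | from pysemseg.datasets import PascalVOCSegmentation
123 | from pysemseg.utils import tensor_to_numpy
124 |
125 | dataset = PascalVOCSegmentation('datasets/PascalVOC/', split='val')
126 | # encode_color maps integer class ids to their RGB palette colors
127 | color_mask = dataset.color_palette.encode_color(tensor_to_numpy(predictions))
128 |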
--------------------------------------------------------------------------------
/pysemseg/__init__.py:
--------------------------------------------------------------------------------
1 | import pysemseg.datasets
2 | import pysemseg.models
3 | import pysemseg.lr_schedulers
4 | import pysemseg.transforms
5 | import pysemseg.loggers
6 | import pysemseg.losses
7 |
--------------------------------------------------------------------------------
/pysemseg/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import SegmentationDataset
2 | from .pascal_voc.pascal import PascalVOCSegmentation, PascalVOCTransform
3 | from .camvid import CamVid, CamVidTransform
4 | from .transformer import DatasetTransformer
5 |
6 |
7 | def create_dataset(data_dir, dataset_cls, dataset_args,
8 | transformer_cls, transformer_args, mode):
9 | dataset = dataset_cls(data_dir, mode, **dataset_args)
10 | transformer = transformer_cls(mode=mode, **transformer_args)
11 | return DatasetTransformer(dataset, transformer, mode)
12 |
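13 | # Usage sketch ('/data/CamVid' is an illustrative path):
14 | # train_dataset = create_dataset(
15 | #     '/data/CamVid', CamVid, {}, CamVidTransform, {}, mode='train')
16 | # The returned DatasetTransformer yields (id, image, mask) tuples that
17 | # can be fed to a torch DataLoader.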
--------------------------------------------------------------------------------
/pysemseg/datasets/base.py:
--------------------------------------------------------------------------------
1 | from torch.utils.data import Dataset
2 | from abc import ABCMeta, abstractmethod
3 | from pysemseg.utils import ColorPalette
4 |
5 |
6 | class SegmentationDataset(Dataset, metaclass=ABCMeta):
7 | def __init__(self):
8 | self.color_palette_ = ColorPalette(256)
9 |
10 | @property
11 | @abstractmethod
12 | def number_of_classes(self):
13 | pass
14 |
15 | @property
16 | def labels(self):
17 | return None
18 |
19 | @property
20 | def color_palette(self):
21 | return self.color_palette_
22 |
23 | @property
24 | def ignore_index(self):
25 | return -1
26 |
27 | @property
28 | def in_channels(self):
29 | return 3
30 |
--------------------------------------------------------------------------------
/pysemseg/datasets/camvid/__init__.py:
--------------------------------------------------------------------------------
1 | from .camvid import CamVid, CamVidTransform
2 |
--------------------------------------------------------------------------------
/pysemseg/datasets/camvid/camvid.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | from torchvision.transforms import Normalize
4 | import cv2
5 |
6 | from pysemseg import transforms
7 | from pysemseg.datasets.base import SegmentationDataset
8 | from pysemseg.utils import ColorPalette
9 |
10 |
11 | CAMVID_CLASSES = [
12 | "Sky",
13 | "Building",
14 | "Column_pole",
15 | "Road",
16 | "Sidewalk",
17 | "Tree",
18 | "SignSymbol",
19 | "Fence",
20 | "Car",
21 | "Pedestrian",
22 | "Bicyclist"
23 | ]
24 |
25 | CAMVID_COLORS = [
26 | (128, 128, 128),
27 | (128, 0, 0),
28 | (192, 192, 128),
29 | (128, 64, 128),
30 | (0, 0, 192),
31 | (128, 128, 0),
32 | (192, 128, 128),
33 | (64, 64, 128),
34 | (64, 0, 128),
35 | (64, 64, 0),
36 | (0, 128, 192),
37 | (0, 0, 0)
38 | ]
39 |
40 |
41 | def _parse_image_paths(images_dir, annotations_dir):
42 | image_data = []
43 | for image_filepath in glob.glob(images_dir + '/*.png'):
44 | image_filename = os.path.basename(image_filepath)
45 | annotation_filepath = os.path.join(annotations_dir, image_filename)
46 | assert os.path.exists(annotation_filepath)
47 | image_data.append({
48 | 'id': image_filename,
49 | 'image_filepath': image_filepath,
50 | 'gt_filepath': (
51 | annotation_filepath
52 | if os.path.exists(annotation_filepath) else None)
53 | })
54 | return image_data
55 |
56 |
57 | class CamVid(SegmentationDataset):
58 | def __init__(self, root_dir, split):
59 | super().__init__()
60 | assert split in ['train', 'val', 'test']
61 | self.color_palette_ = ColorPalette(CAMVID_COLORS)
62 | self.image_loader = transforms.CV2ImageLoader()
63 | self.target_loader = transforms.CV2ImageLoader(grayscale=True)
64 | self.root_dir = root_dir
65 | self.split = split
66 | self.image_data = _parse_image_paths(
67 | os.path.join(self.root_dir, split),
68 | os.path.join(self.root_dir, split + 'annot')
69 | )
70 |
71 | @property
72 | def number_of_classes(self):
73 | return 12
74 |
75 | @property
76 | def labels(self):
77 | return CAMVID_CLASSES
78 |
79 | @property
80 | def ignore_index(self):
81 | return 11
82 |
83 | def __getitem__(self, index):
84 | item = self.image_data[index]
85 | return (
86 | item['id'],
87 | self.image_loader(item['image_filepath']),
88 | self.target_loader(item['gt_filepath'])
89 | )
90 |
91 | def __len__(self):
92 | return len(self.image_data)
93 |
94 |
95 |
96 | class CamVidTransform:
97 | def __init__(self, mode):
98 | self.mode = mode
99 | self.image_loader = transforms.Compose([
100 | transforms.ToFloatImage()
101 | ])
102 |
103 | self.image_augmentations = transforms.Compose([
104 | transforms.RandomHueSaturation(
105 | hue_delta=0.05, saturation_scale_range=(0.7, 1.3)),
106 | transforms.RandomContrast(0.5, 1.5),
107 | transforms.RandomBrightness(-32.0 / 255, 32. / 255)
108 | ])
109 |
110 | self.joint_augmentations = transforms.Compose([
111 | transforms.RandomCropFixedSize((224, 224)),
112 | transforms.RandomHorizontalFlip()
113 | ])
114 |
115 | self.tensor_transforms = transforms.Compose([
116 | transforms.ToTensor(),
117 | Normalize(
118 | mean=[0.39068785, 0.40521392, 0.41434407],
119 | std=[0.29652068, 0.30514979, 0.30080369])
120 | ])
121 |
122 | def __call__(self, image, target):
123 | image = self.image_loader(image)
124 | if self.mode == 'train':
125 | image, target = self.joint_augmentations(image, target)
126 | image = self.image_augmentations(image)
127 | image = self.tensor_transforms(image)
128 | target = transforms.ToCategoryTensor()(target)
129 | return image, target
130 |
--------------------------------------------------------------------------------
/pysemseg/datasets/pascal_voc/__init__.py:
--------------------------------------------------------------------------------
1 | from .pascal import PascalVOCSegmentation, PascalVOCTransform
2 |
--------------------------------------------------------------------------------
/pysemseg/datasets/pascal_voc/pascal.py:
--------------------------------------------------------------------------------
1 | import os
2 | from torchvision.transforms import Normalize
3 | import cv2
4 |
5 | from pysemseg.datasets.base import SegmentationDataset
6 | from pysemseg import transforms
7 |
8 |
9 | PASCAL_CLASSES = [
10 | 'background',
11 | 'aeroplane',
12 | 'bicycle',
13 | 'bird',
14 | 'boat',
15 | 'bottle',
16 | 'bus',
17 | 'car',
18 | 'cat',
19 | 'chair',
20 | 'cow',
21 | 'diningtable',
22 | 'dog',
23 | 'horse',
24 | 'motorbike',
25 | 'person',
26 | 'potted-plant',
27 | 'sheep',
28 | 'sofa',
29 | 'train',
30 | 'tv/monitor'
31 | ]
32 |
33 |
34 | def _read_image_ids(split_filepath):
35 | with open(split_filepath, 'r') as split_file:
36 | return [s.strip() for s in split_file]
37 |
38 |
39 | def _parse_image_paths(image_dir, ground_truth_dir, image_ids):
40 | image_data = []
41 |
42 | for image_id in image_ids:
43 | img_path = os.path.join(image_dir, image_id + '.jpg')
44 | mask_path = os.path.join(ground_truth_dir, image_id + '.png')
45 | if os.path.exists(img_path):
46 | image_data.append({
47 | 'id': image_id,
48 | 'image_filepath': img_path,
49 | 'gt_filepath': (
50 | mask_path if os.path.exists(mask_path) else None)
51 | })
52 | return image_data
53 |
54 |
55 | class PascalVOCSegmentation(SegmentationDataset):
56 | def __init__(self, root, split='train'):
57 | super().__init__()
58 |
59 | assert split in ['train', 'test', 'val']
60 | self.root = os.path.expanduser(root)
61 | self.split = split
62 | self.image_loader = transforms.CV2ImageLoader()
63 | self.target_loader = transforms.CV2ImageLoader(grayscale=True)
64 |
65 | benchmark_train_ids = set(
66 | _read_image_ids(
67 | os.path.join(root, 'benchmark_RELEASE/dataset/train.txt')
68 | )
69 | )
70 |
71 | benchmark_val_ids = set(
72 | _read_image_ids(
73 | os.path.join(root, 'benchmark_RELEASE/dataset/val.txt')
74 | )
75 | )
76 |
77 | voc2012_train_ids = set(
78 | _read_image_ids(
79 | os.path.join(
80 | root,
81 | 'VOCdevkit/VOC2012/ImageSets/Segmentation/train.txt')
82 | )
83 | )
84 |
85 | voc2012_val_ids = set(
86 | _read_image_ids(
87 | os.path.join(
88 | root,
89 | 'VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt')
90 | )
91 | )
92 |
93 | self.train_image_data = _parse_image_paths(
94 | os.path.join(root, 'VOCdevkit/VOC2012/JPEGImages'),
95 | os.path.join(root, 'VOCdevkit/VOC2012/SegmentationClassLabels'),
96 | voc2012_train_ids
97 | )
98 |
99 | self.train_image_data.extend(_parse_image_paths(
100 | os.path.join(root, 'benchmark_RELEASE/dataset/img'),
101 | os.path.join(root, 'benchmark_RELEASE/dataset/cls_labels'),
102 | benchmark_train_ids | benchmark_val_ids
103 | ))
104 |
105 | self.train_ids = (
106 | voc2012_train_ids | benchmark_val_ids | benchmark_train_ids
107 | )
108 |
109 | self.val_ids = voc2012_val_ids - self.train_ids
110 |
111 | self.val_image_data = _parse_image_paths(
112 | os.path.join(root, 'VOCdevkit/VOC2012/JPEGImages'),
113 | os.path.join(root, 'VOCdevkit/VOC2012/SegmentationClassLabels'),
114 | self.val_ids
115 | )
116 |
117 | self.image_data = {
118 | 'train': self.train_image_data,
119 | 'val': self.val_image_data
120 | }[self.split]
121 |
122 | @property
123 | def number_of_classes(self):
124 | return 21
125 |
126 | @property
127 | def labels(self):
128 | return PASCAL_CLASSES
129 |
130 | @property
131 | def ignore_index(self):
132 | return 255
133 |
134 | def __getitem__(self, index):
135 | item = self.image_data[index]
136 | return (
137 | item['id'],
138 | self.image_loader(item['image_filepath']),
139 | self.target_loader(item['gt_filepath'])
140 | )
141 |
142 | def __len__(self):
143 | return len(self.image_data)
144 |
145 |
146 | class PascalVOCTransform:
147 | def __init__(self, mode):
148 | self.ignore_index = 255
149 | self.mode = mode
150 | self.image_loader = transforms.Compose([
151 | transforms.ToFloatImage()
152 | ])
153 |
154 | self.image_augmentations = transforms.Compose([
155 | transforms.RandomHueSaturation(
156 | hue_delta=0.05, saturation_scale_range=(0.7, 1.3)),
157 | transforms.RandomContrast(0.8, 1.2),
158 | transforms.RandomBrightness(-32.0 / 255, 32. / 255)
159 | ])
160 |
161 | self.joint_augmentations = transforms.Compose([
162 | transforms.RandomCrop(),
163 | transforms.ScaleTo((513, 513)),
164 | transforms.Concat([
165 | transforms.PadTo((513, 513)),
166 | transforms.PadTo((513, 513), 255)
167 | ]),
168 | transforms.RandomHorizontalFlip()]
169 | )
170 |
171 | self.tensor_transforms = transforms.Compose([
172 | transforms.ToTensor(),
173 | Normalize(
174 | mean=[0.485, 0.456, 0.406],
175 | std=[0.229, 0.224, 0.225])
176 | ])
177 |
178 | def __call__(self, image, target):
179 | image = self.image_loader(image)
180 | if self.mode == 'train':
181 | image, target = self.joint_augmentations(image, target)
182 | image = self.image_augmentations(image)
183 | image = self.tensor_transforms(image)
184 | target = transforms.ToCategoryTensor()(target)
185 | return image, target
186 |
--------------------------------------------------------------------------------
/pysemseg/datasets/pascal_voc/prepare_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import glob
4 | import shutil
5 | from multiprocessing import Pool, cpu_count
6 | import scipy.io
7 | import cv2
8 | from tqdm import tqdm
9 | import numpy as np
10 | from pysemseg.transforms.loaders import CV2ImageLoader
11 |
12 | from pysemseg.utils import prompt_delete_dir, ColorPalette
13 |
14 |
15 | OUTPUT_DIR = None
16 | color_palette = ColorPalette(256)
17 |
18 |
19 | def process_mask(filename):
20 | basename = os.path.basename(filename)
21 | palette_mask = CV2ImageLoader()(filename)
22 | label_mask = color_palette.decode_color(palette_mask)
23 | cv2.imwrite(os.path.join(OUTPUT_DIR, basename), label_mask)
24 |
25 |
26 | def process_berkley_gt(filename):
27 | basename = os.path.basename(filename)
28 | image_id, _ = basename.split('.')
29 | mat = scipy.io.loadmat(
30 | filename, mat_dtype=True, squeeze_me=True, struct_as_record=False
31 | )
32 | segmentation = mat['GTcls'].Segmentation
33 | cv2.imwrite(
34 | os.path.join(OUTPUT_DIR, image_id + '.png'), segmentation
35 | )
36 |
37 |
38 | def init_process(output_dir):
39 | global OUTPUT_DIR
40 | OUTPUT_DIR = output_dir
41 |
42 |
43 | def convert_voc2012_labels(input_dir, output_dir, ncpus=None, overwrite=False):
44 | if overwrite:
45 | shutil.rmtree(output_dir, ignore_errors=True)
46 | else:
47 | prompt_delete_dir(output_dir)
48 |
49 | os.makedirs(output_dir)
50 |
51 | files = glob.glob(os.path.join(input_dir, '*.png'))
52 |
53 | with Pool(processes=ncpus or cpu_count(),
54 | initializer=init_process, initargs=(output_dir,)) as pool:
55 | with tqdm(total=len(files), desc='Prepare VOC2012') as pbar:
56 | for _ in pool.imap_unordered(process_mask, files):
57 | pbar.update()
58 |
59 |
60 |
61 | def convert_berkley_labels(input_dir, output_dir, ncpus=None, overwrite=False):
62 | if overwrite:
63 | shutil.rmtree(output_dir, ignore_errors=True)
64 | else:
65 | prompt_delete_dir(output_dir)
66 | os.makedirs(output_dir)
67 |
68 | files = glob.glob(os.path.join(input_dir, '*.mat'))
69 |
70 | with Pool(processes=ncpus or cpu_count(),
71 | initializer=init_process, initargs=(output_dir,)) as pool:
72 | with tqdm(total=len(files), desc='Prepare Berkley') as pbar:
73 | for _ in pool.imap_unordered(
74 | process_berkley_gt, files):
75 | pbar.update()
76 |
77 |
78 | def prepare_dataset(rootdir, ncpus=None, overwrite=False):
79 | voc2012_root = os.path.join(rootdir, 'VOCdevkit/VOC2012')
80 | berkley_root = os.path.join(rootdir, 'benchmark_RELEASE')
81 | assert os.path.exists(voc2012_root)
82 | convert_voc2012_labels(
83 | os.path.join(voc2012_root, 'SegmentationClass'),
84 | os.path.join(voc2012_root, 'SegmentationClassLabels'),
85 | ncpus,
86 | overwrite)
87 | convert_berkley_labels(
88 | os.path.join(berkley_root, 'dataset/cls/'),
89 | os.path.join(berkley_root, 'dataset/cls_labels'),
90 | ncpus,
91 | overwrite)
92 |
93 |
94 | def main():
95 | parser = argparse.ArgumentParser("Convert palette to labels")
96 | parser.add_argument("--dataset_dir", required=True, type=str)
97 | parser.add_argument("--overwrite", action='store_true', default=False,
98 | help="Overwrite existing data.")
99 | parser.add_argument("--ncpus", required=False, type=int)
100 | args = parser.parse_args()
101 | prepare_dataset(args.dataset_dir, args.ncpus, args.overwrite)
102 |
103 |
104 | if __name__ == '__main__':
105 | main()
106 |
--------------------------------------------------------------------------------
/pysemseg/datasets/transformer.py:
--------------------------------------------------------------------------------
1 | from torch.utils.data import Dataset
2 |
3 |
4 | class DatasetTransformer(Dataset):
5 | def __init__(
6 | self, dataset, transform, mode='train'):
7 | self.dataset = dataset
8 | self.transform = transform
9 | self.mode = mode
10 |
11 | def __getitem__(self, index):
12 | example_id, image, mask = self.dataset[index]
13 | image, mask = self.transform(image, mask)
14 | if self.mode == 'test':
15 | return example_id, image
16 | return example_id, image, mask
17 |
18 | def __len__(self):
19 | return len(self.dataset)
20 |
21 | @property
22 | def labels(self):
23 | return self.dataset.labels
24 |
25 | @property
26 | def number_of_classes(self):
27 | return self.dataset.number_of_classes
28 |
29 | @property
30 | def color_palette(self):
31 | return self.dataset.color_palette
32 |
33 | @property
34 | def ignore_index(self):
35 | return self.dataset.ignore_index
36 |
37 | @property
38 | def in_channels(self):
39 | return self.dataset.in_channels
40 |
--------------------------------------------------------------------------------
/pysemseg/evaluate.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from torch.autograd import Variable
3 | import torch
4 | from pysemseg.metrics import TorchSegmentationMetrics
5 | from pysemseg.utils import tensor_to_numpy, flatten_dict
6 |
7 |
8 | def evaluate(
9 | model, loader, criterion, console_logger, epoch,
10 | visual_logger, device, log_images_interval):
11 | model.eval()
12 |
13 | metrics = TorchSegmentationMetrics(
14 | loader.dataset.number_of_classes,
15 | loader.dataset.labels,
16 | ignore_index=loader.dataset.ignore_index,
17 | device=device
18 | )
19 |
20 | with torch.no_grad():
21 | for step, (_, data, target) in enumerate(loader):
22 | data, target = data.to(device), target.to(device)
23 |
24 | data, target = Variable(data), Variable(target)
25 | output = model(data)
26 | loss = criterion(output, target)
27 |
28 | predictions = torch.argmax(output, dim=1)
29 |
30 | loss = loss / torch.sum(target != loader.dataset.ignore_index).float()
31 |
32 | metrics.add(predictions, target, loss)
33 |
34 | if step % log_images_interval == 0:
35 | visual_logger.log_prediction_images(
36 | step,
37 | tensor_to_numpy(data.data),
38 | tensor_to_numpy(target.data),
39 | tensor_to_numpy(predictions),
40 | name='images',
41 | prefix='Validation'
42 | )
43 |
44 | metrics_dict = metrics.metrics()
45 |
46 | console_logger.log(
47 | len(loader), epoch, loader, data,
48 | metrics_dict, mode='Validation')
49 |
50 | if visual_logger is not None:
51 | visual_logger.log_metrics(epoch, metrics_dict, prefix='Validation')
52 |
53 | return predictions
54 |
--------------------------------------------------------------------------------
/pysemseg/loggers/__init__.py:
--------------------------------------------------------------------------------
1 | from .tensorboard_logger import TensorboardLogger
2 | from .visdom_logger import VisdomLogger
3 | from .console_logger import ConsoleLogger
4 |
--------------------------------------------------------------------------------
/pysemseg/loggers/console_logger.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pysemseg.utils import flatten_dict
3 |
4 |
5 | class ConsoleLogger():
6 |
7 | def __init__(self, filename=None, continue_training=False):
8 | self.filename = filename
9 | self.log_file = None
10 | self.continue_training = continue_training
11 |
12 | def __enter__(self):
13 | mode = 'w'
14 | if self.continue_training:
15 | mode = 'a'
16 | self.log_file = open(self.filename, mode)
17 | return self
18 |
19 | def __exit__(self, *args, **kwargs):
20 | self.log_file.close()
21 |
22 | def log(self, index, epoch, loader, data, metrics, mode='Train'):
23 | metric_str = ", ".join([
24 | '{}:{:.6f}'.format(k, v) for k, v in flatten_dict(metrics).items()
25 | ])
26 | print('{} Epoch: {} [{}/{} ({:.0f}%)] [{}]'.format(
27 | mode, epoch, index * len(data), len(loader.dataset),
28 | 100. * index / len(loader), metric_str))
29 | if self.log_file is not None:
30 | log_data = {**metrics, 'epoch': epoch, 'step': index, "mode": mode}
31 | self.log_file.write(json.dumps(log_data) + '\n')
32 |
--------------------------------------------------------------------------------
/pysemseg/loggers/tensorboard_logger.py:
--------------------------------------------------------------------------------
1 | from tensorboardX import SummaryWriter
2 | import numpy as np
3 | from pysemseg.utils import flatten_dict, ColorPalette
4 |
5 |
6 | class TensorboardLogger:
7 | def __init__(self, log_directory, color_palette):
8 | self.log_directory = log_directory
9 | self.summary_writer = SummaryWriter(self.log_directory)
10 | self.color_palette = color_palette
11 |
12 | def log_args(self, args_dict):
13 | args_text = "\n".join("{}: {}".format(k, v) for k, v in args_dict.items())
14 | self.summary_writer.add_text(
15 | 'Args',
16 | text=args_text,
17 | )
18 |
19 | def log_metrics(self, iteration, metrics, prefix):
20 | for key, value in flatten_dict(metrics).items():
21 | self.summary_writer.add_scalar(
22 | "{}/{}".format(prefix, key), value, iteration)
23 |
24 | def log_prediction_images(self, iteration, image, gt, prediction, name, prefix):
25 | gt = self.color_palette.encode_color(gt).transpose([0, 3, 1, 2])
26 | prediction = self.color_palette.encode_color(prediction).transpose([0, 3, 1, 2])
27 | combined_images = np.concatenate((image, gt, prediction), axis=-1)
28 | for i in range(image.shape[0]):
29 | self.summary_writer.add_image(
30 | "{}/{}/{}".format(prefix, name, i), combined_images[i], iteration)
31 |
32 | def log_learning_rate(self, iteration, lr):
33 | self.summary_writer.add_scalar('learning_rate', lr, iteration)
34 |
35 |
--------------------------------------------------------------------------------
/pysemseg/loggers/visdom_logger.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import cv2
4 | import visdom
5 |
6 | from pysemseg.utils import flatten_dict
7 | from pysemseg.transforms import ResizeBatch
8 |
9 |
10 | IMAGES_WIDTH = 128
11 |
12 |
13 | class VisdomLogger:
14 | def __init__(self, log_directory, color_palette, continue_logging=False):
15 | self.log_directory = log_directory
16 | self.color_palette = color_palette
17 | self.continue_logging = continue_logging
18 | visdom_env = os.path.basename(log_directory.rstrip('/'))
19 | self.visdom = visdom.Visdom(
20 | env=visdom_env,
21 | log_to_filename=os.path.join(log_directory, 'viz.log')
22 | )
23 | if not continue_logging:
24 | self.visdom.delete_env(visdom_env)
25 |
26 | def log_args(self, args_dict):
27 | args_text = "<br>".join("{}: {}".format(k, v) for k, v in args_dict.items())
28 | self.visdom.text(
29 | text=args_text,
30 | win='Args',
31 | )
32 |
33 | def _update_metric_plots(self, iteration, metrics, prefix):
34 | for key, value in flatten_dict(metrics).items():
35 | name = "{}/{}".format(prefix, key)
36 | self.visdom.line(
37 | np.array([value]),
38 | np.array([iteration]),
39 | win=name,
40 | update='append' if iteration > 0 else None,
41 | opts={'title': name}
42 | )
43 |
44 | def _log_current_class_metrics(self, metrics, prefix):
45 | class_metric_names = list(next(iter(metrics['class'].values())).keys())
46 | for metric_name in class_metric_names:
47 | name = '{}/Current/{}'.format(prefix, metric_name)
48 | class_names = list(metrics['class'].keys())
49 | values = np.array([v[metric_name] for v in metrics['class'].values()])
50 | self.visdom.bar(
51 | values,
52 | win=name,
53 | opts={'rownames': class_names, 'title': name}
54 | )
55 |
56 | def log_metrics(self, iteration, metrics, prefix):
57 | self._update_metric_plots(iteration, metrics, prefix)
58 | self._log_current_class_metrics(metrics, prefix)
59 |
60 | def log_prediction_images(
61 | self, iteration, image, ground_truth, prediction, name, prefix):
62 | title = '{}/{}'.format(prefix, name)
63 | ground_truth = self.color_palette.encode_color(ground_truth)
64 | prediction = self.color_palette.encode_color(prediction)
65 | height = int(IMAGES_WIDTH / image.shape[3] * image.shape[2])
66 | image = ResizeBatch((height, IMAGES_WIDTH))(
67 | image.transpose([0, 2, 3, 1]))
68 | prediction = ResizeBatch(
69 | (height, IMAGES_WIDTH), interpolation=cv2.INTER_NEAREST)(
70 | prediction)
71 | ground_truth = ResizeBatch(
72 | (height, IMAGES_WIDTH), interpolation=cv2.INTER_NEAREST)(
73 | ground_truth)
74 | combined_images = np.concatenate(
75 | [image[:, :, :, :3], ground_truth, prediction], axis=2)
76 | self.visdom.images(
77 | combined_images.transpose([0, 3, 1, 2]),
78 | nrow=1,
79 | win=title,
80 | opts={'caption': title}
81 | )
82 |
83 | def log_learning_rate(self, iteration, lr):
84 | self.visdom.line(
85 | np.array([lr]),
86 | np.array([iteration]),
87 | win='learning_rate',
88 | update='append' if iteration > 0 else None,
89 | opts={'title': 'Learning rate'}
90 | )
91 |
92 |
--------------------------------------------------------------------------------
/pysemseg/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from .binary import DiceLoss, LogDiceLoss, JaccardLoss, LogJaccardLoss
2 | from .focal import FocalLoss
3 |
--------------------------------------------------------------------------------
/pysemseg/losses/binary.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
6 | def _compute_dice(inputs, targets, ignore_index, pos_index=1):
7 | smooth = 1.
8 | inputs = F.softmax(inputs, dim=1)
9 | foreground_probs = inputs[:, pos_index, :, :]
10 | targets = (targets == pos_index).float()
11 | intersection = (foreground_probs * targets).sum()
12 | denom = (foreground_probs ** 2 + targets ** 2).sum()
13 | return (2 * intersection + smooth) / (denom + smooth)
14 |
15 |
16 | def _compute_jaccard(inputs, targets, ignore_index, pos_index=1):
17 | smooth = 1.
18 | inputs = F.softmax(inputs, dim=1)
19 | foreground_probs = inputs[:, pos_index, :, :]
20 | targets = (targets == pos_index).float()
21 | intersection = (foreground_probs * targets).sum()
22 | union = (foreground_probs + targets).sum()
23 | return (intersection + smooth) / (union - intersection + smooth)
24 |
25 |
26 | class DiceLoss(nn.Module):
27 | def __init__(self, ignore_index=-1, pos_index=1, rescale=True):
28 | super().__init__()
29 | self.ignore_index = ignore_index
30 | self.rescale = rescale
31 | self.pos_index = pos_index
32 |
33 | def forward(self, inputs, targets):
34 | loss = 1.0 - _compute_dice(
35 | inputs, targets, self.ignore_index, self.pos_index)
36 | if self.rescale:
37 | num_targets = torch.sum(targets != self.ignore_index).float()
38 | loss *= num_targets
39 | return loss
40 |
41 |
42 | class LogDiceLoss(nn.Module):
43 | def __init__(self, ignore_index=-1, pos_index=1, rescale=True):
44 | super().__init__()
45 | self.ignore_index = ignore_index
46 | self.rescale = rescale
47 | self.pos_index = pos_index
48 |
49 | def forward(self, inputs, targets):
50 | loss = -torch.log(_compute_dice(
51 | inputs, targets, self.ignore_index, self.pos_index))
52 | if self.rescale:
53 | num_targets = torch.sum(targets != self.ignore_index).float()
54 | loss *= num_targets
55 | return loss
56 |
57 |
58 | class JaccardLoss(nn.Module):
59 | def __init__(self, ignore_index=-1, pos_index=1, rescale=True):
60 | super().__init__()
61 | self.ignore_index = ignore_index
62 | self.rescale = rescale
63 | self.pos_index = pos_index
64 |
65 | def forward(self, inputs, targets):
66 | loss = 1.0 - _compute_jaccard(
67 | inputs, targets, self.ignore_index, self.pos_index)
68 | if self.rescale:
69 | num_targets = torch.sum(targets != self.ignore_index).float()
70 | loss *= num_targets
71 | return loss
72 |
73 |
74 | class LogJaccardLoss(nn.Module):
75 | def __init__(self, ignore_index=-1, pos_index=1, rescale=True):
76 | super().__init__()
77 | self.ignore_index = ignore_index
78 | self.rescale = rescale
79 | self.pos_index = pos_index
80 |
81 | def forward(self, inputs, targets):
82 | loss = -torch.log(_compute_jaccard(
83 | inputs, targets, self.ignore_index, self.pos_index))
84 | if self.rescale:
85 | num_targets = torch.sum(targets != self.ignore_index).float()
86 | loss *= num_targets
87 | return loss
88 |
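89 | # Usage sketch: these losses take raw logits of shape (N, C, H, W) and
90 | # integer targets of shape (N, H, W); softmax is applied internally.
91 | # With rescale=True the scalar loss is multiplied by the number of
92 | # non-ignored pixels, so a trainer can divide by that same count to get
93 | # a per-pixel average (as evaluate.py does for the validation loss).
94 | #
95 | # criterion = DiceLoss(ignore_index=255)
96 | # loss = criterion(logits, targets)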
--------------------------------------------------------------------------------
/pysemseg/losses/focal.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 | def one_hot_encode(targets, N):
6 | targets = targets.unsqueeze(1)
7 | one_hot = torch.zeros(
8 | (targets.size(0), N, targets.size(2), targets.size(3)),
9 | device=targets.device
10 | )
11 | one_hot = one_hot.scatter_(1, targets.data, 1)
12 | one_hot = torch.autograd.Variable(one_hot)
13 | return one_hot
14 |
15 |
16 | class FocalLoss(nn.Module):
17 | def __init__(self, gamma=1.0, weights=None, ignore_index=-1):
18 | super().__init__()
19 | self.gamma = gamma
20 | self.weights = weights
21 | self.ignore_index = ignore_index
22 | self.ce = nn.CrossEntropyLoss(
23 | weight=self.weights, ignore_index=ignore_index, reduction='none'
24 | )
25 |
26 | def forward(self, inputs, targets):
27 | ce_loss = self.ce(inputs, targets)
28 | one_hot = one_hot_encode(targets, inputs.size(1))
29 | inv_probs = 1.0 - F.softmax(inputs, dim=1)
30 | focal = (inv_probs * one_hot).sum(dim=1) ** self.gamma
31 | mask = (targets != self.ignore_index).float()
32 | return torch.sum(focal * ce_loss * mask)
33 |
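34 | # Usage sketch: FocalLoss takes raw logits and integer targets and
35 | # returns a summed (not averaged) loss; contributions from pixels equal
36 | # to ignore_index are zeroed out by the mask above. gamma=2.0 here is
37 | # an illustrative value (the constructor default is 1.0).
38 | #
39 | # criterion = FocalLoss(gamma=2.0)
40 | # loss = criterion(logits, targets)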
--------------------------------------------------------------------------------
/pysemseg/lr_schedulers/__init__.py:
--------------------------------------------------------------------------------
1 | from .poly import PolyLR
2 | from .constant import ConstantLR
3 |
--------------------------------------------------------------------------------
/pysemseg/lr_schedulers/constant.py:
--------------------------------------------------------------------------------
1 | from torch.optim.lr_scheduler import _LRScheduler
2 |
3 |
4 | class ConstantLR(_LRScheduler):
5 | def __init__(self, optimizer, last_epoch=-1):
6 | super().__init__(optimizer, last_epoch)
7 |
8 | def get_lr(self):
9 | return self.base_lrs
10 |
--------------------------------------------------------------------------------
/pysemseg/lr_schedulers/poly.py:
--------------------------------------------------------------------------------
1 | from torch.optim.lr_scheduler import _LRScheduler
2 |
3 |
4 | class PolyLR(_LRScheduler):
5 | def __init__(self, optimizer, max_epochs, gamma=0.9, last_epoch=-1):
6 | self.max_epochs = max_epochs
7 | self.gamma = gamma
8 | super().__init__(optimizer, last_epoch)
9 |
10 | def get_lr(self):
11 | return [
12 | base_lr * (1.0 - self.last_epoch / self.max_epochs) ** self.gamma
13 | for base_lr in self.base_lrs
14 | ]
15 |
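16 | # The schedule is lr(epoch) = base_lr * (1 - epoch / max_epochs) ** gamma.
17 | # E.g. with base_lr=0.001, max_epochs=40, gamma=0.8 (the README settings),
18 | # the learning rate at epoch 20 is 0.001 * 0.5 ** 0.8 ~= 5.7e-4, and it
19 | # decays to 0 at epoch 40.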
--------------------------------------------------------------------------------
/pysemseg/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from pysemseg.utils import tensor_to_numpy
4 |
5 | def _apply(fcn, acc, counter):
6 | result = {}
7 | for key, value in acc.items():
8 | if isinstance(value, dict):
9 | result[key] = _apply(fcn, value, counter.get(key, {}))
10 | else:
11 | result[key] = fcn(acc[key], counter.get(key, 0.0))
12 | return result
13 |
14 |
15 | class _Accumulator():
16 | def __init__(self):
17 | self.numerator = {}
18 | self.denominator = {}
19 |
20 | def mean(self):
21 | return _apply(
22 | lambda n, d: n / d if d > 0 else 0.0,
23 | self.numerator, self.denominator)
24 |
25 | def update(self, data):
26 | self.numerator = _apply(
27 | lambda a, d: a[0] + d, data, self.numerator)
28 | self.denominator = _apply(
29 | lambda a, d: a[1] + d,
30 | data, self.denominator)
31 |
32 |
33 | class SegmentationMetrics:
34 | def __init__(self, num_classes, labels=None, ignore_index=-1):
35 | labels = labels or list(range(num_classes))
36 | self.labels = dict(enumerate(labels))
37 | self.accumulator = _Accumulator()
38 | self.num_classes = num_classes
39 | self.cm = np.zeros(
40 | (self.num_classes, self.num_classes), dtype=np.int32)
41 | self.ignore_index = ignore_index
42 |
43 |
44 | def metrics(self):
45 | metrics = {}
46 | metrics['accuracy'] = self._accuracy()
47 | metrics['class'] = {}
48 | metrics['class']['iou'] = dict(zip(self.labels.values(), self._iou()))
49 | metrics['mIOU'] = np.mean(list(metrics['class']['iou'].values()))
50 | return metrics
51 |
52 | def _accuracy(self):
53 | return self.cm.diagonal().sum() / self.cm.sum()
54 |
55 | def _iou(self):
56 | colsum = self.cm.sum(axis=0)
57 | rowsum = self.cm.sum(axis=1)
58 | diag = self.cm.diagonal()
59 | return diag / (colsum + rowsum - diag)
60 |
61 | def _confusion_matrix(self, outputs, targets):
62 | outputs = outputs.reshape(-1,)
63 | targets = targets.reshape(-1,)
64 | mask = targets != self.ignore_index
65 | comb = self.num_classes * outputs[mask] + targets[mask]
66 | comb = np.bincount(comb, minlength=self.num_classes ** 2)
67 | return comb.reshape(self.num_classes, self.num_classes)
68 |
69 |
70 | def add(self, outputs, targets, loss):
71 | self.accumulator.update({'loss': (loss, 1)})
72 | self.cm += self._confusion_matrix(outputs, targets)
73 |
74 |
75 | class TorchSegmentationMetrics:
76 | def __init__(self, num_classes, labels=None, ignore_index=-1, device=None):
77 | labels = labels or list(range(num_classes))
78 | self.labels = dict(enumerate(labels))
79 | self.accumulator = _Accumulator()
80 | self.num_classes = num_classes
81 | self.cm = torch.zeros(
82 | (self.num_classes, self.num_classes), dtype=torch.long,
83 | requires_grad=False, device=device)
84 | self.ignore_index = ignore_index
85 |
86 |
87 | def metrics(self):
88 | metrics = {}
89 | metrics['accuracy'] = self._accuracy()
90 | metrics['class'] = {
91 | k: {'iou': v}
92 | for k, v in zip(self.labels.values(), self._iou())
93 | }
94 | metrics['mIOU'] = np.mean([c['iou'] for c in metrics['class'].values()])
95 | metrics.update(self.accumulator.mean())
96 | metrics = _apply(
97 | lambda x, y: float(x),
98 | metrics,
99 | {}
100 | )
101 | return metrics
102 |
103 | def _accuracy(self):
104 | accuracy = self.cm.diagonal().sum().float() / self.cm.sum().float()
105 | return tensor_to_numpy(accuracy)
106 |
107 | def _iou(self):
108 | colsum = self.cm.sum(dim=0)
109 | rowsum = self.cm.sum(dim=1)
110 | diag = self.cm.diagonal()
111 | iou = diag.float() / (colsum + rowsum - diag).float()
112 | return tensor_to_numpy(iou)
113 |
114 | def _confusion_matrix(self, outputs, targets):
115 | outputs = outputs.view(-1).contiguous()
116 | targets = targets.view(-1).contiguous()
117 | mask = (targets != self.ignore_index)
118 | comb = self.num_classes * outputs[mask] + targets[mask]
119 | comb = torch.bincount(comb, minlength=self.num_classes ** 2)
120 | return comb.reshape(self.num_classes, self.num_classes).long()
121 |
122 |
123 | def add(self, outputs, targets, loss):
124 | self.accumulator.update({'loss': (loss, 1)})
125 | self.cm += self._confusion_matrix(outputs, targets)
126 |
127 |
128 | def compute_example_segmentation_metrics(
129 | n_classes, outputs, targets, loss, ignore_index=-1):
130 | metrics = TorchSegmentationMetrics(n_classes, ignore_index=ignore_index)
131 | metrics.add(outputs, targets, loss)
132 | return metrics.metrics()
133 |
--------------------------------------------------------------------------------
/pysemseg/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .unet import UNet, unet_resnet101, unet_resnet152, unet_big_resnet101
2 | from .fcn import VGGFCN32, VGGFCN16, VGGFCN8
3 | from .densenet import fcdensenet56, fcdensenet67, fcdensenet103
4 | from .deeplab import (
5 | deeplabv3_resnet50, deeplabv3_resnet101, deeplabv3_resnet152,
6 | deeplabv3plus_resnet101, deeplabv3plus_resnet50
7 | )
8 |
--------------------------------------------------------------------------------
/pysemseg/models/deeplab.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from .resnet import resnet50, resnet101, resnet152
5 |
6 | class ASPPModule(nn.Module):
7 | def __init__(self, in_channels, out_channels, rates):
8 | super().__init__()
9 | assert len(rates) == 3
10 | self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
11 | bias=False)
12 | self.conv2 = nn.Conv2d(
13 | in_channels, out_channels, kernel_size=3, dilation=rates[0],
14 | padding=rates[0], bias=False)
15 | self.conv3 = nn.Conv2d(
16 | in_channels, out_channels, kernel_size=3, dilation=rates[1],
17 | padding=rates[1], bias=False)
18 | self.conv4 = nn.Conv2d(
19 | in_channels, out_channels, kernel_size=3, dilation=rates[2],
20 | padding=rates[2], bias=False)
21 | self.global_avg_pool = nn.Sequential(
22 | nn.AdaptiveAvgPool2d((1, 1)),
23 | nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
24 | )
25 | self.bn = nn.BatchNorm2d(5 * out_channels)
26 | self.relu = nn.ReLU(inplace=True)
27 | self.final_conv = nn.Conv2d(
28 | 5 * out_channels, out_channels, kernel_size=1, bias=False)
29 |
30 | def forward(self, x):
31 | x1 = self.conv1(x)
32 | x2 = self.conv2(x)
33 | x3 = self.conv3(x)
34 | x4 = self.conv4(x)
35 | x5 = self.global_avg_pool(x)
36 | x5 = F.interpolate(
37 | x5, size=x1.size()[2:], mode='bilinear', align_corners=True
38 | )
39 | x = torch.cat([x1, x2, x3, x4, x5], dim=1)
40 | x = self.relu(self.bn(x))
41 | x = self.final_conv(x)
42 | return x
43 |
44 |
45 | class Decoder(nn.Module):
46 | def __init__(self, low_level_channels, encoder_channels,
47 | low_level_reduced_channels=48):
48 | super().__init__()
49 | self.reduce_low_level = nn.Conv2d(
50 | low_level_channels, low_level_reduced_channels, kernel_size=1)
51 | self.fuse = nn.Sequential(
52 | nn.Conv2d(
53 | encoder_channels + low_level_reduced_channels, 256,
54 | kernel_size=3, padding=1),
55 | nn.ReLU(inplace=True),
56 | nn.BatchNorm2d(256),
57 | nn.Conv2d(256, 256, kernel_size=3, padding=1),
58 | nn.BatchNorm2d(256),
59 | nn.ReLU(inplace=True)
60 | )
61 |
62 | def forward(self, x, low_level_features):
63 | low_level_features = self.reduce_low_level(low_level_features)
64 | x = F.interpolate(
65 | x, size=low_level_features.shape[2:], mode='bilinear',
66 | align_corners=True
67 | )
68 | x = torch.cat([x, low_level_features], dim=1)
69 | x = self.fuse(x)
70 | return x
71 |
72 |
73 | class Deeplab(nn.Module):
74 | def __init__(
75 | self, in_channels, n_classes, backbone_cls, decoder=None,
76 | score_channels=256, aspp_rates=[6, 12, 18], pretrained=True,
77 | output_stride=16, multi_grid=[1, 2, 4], finetune_bn=True):
78 | super().__init__()
79 | self.backbone = backbone_cls(
80 | pretrained=pretrained,
81 | in_channels=in_channels,
82 | output_stride=output_stride,
83 | multi_grid=multi_grid
84 | )
85 | rate = 2 if output_stride == 8 else 1
86 | self.aspp = ASPPModule(2048, 256, [r * rate for r in aspp_rates])
87 | self.finetune_bn = finetune_bn
88 | self.decoder = decoder
89 | self.score = nn.Sequential(
90 | nn.Conv2d(score_channels, score_channels, kernel_size=3, padding=1),
91 | nn.BatchNorm2d(score_channels),
92 | nn.ReLU(inplace=True),
93 | nn.Conv2d(score_channels, n_classes, kernel_size=1)
94 | )
95 |
96 | def forward(self, x):
97 | input_size = x.shape[2:]
98 | x, block_outputs = self.backbone(x)
99 | x = self.aspp(x)
100 | if self.decoder:
101 | x = self.decoder(x, block_outputs[0])
102 | x = F.interpolate(
103 | x, size=input_size, mode='bilinear', align_corners=True
104 | )
105 | x = self.score(x)
106 | return x
107 |
108 | def train(self, mode=True):
109 | super().train(mode)
110 | if not self.finetune_bn:
111 | for module in self.modules():
112 | if isinstance(module, nn.BatchNorm2d):
113 | module.train(False)
114 |
115 |
116 | def deeplabv3_resnet50(in_channels, n_classes, **kwargs):
117 | return Deeplab(in_channels, n_classes, resnet50, **kwargs)
118 |
119 |
120 | def deeplabv3_resnet101(in_channels, n_classes, **kwargs):
121 | return Deeplab(in_channels, n_classes, resnet101, **kwargs)
122 |
123 |
124 | def deeplabv3_resnet152(in_channels, n_classes, **kwargs):
125 | return Deeplab(in_channels, n_classes, resnet152, **kwargs)
126 |
127 |
128 | def deeplabv3plus_resnet101(in_channels, n_classes, **kwargs):
129 | return Deeplab(
130 | in_channels, n_classes, resnet101,
131 | decoder=Decoder(256, 256, 48), **kwargs
132 | )
133 |
134 |
135 | def deeplabv3plus_resnet50(in_channels, n_classes, **kwargs):
136 | return Deeplab(
137 | in_channels, n_classes, resnet50,
138 | decoder=Decoder(256, 256, 48), **kwargs
139 | )
140 |
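141 | # Construction sketch: a DeepLab V3+ with a pretrained ResNet-101
142 | # backbone at output stride 16, for the 21 Pascal VOC classes:
143 | # model = deeplabv3plus_resnet101(in_channels=3, n_classes=21,
144 | #                                 output_stride=16, pretrained=True)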
--------------------------------------------------------------------------------
/pysemseg/models/densenet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch.utils import checkpoint
5 |
6 |
7 | class DenseLayer(nn.Sequential):
8 | def __init__(self, n_input_features, growth_rate, drop_rate):
9 | super().__init__()
10 | self.add_module('norm', nn.BatchNorm2d(n_input_features))
11 | self.add_module('relu', nn.ReLU(inplace=True))
12 | self.add_module(
13 | 'conv',
14 | nn.Conv2d(
15 | n_input_features, growth_rate, kernel_size=3, stride=1,
16 | padding=1))
17 | self.add_module('drop', nn.Dropout(drop_rate))
18 | self.reset_parameters()
19 |
20 | def reset_parameters(self):
21 | torch.nn.init.kaiming_uniform_(self.conv.weight, nonlinearity='relu')
22 |
23 |
24 | def _dense_layer_cp_factory(layer):
25 | def _dense_layer_fn(*features):
26 | concat_features = torch.cat(features, 1)
27 | return layer(concat_features)
28 | return _dense_layer_fn
29 |
30 |
31 | class DenseBlock(nn.Module):
32 | def __init__(
33 | self, num_layers, num_input_features, growth_rate, drop_rate,
34 | efficient=False):
35 | super(DenseBlock, self).__init__()
36 | self.efficient = efficient
37 | self.layers = nn.ModuleList([
38 | DenseLayer(num_input_features + i * growth_rate, growth_rate, drop_rate)
39 | for i in range(num_layers)
40 | ])
41 |
42 | def forward(self, x):
43 | layer_outputs = []
44 | for layer in self.layers:
45 | if self.efficient:
46 | output = checkpoint.checkpoint(
47 | _dense_layer_cp_factory(layer), x, *layer_outputs)
48 | else:
49 | output = layer(x)
50 | x = torch.cat([x, output], 1)
51 | layer_outputs.append(output)
52 | return torch.cat(layer_outputs, 1)
53 |
54 |
55 | class TransitionDown(nn.Sequential):
56 | def __init__(self, n_input_features, n_output_features, drop_rate=0.2):
57 | super().__init__()
58 | self.add_module('norm', nn.BatchNorm2d(n_input_features))
59 | self.add_module('relu', nn.ReLU(inplace=True))
60 | self.add_module('conv', nn.Conv2d(
61 | n_input_features, n_output_features, kernel_size=1, stride=1))
62 | self.add_module('drop', nn.Dropout(drop_rate))
63 | self.add_module('pool', nn.MaxPool2d(kernel_size=2, stride=2))
64 | self.reset_parameters()
65 |
66 | def reset_parameters(self):
67 | torch.nn.init.kaiming_uniform_(self.conv.weight, nonlinearity='relu')
68 |
69 |
70 | class TransitionUp(nn.Module):
71 | def __init__(self, n_input_features, n_output_features):
72 | super().__init__()
73 | self.conv = nn.ConvTranspose2d(
74 | n_input_features, n_output_features, kernel_size=3, stride=2,
75 | padding=1, output_padding=1)
76 |
77 | def reset_parameters(self):
78 | torch.nn.init.kaiming_uniform_(self.conv.weight, nonlinearity='relu')
79 | torch.nn.init.zeros_(self.conv.bias)
80 |
81 | def forward(self, x):
82 | return self.conv(x)
83 |
84 |
85 | class FCDenseNet(nn.Module):
86 | def __init__(
87 | self, in_channels, n_classes, growth_rate=32,
88 | n_init_features=48, drop_rate=0.2, blocks=(4, 5, 7, 10, 12, 15),
89 | efficient=False):
90 | super().__init__()
91 |
92 | self.efficient = efficient
93 | self.initial_conv = nn.Conv2d(
94 | in_channels, n_init_features, kernel_size=3, stride=1, padding=1)
95 |
96 | self.blocks_down = nn.ModuleList()
97 | self.transitions_down = nn.ModuleList()
98 |
99 | n_features = n_init_features
100 | self.skip_features = []
101 |
102 | for i, n_layers in enumerate(blocks[:-1]):
103 | self.blocks_down.append(
104 | DenseBlock(
105 | n_layers, n_features, growth_rate, drop_rate,
106 | efficient=self.efficient)
107 | )
108 | n_features = n_features + n_layers * growth_rate
109 | self.skip_features.append(n_features)
110 | self.transitions_down.append(
111 | TransitionDown(n_features, n_features, drop_rate)
112 | )
113 |
114 | self.blocks_down.append(
115 | DenseBlock(
116 | blocks[-1], n_features, growth_rate, drop_rate,
117 | efficient=self.efficient))
118 | n_features = blocks[-1] * growth_rate
119 |
120 | self.blocks_up = nn.ModuleList()
121 | self.transitions_up = nn.ModuleList()
122 |
123 | for i in range(len(blocks) - 2, -1, -1):
124 | self.transitions_up.append(TransitionUp(n_features, n_features))
125 | n_features = n_features + self.skip_features[i]
126 | self.blocks_up.append(
127 | DenseBlock(blocks[i], n_features, growth_rate, drop_rate)
128 | )
129 | n_features = growth_rate * blocks[i]
130 |
131 | self.score = nn.Conv2d(n_features, n_classes, kernel_size=1, stride=1)
132 |
133 | self.reset_parameters()
134 |
135 | def reset_parameters(self):
136 | torch.nn.init.kaiming_uniform_(
137 | self.initial_conv.weight, nonlinearity='relu'
138 | )
139 |
140 | def _maybe_pad(self, x, size):
141 | hpad = max(size[0] - x.shape[2], 0)
142 | wpad = max(size[1] - x.shape[3], 0)
143 | if hpad + wpad > 0:
144 | lhpad = hpad // 2
145 | rhpad = hpad // 2 + hpad % 2
146 | lwpad = wpad // 2
147 | rwpad = wpad // 2 + wpad % 2
148 |
149 | x = F.pad(x, (lwpad, rwpad, lhpad, rhpad))
150 | return x
151 |
152 |
153 |
154 | def forward(self, x):
155 | x = self.initial_conv(x)
156 | skips = []
157 | for block, trans in zip(self.blocks_down, self.transitions_down):
158 | c = block(x)
159 | x = torch.cat([c, x], 1)
160 | skips.append(x)
161 | x = trans(x)
162 |
163 | x = self.blocks_down[-1](x)
164 | skips = skips[::-1]
165 |
166 | for block, trans, skip in zip(
167 | self.blocks_up, self.transitions_up, skips):
168 | x = trans(x)
169 | x = self._maybe_pad(x, skip.shape[2:])
170 | x = torch.cat([x, skip], 1)
171 | x = block(x)
172 |
173 | x = self.score(x)
174 | return x
175 |
176 |
177 | def fcdensenet56(in_channels, n_classes):
178 | return FCDenseNet(
179 | in_channels, n_classes, growth_rate=12, n_init_features=48,
180 | drop_rate=0.2, blocks=(4,) * 5, efficient=True
181 | )
182 |
183 |
184 | def fcdensenet67(in_channels, n_classes):
185 | return FCDenseNet(
186 | in_channels, n_classes, growth_rate=16, n_init_features=48,
187 | drop_rate=0.2, blocks=(5,) * 5, efficient=True
188 | )
189 |
190 |
191 | def fcdensenet103(in_channels, n_classes):
192 | return FCDenseNet(
193 | in_channels, n_classes, growth_rate=16, n_init_features=48,
194 | drop_rate=0.2, blocks=(4, 5, 7, 10, 12, 15), efficient=True
195 | )
196 |
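197 | # Construction sketch: a FC DenseNet 103 for the 12 CamVid classes:
198 | # model = fcdensenet103(in_channels=3, n_classes=12)
199 | # The efficient=True flag in these factories trades compute for memory
200 | # by recomputing dense-layer activations via torch.utils.checkpoint.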
--------------------------------------------------------------------------------
/pysemseg/models/fcn.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 | from abc import ABCMeta
4 | import torchvision.models as models
5 |
6 |
7 | def _maybe_pad(x, size):
8 | hpad = size[0] - x.shape[2]
9 | wpad = size[1] - x.shape[3]
10 | if hpad + wpad > 0:
11 | x = F.pad(x, (0, wpad, 0, hpad))
12 | return x
13 |
14 |
15 | class VGGFCN(nn.Module, metaclass=ABCMeta):
16 | def __init__(self, in_channels, n_classes):
17 | super().__init__()
18 | assert in_channels == 3
19 | self.n_classes = n_classes
20 | self.vgg16 = models.vgg16(pretrained=True)
21 | self.classifier = nn.Sequential(
22 | nn.Conv2d(512, 4096, kernel_size=7, padding=3),
23 | nn.ReLU(True),
24 | nn.Dropout(),
25 | nn.Conv2d(4096, 4096, kernel_size=1),
26 | nn.ReLU(True),
27 | nn.Dropout(),
28 | nn.Conv2d(4096, n_classes, kernel_size=1),
29 | )
30 |
31 | self._initialize_weights()
32 |
33 | def _initialize_weights(self):
34 | self.classifier[0].weight.data = (
35 | self.vgg16.classifier[0].weight.data.view(
36 | self.classifier[0].weight.size())
37 | )
38 | self.classifier[3].weight.data = (
39 | self.vgg16.classifier[3].weight.data.view(
40 | self.classifier[3].weight.size())
41 | )
42 |
43 |
44 | class VGGFCN32(VGGFCN):
45 | def forward(self, x):
46 | input_height, input_width = x.shape[2], x.shape[3]
47 | x = self.vgg16.features(x)
48 | x = self.classifier(x)
49 | x = F.interpolate(x, size=(input_height, input_width),
50 | mode='bilinear', align_corners=True)
51 | return x
52 |
53 |
54 | class VGGFCN16(VGGFCN):
55 | def __init__(self, in_channels, n_classes):
56 | super().__init__(in_channels, n_classes)
57 | self.score4 = nn.Conv2d(512, n_classes, kernel_size=1)
58 | self.upscale5 = nn.ConvTranspose2d(
59 | n_classes, n_classes, kernel_size=2, stride=2)
60 |
61 | def forward(self, x):
62 | input_height, input_width = x.shape[2], x.shape[3]
63 | pool4 = self.vgg16.features[:-7](x)
64 | pool5 = self.vgg16.features[-7:](pool4)
65 | pool5_upscaled = self.upscale5(self.classifier(pool5))
66 | pool4 = self.score4(pool4)
67 | x = pool4 + pool5_upscaled
68 | x = F.interpolate(x, size=(input_height, input_width),
69 | mode='bilinear', align_corners=True)
70 | return x
71 |
72 |
73 | class VGGFCN8(VGGFCN):
74 | def __init__(self, in_channels, n_classes):
75 | super().__init__(in_channels, n_classes)
76 | self.upscale4 = nn.ConvTranspose2d(
77 | n_classes, n_classes, kernel_size=2, stride=2)
78 | self.score4 = nn.Conv2d(
79 | 512, n_classes, kernel_size=1, stride=1)
80 | self.score3 = nn.Conv2d(
81 | 256, n_classes, kernel_size=1, stride=1)
82 | self.upscale5 = nn.ConvTranspose2d(
83 | n_classes, n_classes, kernel_size=2, stride=2)
84 |
85 | def forward(self, x):
86 | input_height, input_width = x.shape[2], x.shape[3]
87 | pool3 = self.vgg16.features[:-14](x)
88 | pool4 = self.vgg16.features[-14:-7](pool3)
89 | pool5 = self.vgg16.features[-7:](pool4)
90 | pool5_upscaled = self.upscale5(self.classifier(pool5))
91 | pool5_upscaled = _maybe_pad(pool5_upscaled, pool4.shape[2:])
92 | pool4_scores = self.score4(pool4)
93 | pool4_fused = pool4_scores + pool5_upscaled
94 | pool4_upscaled = self.upscale4(pool4_fused)
95 | pool4_upscaled = _maybe_pad(pool4_upscaled, pool3.shape[2:])
96 | x = self.score3(pool3) + pool4_upscaled
97 | x = F.interpolate(x, size=(input_height, input_width),
98 | mode='bilinear', align_corners=True)
99 | return x
100 |
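101 | # Construction sketch: the VGG16 backbone ships with pretrained
102 | # ImageNet weights, so in_channels must be 3 (asserted in VGGFCN):
103 | # model = VGGFCN8(in_channels=3, n_classes=21)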
--------------------------------------------------------------------------------
/pysemseg/models/resnet.py:
--------------------------------------------------------------------------------
1 | import math
2 | import re
3 | from collections import OrderedDict
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | import torch.utils.model_zoo as model_zoo
7 |
8 |
9 | RESNET_CKPT = {
10 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
11 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
12 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth'
13 | }
14 |
15 |
16 | class Bottleneck(nn.Module):
17 | def __init__(self, in_channels, bottleneck_channels, out_channels,
18 | stride=1, dilation=1, downsample=None):
19 | super().__init__()
20 | self.conv1 = nn.Conv2d(
21 | in_channels, bottleneck_channels, kernel_size=1, bias=False)
22 | self.bn1 = nn.BatchNorm2d(bottleneck_channels)
23 | self.conv2 = nn.Conv2d(
24 | bottleneck_channels, bottleneck_channels, kernel_size=3,
25 | stride=stride, padding=(dilation, dilation),
26 | dilation=dilation, bias=False)
27 | self.bn2 = nn.BatchNorm2d(bottleneck_channels)
28 | self.conv3 = nn.Conv2d(
29 | bottleneck_channels, out_channels, kernel_size=1,
30 | bias=False)
31 | self.bn3 = nn.BatchNorm2d(out_channels)
32 | self.relu = nn.ReLU(inplace=True)
33 | self.downsample = downsample
34 | self.stride = stride
35 |
36 | def forward(self, x):
37 | residual = x
38 |
39 | out = self.conv1(x)
40 | out = self.bn1(out)
41 | out = self.relu(out)
42 |
43 | out = self.conv2(out)
44 | out = self.bn2(out)
45 | out = self.relu(out)
46 |
47 | out = self.conv3(out)
48 | out = self.bn3(out)
49 |
50 | if self.downsample is not None:
51 | residual = self.downsample(x)
52 |
53 | out += residual
54 | out = self.relu(out)
55 |
56 | return out
57 |
58 |
59 | class ResBlock(nn.Sequential):
60 | def __init__(self, in_channels, channels, n_layers, dilations=None,
61 | stride=1, expansion=4):
62 | super().__init__()
63 | if dilations is None:
64 | dilations = [1] * n_layers
65 | downsample = None
66 | if stride != 1 or in_channels != channels * expansion:
67 | downsample = nn.Sequential(
68 | nn.Conv2d(in_channels, channels * expansion,
69 | kernel_size=1, stride=stride, bias=False),
70 | nn.BatchNorm2d(channels * expansion),
71 | )
72 |
73 | layers = []
74 | self.add_module(
75 | '0', Bottleneck(
76 | in_channels, channels, channels * expansion, stride,
77 | dilations[0], downsample=downsample))
78 | in_channels = channels * expansion
79 | for i in range(1, n_layers):
80 | self.add_module(
81 | '{}'.format(i),
82 | Bottleneck(in_channels, channels, in_channels,
83 | dilation=dilations[i]))
84 |
85 |
86 |
87 | class ResNet(nn.Module):
88 | def __init__(self, in_channels, layers_config):
89 | super().__init__()
90 | self.expansion = 4
91 | channels = layers_config[0]['channels']
92 | self.conv1 = nn.Conv2d(
93 | in_channels, channels, kernel_size=7, stride=2, padding=3,
94 | bias=False)
95 | self.bn1 = nn.BatchNorm2d(channels)
96 | self.relu = nn.ReLU(inplace=True)
97 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
98 | self.layers = nn.ModuleList()
99 | for i, layer_config in enumerate(layers_config):
100 | self.layers.add_module(
101 | 'layer{}'.format(i + 1),
102 | ResBlock(channels, **layer_config, expansion=self.expansion)
103 | )
104 | channels = layer_config['channels'] * self.expansion
105 |
106 | self.reset_parameters()
107 |
108 | def load_pretrained_model(self, url):
109 | def replace_key(key):
110 | return re.sub(r'layer(\d)', r'layers.layer\g<1>', key)
111 | state_dict = model_zoo.load_url(url)
112 | state_dict = OrderedDict([
113 | (replace_key(key), value) for key, value in state_dict.items()
114 | ])
115 | self.load_state_dict(state_dict, strict=False)
116 |
117 | def reset_parameters(self):
118 | for m in self.modules():
119 | if isinstance(m, nn.Conv2d):
120 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
121 | m.weight.data.normal_(0, math.sqrt(2. / n))
122 | elif isinstance(m, nn.BatchNorm2d):
123 | m.weight.data.fill_(1)
124 | m.bias.data.zero_()
125 |
126 | def forward(self, x):
127 | x = self.conv1(x)
128 | x = self.bn1(x)
129 | x = self.relu(x)
130 | x = self.maxpool(x)
131 | block_outputs = []
132 | for layer in self.layers:
133 | x = layer(x)
134 | block_outputs.append(x)
135 | return x, block_outputs
136 |
137 |
138 | def resnet(layers, pretrained_model=None, in_channels=3, output_stride=16,
139 | multi_grid=(1, 2, 4), add_blocks=0):
140 |
141 | assert output_stride in [8, 16, 32]
142 | if output_stride == 8:
143 | rate3 = [2] * layers[2]
144 | stride3 = 1
145 | rate4 = [2 * 2 * mg for mg in multi_grid]
146 | stride4 = 1
147 | elif output_stride == 16:
148 | rate3 = [1] * layers[2]
149 | stride3 = 2
150 | rate4 = [2 * mg for mg in multi_grid]
151 | stride4 = 1
152 | elif output_stride == 32:
153 | rate3 = [1] * layers[2]
154 | stride3 = 2
155 | rate4 = [1] * layers[3]
156 | stride4 = 2
157 |
158 | blocks = [
159 | {
160 | 'stride': 1,
161 | 'n_layers': layers[0],
162 | 'channels': 64
163 | },
164 | {
165 | 'stride': 2,
166 | 'n_layers': layers[1],
167 | 'channels': 128
168 | },
169 | {
170 | 'stride': stride3,
171 | 'n_layers': layers[2],
172 | 'dilations': rate3,
173 | 'channels': 256
174 | },
175 | {
176 | 'stride': stride4,
177 | 'n_layers': layers[3],
178 | 'dilations': rate4,
179 | 'channels': 512
180 | }
181 | ]
182 | rate = rate3[0] * 4
183 | for _ in range(add_blocks):
184 | dilations = [rate * mg for mg in multi_grid]
185 | blocks.append(
186 | {
187 | 'stride': 1,
188 | 'n_layers': 3,
189 | 'dilations': dilations,
190 | 'channels': 512
191 | }
192 | )
193 | resnet = ResNet(in_channels, blocks)
194 | if pretrained_model is not None:
195 | resnet.load_pretrained_model(pretrained_model)
196 |
197 | return resnet
198 |
199 |
200 | def resnet50(pretrained=True, **kwargs):
201 | return resnet(
202 | [3, 4, 6, 3],
203 | pretrained_model=RESNET_CKPT['resnet50'] if pretrained else None,
204 | **kwargs
205 | )
206 |
207 |
208 | def resnet101(pretrained=True, **kwargs):
209 | return resnet(
210 | [3, 4, 23, 3],
211 | pretrained_model=RESNET_CKPT['resnet101'] if pretrained else None,
212 | **kwargs
213 | )
214 |
215 |
216 | def resnet152(pretrained=True, **kwargs):
217 | return resnet(
218 | [3, 8, 36, 3],
219 |         pretrained_model=RESNET_CKPT['resnet152'] if pretrained else None,
220 | **kwargs
221 | )
222 |
--------------------------------------------------------------------------------
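The `output_stride` argument above trades stride for dilation in the last two blocks (with `multi_grid` scaling the dilations of the final block), which is how DeepLab-style heads keep dense feature maps. A minimal sketch of the effect, using random weights:

```python
import torch
from pysemseg.models.resnet import resnet50

# output_stride=8: blocks 3 and 4 use dilation instead of striding.
net = resnet50(pretrained=False, output_stride=8).eval()
with torch.no_grad():
    features, block_outputs = net(torch.randn(1, 3, 224, 224))

assert features.shape[2:] == (28, 28)  # 224 / 8
assert len(block_outputs) == 4         # one output per ResBlock
```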
/pysemseg/models/unet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from pysemseg.models.resnet import resnet50, resnet101, resnet152, ResBlock
5 |
6 |
7 | def _maybe_pad(x, size):
8 | hpad = max(size[0] - x.shape[2], 0)
9 | wpad = max(size[1] - x.shape[3], 0)
10 | if hpad + wpad > 0:
11 | lhpad = hpad // 2
12 | rhpad = hpad // 2 + hpad % 2
13 | lwpad = wpad // 2
14 | rwpad = wpad // 2 + wpad % 2
15 |
16 |         x = F.pad(x, (lwpad, rwpad, lhpad, rhpad))
17 | return x
18 |
19 |
20 | class DownLayer(nn.Module):
21 | def __init__(self, in_units, out_units, batch_norm=True):
22 | super().__init__()
23 | self.batch_norm = batch_norm
24 | self.conv1 = nn.Conv2d(in_units, out_units, kernel_size=3, padding=1)
25 | if batch_norm:
26 | self.bn1 = nn.BatchNorm2d(out_units)
27 | self.conv2 = nn.Conv2d(out_units, out_units, kernel_size=3, padding=1)
28 | if batch_norm:
29 | self.bn2 = nn.BatchNorm2d(out_units)
30 | self.dropout = nn.Dropout(p=0.0)
31 |
32 | def forward(self, x):
33 | x = F.relu(self.conv1(x), inplace=True)
34 | if self.batch_norm:
35 | x = self.bn1(x)
36 | x = F.relu(self.conv2(x), inplace=True)
37 | if self.batch_norm:
38 | x = self.bn2(x)
39 | if self.training:
40 | x = self.dropout(x)
41 | return x
42 |
43 |
44 | class UpResBlock(nn.Module):
45 | def __init__(self, in_units, out_units, upsample=True, batch_norm=True):
46 | super().__init__()
47 | self.upsample = upsample
48 | self.batch_norm = batch_norm
49 | self.block = ResBlock(
50 | in_units, out_units, 3, dilations=[1, 2, 1], expansion=1)
51 | if self.upsample:
52 | self.conv3 = nn.ConvTranspose2d(
53 | out_units, out_units // 2, kernel_size=2, stride=2,
54 | padding=0, output_padding=0)
55 | if self.batch_norm:
56 | self.bn3 = nn.BatchNorm2d(out_units // 2)
57 |
58 |
59 | def forward(self, x):
60 | x = self.block(x)
61 | if self.upsample:
62 | x = F.relu(self.conv3(x), inplace=True)
63 | if self.batch_norm:
64 | x = self.bn3(x)
65 | return x
66 |
67 |
68 | class UpLayer(nn.Module):
69 | def __init__(self, in_units, out_units, upsample=True, batch_norm=True):
70 | super().__init__()
71 | self.batch_norm = batch_norm
72 | self.conv1 = nn.Conv2d(in_units, out_units, kernel_size=3, padding=1)
73 | if batch_norm:
74 | self.bn1 = nn.BatchNorm2d(out_units)
75 | self.conv2 = nn.Conv2d(out_units, out_units, kernel_size=3, padding=1)
76 | if batch_norm:
77 | self.bn2 = nn.BatchNorm2d(out_units)
78 | self.upsample = upsample
79 | self.dropout = nn.Dropout(p=0.0)
80 | if self.upsample:
81 | self.conv3 = nn.ConvTranspose2d(
82 | out_units, out_units // 2, kernel_size=2, stride=2,
83 | padding=0, output_padding=0)
84 | if self.batch_norm:
85 | self.bn3 = nn.BatchNorm2d(out_units // 2)
86 |
87 |
88 | def forward(self, x):
89 | x = F.relu(self.conv1(x), inplace=True)
90 | if self.batch_norm:
91 | x = self.bn1(x)
92 | x = F.relu(self.conv2(x), inplace=True)
93 | if self.batch_norm:
94 | x = self.bn2(x)
95 | if self.training:
96 | x = self.dropout(x)
97 | if self.upsample:
98 | x = F.relu(self.conv3(x), inplace=True)
99 | if self.batch_norm:
100 | x = self.bn3(x)
101 | return x
102 |
103 |
104 | class UNet(nn.Module):
105 | def __init__(self, n_classes, in_channels, batch_norm=True):
106 | super().__init__()
107 | self.down_layers = [
108 | DownLayer(in_channels, 64, batch_norm),
109 | DownLayer(64, 128, batch_norm),
110 | DownLayer(128, 256, batch_norm),
111 | DownLayer(256, 512, batch_norm)
112 | ]
113 | self.interface1 = nn.Conv2d(512, 1024, kernel_size=3, padding=1)
114 | self.interface2 = nn.Conv2d(1024, 512, kernel_size=3, padding=1)
115 | self.interface_up = nn.ConvTranspose2d(
116 | 512, 512, kernel_size=2, stride=2)
117 | self.up_layers = [
118 | UpLayer(1024, 512, batch_norm=batch_norm),
119 | UpLayer(512, 256, batch_norm=batch_norm),
120 | UpLayer(256, 128, batch_norm=batch_norm),
121 | UpLayer(128, 64, upsample=False, batch_norm=batch_norm)
122 | ]
123 | self.down_layers = nn.ModuleList(self.down_layers)
124 | self.up_layers = nn.ModuleList(self.up_layers)
125 | self.conv_classes = nn.Conv2d(64, n_classes, kernel_size=1)
126 |
127 | def forward(self, x):
128 | input_height, input_width = x.shape[2:]
129 | down_outputs = []
130 | for layer in self.down_layers:
131 | x = layer(x)
132 | down_outputs.append(x)
133 | x = F.max_pool2d(x, kernel_size=2)
134 | x = F.relu(self.interface1(x), inplace=True)
135 | x = F.relu(self.interface2(x), inplace=True)
136 | x = F.relu(self.interface_up(x), inplace=True)
137 | for i, layer in enumerate(self.up_layers):
138 | skip = down_outputs[-(i+1)]
139 | x = _maybe_pad(x, skip.shape[2:])
140 | skip = _maybe_pad(skip, x.shape[2:])
141 | x = torch.cat([x, skip], dim=1)
142 | x = layer(x)
143 | x = F.interpolate(
144 | x, (input_height, input_width), mode='bilinear', align_corners=True)
145 | x = self.conv_classes(x)
146 | return x
147 |
148 |
149 | class UNetResNetV1(nn.Module):
150 | def __init__(
151 | self, n_classes, network, skip_channels, interface_channels,
152 | up_channels):
153 | super().__init__()
154 | self.network = network
155 | self.bottleneck_channels = interface_channels // self.network.expansion
156 | self.interface = UpLayer(interface_channels, self.bottleneck_channels)
157 | self.up_layers = nn.ModuleList([
158 | UpLayer(skip_channels[-1] + self.bottleneck_channels // 2, up_channels[0]),
159 | UpLayer(skip_channels[-2] + up_channels[0] // 2, up_channels[1]),
160 | UpLayer(skip_channels[-3] + up_channels[1] // 2, up_channels[2]),
161 | UpLayer(skip_channels[-4] + up_channels[2] // 2, up_channels[3])
162 | ])
163 | self.conv_classes = nn.Conv2d(up_channels[3] // 2, n_classes, kernel_size=1)
164 |
165 | def _resnet_forward(self, x):
166 | skips = []
167 | x = self.network.conv1(x)
168 | skips.append(x)
169 | x = self.network.bn1(x)
170 | x = self.network.relu(x)
171 | x = self.network.maxpool(x)
172 | skips.append(x)
173 | for i, layer in enumerate(self.network.layers):
174 | height, width = x.shape[2:]
175 | x = layer(x)
176 | if (x.shape[2] < height and x.shape[3] < width and
177 | i < len(self.network.layers) - 1):
178 | skips.append(x)
179 | return x, skips
180 |
181 | def forward(self, x):
182 | input_tensor = x
183 | x, skips = self._resnet_forward(x)
184 | x = self.interface(x)
185 | for i in range(0, len(self.up_layers)):
186 | skip = _maybe_pad(skips[-i - 1], x.shape[2:])
187 | x = torch.cat([x, skip], dim=1)
188 | x = self.up_layers[i](x)
189 | x = _maybe_pad(x, input_tensor.shape[2:])
190 | x = F.interpolate(
191 | x, size=input_tensor.shape[2:], mode='bilinear', align_corners=True)
192 | scores = self.conv_classes(x)
193 | return scores
194 |
195 |
196 | def unet_resnet(
197 | resnet_model_fn, in_channels, n_classes, pretrained=True,
198 | up_channels=[512, 256, 128, 64], **kwargs):
199 | net = resnet_model_fn(
200 | in_channels=in_channels, output_stride=32, pretrained=pretrained,
201 | **kwargs)
202 | return UNetResNetV1(
203 | n_classes=n_classes, network=net,
204 | skip_channels=[64, 64, 512, 1024],
205 | interface_channels=2048,
206 | up_channels=up_channels
207 | )
208 |
209 |
210 | def unet_resnet101(*args, **kwargs):
211 | return unet_resnet(resnet101, *args, **kwargs)
212 |
213 |
214 | def unet_resnet152(*args, **kwargs):
215 | return unet_resnet(resnet152, *args, **kwargs)
216 |
217 | def unet_big_resnet101(*args, **kwargs):
218 | return unet_resnet(
219 | resnet101, *args, up_channels=[1024, 256, 128, 64], **kwargs)
220 |
221 |
--------------------------------------------------------------------------------
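`_maybe_pad` is what lets the decoders above accept inputs whose sides are not multiples of 16: a stride-2 transposed convolution can come back one pixel short of its skip connection when an intermediate size was odd, and the padding reconciles the two before concatenation. A minimal shape check with a deliberately awkward input size:

```python
import torch
from pysemseg.models.unet import UNet

model = UNet(n_classes=21, in_channels=3).eval()
with torch.no_grad():
    # 180 is not a multiple of 16, so skip sizes need padding.
    out = model(torch.randn(1, 3, 180, 180))

# The final bilinear interpolation restores the input resolution.
assert out.shape == (1, 21, 180, 180)
```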
/pysemseg/train.py:
--------------------------------------------------------------------------------
1 | import cProfile
2 | import sys
3 | import os
4 | import ast
5 | import time
6 | import yaml
7 | import numpy as np
8 | import configargparse
9 |
10 | import torch
11 | import torch.nn.functional as F
12 | from torch.autograd import Variable
13 |
14 | from pysemseg import datasets
15 | from pysemseg.metrics import SegmentationMetrics, TorchSegmentationMetrics
16 | from pysemseg.loggers import TensorboardLogger, VisdomLogger, ConsoleLogger
17 | from pysemseg.evaluate import evaluate
18 | from pysemseg.utils import (
19 | prompt_delete_dir, restore, tensor_to_numpy, import_type,
20 | flatten_dict, get_latest_checkpoint, save
21 | )
22 |
23 |
24 | def define_args():
25 | parser = configargparse.ArgParser(description='PyTorch Segmentation Framework',
26 | config_file_parser_class=configargparse.YAMLConfigFileParser,
27 | )
28 | parser.add_argument(
29 | '--config', is_config_file=True, required=False, help='Config file')
30 | parser.add_argument('--model', type=str, required=True,
31 | help=('A path to the model including the module. '
32 | 'Should be resolvable'))
33 | parser.add_argument('--model-args', type=ast.literal_eval, required=False, default={},
34 | help=('Args passed to the model constructor'))
35 | parser.add_argument('--data-dir', type=str, required=True,
36 | help='Path to the dataset root dir.')
37 | parser.add_argument('--model-dir', type=str, required=True,
38 | help='Path to store output data.')
39 | parser.add_argument('--dataset', type=str, required=True,
40 | help=('Path to the dataset class including the module'))
41 | parser.add_argument('--dataset-args', type=ast.literal_eval, default={},
42 | required=False,
43 | help='Dataset args.')
44 | parser.add_argument('--criterion', type=str, required=False,
45 | default='torch.nn.CrossEntropyLoss',
46 | help=('Path to the loss class including the module'))
47 | parser.add_argument('--criterion-args', type=ast.literal_eval,
48 | default={'reduction': 'sum'}, required=False,
49 | help='Criterion args.')
50 | parser.add_argument('--batch-size', type=int, default=64, metavar='N',
51 | help='input batch size for training (default: 64)')
52 | parser.add_argument('--max-gpu-batch-size', type=int, default=None,
53 |                         help='Effective GPU batch size. Gradients will be '
54 | 'accumulated to the batch-size before update.')
55 | parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
56 | help='input batch size for testing (default: 1000)')
57 | parser.add_argument('--epochs', type=int, default=10, metavar='N',
58 | help='number of epochs to train (default: 10)')
59 | parser.add_argument('--optimizer', type=str, default='RMSprop',
60 | required=False,
61 | help='Optimizer type.')
62 | parser.add_argument('--optimizer-args', type=ast.literal_eval, default={},
63 | required=False,
64 | help='Optimizer args.')
65 | parser.add_argument('--lr-scheduler', type=str, required=False,
66 | default='lr_schedulers.ConstantLR',
67 | help='Learning rate scheduler type.')
68 | parser.add_argument('--lr-scheduler-args', type=ast.literal_eval, default={},
69 | required=False,
70 | help='Learning rate scheduler args.')
71 | parser.add_argument('--transformer', type=str, required=False,
72 | help='Transformer type')
73 | parser.add_argument('--transformer-args', type=ast.literal_eval, default={},
74 | required=False,
75 | help='Transformer args.')
76 | parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
77 | help='learning rate (default: 0.001)')
78 | parser.add_argument('--no-cuda', action='store_true', default=False,
79 | help='disables CUDA training')
80 | parser.add_argument('--seed', type=int, default=8861, metavar='S',
81 | help='random seed (default: 8861)')
82 | parser.add_argument('--log-interval', type=int, default=20, metavar='N',
83 | help='logging training status frequency')
84 | parser.add_argument('--log-images-interval', type=int, default=200, metavar='N',
85 | help='Frequency of logging images and larger plots')
86 | parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
87 | parser.add_argument('--num-workers', type=int, default=1,
88 | help='Number of CPU data workers')
89 | parser.add_argument('--checkpoint', type=str,
90 | required=False,
91 | help='Load model on checkpoint.')
92 | parser.add_argument('--save-model-frequency', type=int,
93 | required=False, default=5,
94 | help='Save model checkpoint every nth epoch.')
95 | parser.add_argument('--profile', type=str, required=False,
96 | help='Runs a profiler and saves the results in a file.')
97 | group = parser.add_mutually_exclusive_group()
98 | group.add_argument('--allow-missing-keys', action='store_true', default=False,
99 | help='Whether to allow module keys to differ from checkpoint keys'
100 | ' when loading a checkpoint')
101 | group.add_argument('--continue-training', action='store_true', default=False,
102 | help='Continue experiment from the last checkpoint in the model dir')
103 | return parser
104 |
105 |
106 | def train_epoch(
107 | model, loader, criterion, optimizer, lr_scheduler,
108 | epoch, console_logger, visual_logger, device, log_interval,
109 | log_images_interval, accumulate_steps):
110 | model.train()
111 |
112 | ignore_index = loader.dataset.ignore_index
113 |
114 | metrics = TorchSegmentationMetrics(
115 | loader.dataset.number_of_classes,
116 | loader.dataset.labels,
117 | ignore_index=ignore_index,
118 | device=device
119 | )
120 | epoch_metrics = TorchSegmentationMetrics(
121 | loader.dataset.number_of_classes,
122 | loader.dataset.labels,
123 | ignore_index=ignore_index,
124 | device=device
125 | )
126 |
127 | start_time = time.time()
128 | for step, (ids, data, target) in enumerate(loader):
129 |         data, target = Variable(data.to(device)), Variable(target.to(device, non_blocking=True))
130 | output = model(data)
131 | loss = criterion(output, target)
132 | num_targets = torch.sum(target != ignore_index).float()
133 | loss = loss / (num_targets * accumulate_steps)
134 | loss.backward()
135 |
136 | if step % accumulate_steps == 0:
137 | optimizer.step()
138 | optimizer.zero_grad()
139 |
140 | output = F.softmax(output.detach(), dim=1)
141 | predictions = torch.argmax(output, dim=1)
142 |
143 | predictions, target, loss = (
144 | predictions.detach(), target.detach(),
145 | (loss * accumulate_steps).detach()
146 | )
147 |
148 | metrics.add(predictions, target, loss)
149 | epoch_metrics.add(predictions, target, loss)
150 |
151 | if step % (accumulate_steps * log_interval) == 0 and step > 0:
152 | avg_time = (
153 | time.time() - start_time) / log_interval
154 | start_time = time.time()
155 | metrics_dict = metrics.metrics()
156 | metrics_dict['time'] = avg_time
157 | metrics_dict.pop('class')
158 | console_logger.log(step, epoch, loader, tensor_to_numpy(data), metrics_dict)
159 |
160 | metrics = TorchSegmentationMetrics(
161 | loader.dataset.number_of_classes,
162 | loader.dataset.labels,
163 | ignore_index=ignore_index,
164 | device=device
165 | )
166 |
167 | if step % (accumulate_steps * log_images_interval) == 0 and step > 0:
168 | visual_logger.log_prediction_images(
169 | step,
170 | tensor_to_numpy(data.data),
171 | tensor_to_numpy(target.data),
172 | tensor_to_numpy(predictions.data),
173 | name='images',
174 | prefix='Train'
175 | )
176 |
177 | visual_logger.log_metrics(epoch, epoch_metrics.metrics(), 'Train')
178 | visual_logger.log_learning_rate(epoch, optimizer.param_groups[0]['lr'])
179 |
180 |
181 | def _create_data_loaders(
182 | data_dir, dataset_cls, dataset_args, transformer_cls, transformer_args,
183 | train_batch_size, val_batch_size, num_workers, pin_memory=False):
184 | train_dataset = datasets.create_dataset(
185 | data_dir, dataset_cls, dataset_args,
186 | transformer_cls, transformer_args, mode='train')
187 |
188 | train_loader = torch.utils.data.DataLoader(
189 | train_dataset, batch_size=train_batch_size,
190 | shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
191 |
192 | validate_dataset = datasets.create_dataset(
193 | data_dir, dataset_cls, dataset_args,
194 | transformer_cls, transformer_args, mode='val')
195 |
196 | validate_loader = torch.utils.data.DataLoader(
197 | validate_dataset, batch_size=val_batch_size,
198 | shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
199 |
200 | return train_loader, validate_loader
201 |
202 |
203 | def _store_args(args, model_dir):
204 | with open(os.path.join(model_dir, 'args.yaml'), 'w') as args_file:
205 | yaml.dump(
206 | {**args.__dict__, 'command': " ".join(sys.argv)},
207 | args_file
208 | )
209 |
210 |
211 | def _set_seed(seed, cuda):
212 | torch.manual_seed(seed)
213 | np.random.seed(seed)
214 | if cuda:
215 | torch.cuda.manual_seed(seed)
216 |
217 |
218 | def train(args):
219 | if not args.continue_training:
220 | prompt_delete_dir(args.model_dir)
221 | os.makedirs(args.model_dir)
222 |
223 | _store_args(args, args.model_dir)
224 |
225 | # seed torch and cuda
226 | args.cuda = not args.no_cuda and torch.cuda.is_available()
227 | _set_seed(args.seed, args.cuda)
228 |
229 | device = torch.device('cuda:0' if args.cuda else 'cpu:0')
230 |
231 | dataset_cls = import_type(args.dataset, ['pysemseg.datasets'])
232 | transformer_cls = import_type(args.transformer, ['pysemseg.datasets'])
233 |
234 | args.max_gpu_batch_size = args.max_gpu_batch_size or args.batch_size
235 |
236 | train_loader, validate_loader = _create_data_loaders(
237 | args.data_dir, dataset_cls, args.dataset_args, transformer_cls,
238 | args.transformer_args, args.max_gpu_batch_size,
239 | args.test_batch_size, args.num_workers, pin_memory=args.cuda
240 | )
241 |
242 | visual_logger = VisdomLogger(
243 | log_directory=args.model_dir,
244 | color_palette=train_loader.dataset.color_palette,
245 | continue_logging=args.continue_training
246 | )
247 |
248 | visual_logger.log_args(args.__dict__)
249 |
250 | model_class = import_type(args.model, ['pysemseg.models'])
251 | model = model_class(
252 | in_channels=train_loader.dataset.in_channels,
253 | n_classes=train_loader.dataset.number_of_classes,
254 | **args.model_args
255 | )
256 |
257 | if torch.cuda.device_count() > 1:
258 | model = torch.nn.DataParallel(model)
259 |
260 | model = model.to(device)
261 |
262 | criterion_cls = import_type(args.criterion, ['pysemseg.losses'])
263 | criterion = criterion_cls(
264 | ignore_index=train_loader.dataset.ignore_index,
265 | **args.criterion_args
266 | )
267 |
268 | criterion = criterion.to(device)
269 |
270 | optimizer_class = import_type(args.optimizer, ['torch.optim'])
271 | optimizer = optimizer_class(
272 | model.parameters(), lr=args.lr, **args.optimizer_args
273 | )
274 |
275 | start_epoch = 0
276 |
277 | if args.continue_training:
278 | args.checkpoint = get_latest_checkpoint(args.model_dir)
279 | assert args.checkpoint is not None
280 |
281 | lr_scheduler_cls = import_type(
282 | args.lr_scheduler, ['pysemseg.lr_schedulers', 'torch.optim.lr_scheduler']
283 | )
284 | lr_scheduler = lr_scheduler_cls(optimizer, **args.lr_scheduler_args)
285 |
286 | if args.checkpoint:
287 | start_epoch = restore(
288 | args.checkpoint, model, optimizer, lr_scheduler,
289 |             strict=not args.allow_missing_keys, continue_training=args.continue_training) + 1
290 |
291 | log_filepath = os.path.join(args.model_dir, 'train.log')
292 |
293 | with ConsoleLogger(filename=log_filepath) as logger:
294 | for epoch in range(start_epoch, start_epoch + args.epochs):
295 | train_epoch(
296 | model, train_loader, criterion, optimizer, lr_scheduler,
297 | epoch, logger, visual_logger, device, args.log_interval,
298 | args.log_images_interval,
299 | args.batch_size // args.max_gpu_batch_size)
300 | evaluate(
301 | model, validate_loader, criterion, logger, epoch,
302 | visual_logger, device, args.log_images_interval)
303 | if epoch % args.save_model_frequency == 0:
304 | save(model, optimizer, lr_scheduler, args.model_dir,
305 | train_loader.dataset.in_channels,
306 | train_loader.dataset.number_of_classes, epoch, args)
307 | lr_scheduler.step()
308 |
309 |
310 | def main():
311 | sys.path.append('./')
312 | parser = define_args()
313 | args = parser.parse_args()
314 | if args.profile is not None:
315 | cProfile.runctx('train(args)', globals(), locals(), args.profile)
316 | else:
317 | train(args)
318 |
319 |
320 | if __name__ == '__main__':
321 | main()
322 |
--------------------------------------------------------------------------------
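A note on the gradient-accumulation arithmetic in `train_epoch`: the data loaders are built with `--max-gpu-batch-size` samples per step, and `train()` passes `batch_size // max_gpu_batch_size` in as `accumulate_steps`. A small sketch with hypothetical flag values:

```python
batch_size = 16          # --batch-size: effective optimization batch
max_gpu_batch_size = 4   # --max-gpu-batch-size: what fits in GPU memory

# train() passes this ratio to train_epoch as accumulate_steps.
accumulate_steps = batch_size // max_gpu_batch_size
assert accumulate_steps == 4

# Inside train_epoch each micro-batch loss is divided by
# num_targets * accumulate_steps before backward(), so the default
# sum-reduced CrossEntropyLoss acts like a per-pixel mean over the
# full effective batch once gradients have been accumulated.
```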
/pysemseg/transforms/__init__.py:
--------------------------------------------------------------------------------
1 | from .loaders import PILImageLoader, CV2ImageLoader
2 | from .convert import Grayscale, ToCategoryTensor, ToTensor, ToFloatImage
3 | from .transforms import (
4 | Compose, Resize, Binarize, RandomContrast, RandomBrightness,
5 | RandomHueSaturation, RandomHorizontalFlip, RandomRotate,
6 | RandomTranslate, RandomGammaCorrection, ResizeBatch,
7 | RandomCrop, PadTo, ScaleTo, RandomCropFixedSize, Concat,
8 | RandomScale, Choice, RandomPerspective, RandomShear,
9 | RandomElasticTransform, RandomGaussianBlur
10 | )
11 |
--------------------------------------------------------------------------------
/pysemseg/transforms/convert.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import torch
4 |
5 |
6 | class Grayscale:
7 | def __call__(self, image):
8 | return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
9 |
10 |
11 | class ToCategoryTensor:
12 | def __init__(self, remap=None):
13 | self.remap = remap
14 |
15 | def __call__(self, image):
16 | if self.remap:
17 | for k, v in self.remap.items():
18 | image[image == k] = v
19 | return torch.LongTensor(image)
20 |
21 |
22 | class ToTensor:
23 | def __call__(self, image):
24 | if len(image.shape) == 2:
25 | image = np.expand_dims(image, axis=-1)
26 | image = np.transpose(image, [2, 0, 1])
27 | return torch.FloatTensor(image)
28 |
29 |
30 | class ToFloatImage:
31 | def __call__(self, image):
32 | if image.dtype == np.uint8:
33 | return image / 255.
34 | elif image.dtype == np.uint16:
35 | return image / np.iinfo(np.uint16).max
36 |         elif np.issubdtype(image.dtype, np.floating):
37 | return image
38 | raise ValueError("Unsupported image type")
39 |
--------------------------------------------------------------------------------
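A minimal sketch of the conversion chain these classes are designed for, going from a uint8 HWC image to a channels-first float tensor:

```python
import numpy as np
from pysemseg.transforms import ToFloatImage, ToTensor

image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)

# ToFloatImage scales to [0, 1]; ToTensor moves channels first.
tensor = ToTensor()(ToFloatImage()(image))
assert tensor.shape == (3, 32, 32)
assert float(tensor.max()) <= 1.0
```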
/pysemseg/transforms/loaders.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import cv2
3 |
4 |
5 | class PILImageLoader():
6 | def __call__(self, path):
7 | with open(path, 'rb') as f:
8 | with Image.open(f) as img:
9 | return img.convert('RGB')
10 |
11 |
12 | class CV2ImageLoader():
13 | def __init__(self, grayscale=False):
14 | self.grayscale = grayscale
15 |
16 | def __call__(self, path):
17 | if self.grayscale:
18 | return cv2.imread(path, cv2.IMREAD_GRAYSCALE)
19 | img = cv2.imread(path)
20 | return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
21 |
--------------------------------------------------------------------------------
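Both loaders return RGB data: `PILImageLoader` yields a `PIL.Image`, while `CV2ImageLoader` yields a numpy array with OpenCV's native BGR order converted to RGB. A sketch with a hypothetical file path:

```python
from pysemseg.transforms import CV2ImageLoader

loader = CV2ImageLoader(grayscale=False)
image = loader('example.jpg')  # hypothetical path to an image on disk
# image is an RGB uint8 array of shape (height, width, 3)
```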
/pysemseg/transforms/transforms.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | from scipy.ndimage import gaussian_filter
4 |
5 |
6 |
7 | class Compose:
8 | def __init__(self, transforms):
9 | self.transforms = transforms
10 |
11 | def __call__(self, image, *args):
12 | for transform in self.transforms:
13 | if args:
14 | image, *args = transform(image, *args)
15 | else:
16 | image = transform(image)
17 | if args:
18 | return (image,) + tuple(args)
19 | else:
20 | return image
21 |
22 | class Resize:
23 | """
24 | params:
25 | size: (height, width)
26 | returns:
27 | resized image
28 | """
29 | def __init__(self, size, interpolation=cv2.INTER_LANCZOS4):
30 | self.size = size
31 | self.interpolation = interpolation
32 |
33 | def __call__(self, image):
34 | dims = len(image.shape)
35 | image = cv2.resize(
36 | image, self.size[::-1], interpolation=self.interpolation)
37 |         if len(image.shape) == dims - 1:
38 | image = np.expand_dims(image, -1)
39 | return image
40 |
41 |
42 |
43 | class ResizeBatch:
44 | """
45 | params:
46 | size: (height, width)
47 | returns:
48 |         batch of resized images
49 | """
50 | def __init__(self, size, interpolation=cv2.INTER_LANCZOS4):
51 | self.size = size
52 | self.interpolation = interpolation
53 | self.resize_op = Resize(size, interpolation=interpolation)
54 |
55 | def __call__(self, images):
56 | resized_images = []
57 | for i in range(images.shape[0]):
58 | resized_images.append(self.resize_op(images[i]))
59 | return np.stack(resized_images)
60 |
61 |
62 | class Binarize:
63 | """
64 | params:
65 | threshold: binary threshold
66 | returns:
67 | binarized image
68 | """
69 | def __init__(self, threshold):
70 | self.threshold = threshold
71 |
72 | def __call__(self, image):
73 | _, binarized_image = cv2.threshold(
74 | image, self.threshold, 255, cv2.THRESH_BINARY)
75 | return binarized_image
76 |
77 |
78 | class RandomContrast:
79 | """
80 | params:
81 |         low, high: contrast scale factor range
82 | returns:
83 | image with changed contrast
84 | """
85 | def __init__(self, low, high):
86 | self.low = low
87 | self.high = high
88 |
89 | def __call__(self, image):
90 | assert np.issubdtype(image.dtype, np.floating)
91 | contrast = np.random.uniform(self.low, self.high)
92 | image = image * contrast
93 | return np.clip(image, 0.0, 1.0)
94 |
95 |
96 | class RandomBrightness:
97 | """
98 | params:
99 |         low, high: brightness offset range
100 |     returns:
101 |         image with changed brightness
102 | """
103 | def __init__(self, low, high):
104 | self.low = low
105 | self.high = high
106 |
107 | def __call__(self, image):
108 | assert np.issubdtype(image.dtype, np.floating)
109 | brightness = np.random.uniform(self.low, self.high)
110 | image = image + brightness
111 | return np.clip(image, 0.0, 1.0)
112 |
113 |
114 | class RandomHueSaturation:
115 | """
116 | params:
117 |         hue_delta: max hue shift; saturation_scale_range: scale range
118 |     returns:
119 |         image with jittered hue and saturation
120 | """
121 | def __init__(self, hue_delta=0.1, saturation_scale_range=(0.7, 1.3)):
122 | self.hue_delta = hue_delta
123 | self.saturation_scale_range = saturation_scale_range
124 |
125 | def _adjust_hue(self, hue, delta):
126 | hue += delta * 360
127 | hue[hue < 0] += 360
128 | hue[hue >= 360] -= 360
129 | return hue
130 |
131 | def __call__(self, image):
132 | assert np.issubdtype(image.dtype, np.floating)
133 | image = image.astype(np.float32)
134 | saturation = np.random.uniform(*self.saturation_scale_range)
135 | hue = np.random.uniform(-self.hue_delta, self.hue_delta)
136 | hsvimage = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
137 | (h, s, v) = cv2.split(hsvimage)
138 | s = np.clip(s * saturation, 0.0, 1.0)
139 | h = self._adjust_hue(h, hue)
140 | hsvimage = cv2.merge([h, s, v])
141 | image = cv2.cvtColor(hsvimage, cv2.COLOR_HSV2RGB)
142 | image = np.clip(image, 0.0, 1.0)
143 | return image
144 |
145 |
146 | class RandomGammaCorrection:
147 | def __init__(self, min_gamma=0.75, max_gamma=1.25):
148 | self.min_gamma = min_gamma
149 | self.max_gamma = max_gamma
150 |
151 | def __call__(self, image):
152 | if np.issubdtype(image.dtype, np.floating):
153 | image = (image * 255).astype(np.uint8)
154 |
155 | gamma = np.random.uniform(self.min_gamma, self.max_gamma)
156 | inv_gamma = 1.0 / gamma
157 | table = np.array([((i / 255.0) ** inv_gamma) * 255
158 | for i in np.arange(0, 256)]).astype("uint8")
159 |
160 | image = cv2.LUT(image, table)
161 | image = image / 255.
162 | image = np.clip(image, 0.0, 1.0)
163 | return image
164 |
165 | class RandomHorizontalFlip:
166 | """
167 | params:
168 | Probability to flip the image horizontally
169 | returns:
170 | Maybe flipped image
171 | """
172 | def __init__(self, flip_probability=0.5):
173 | self.flip_probability = flip_probability
174 |
175 | def __call__(self, image, mask):
176 | if np.random.random() < self.flip_probability:
177 | image = cv2.flip(image, 1)
178 | mask = cv2.flip(mask, 1)
179 | return image, mask
180 |
181 |
182 | class RandomRotate:
183 | def __init__(self, max_delta=5.0):
184 | self.max_delta = max_delta
185 |
186 | def __call__(self, image, mask):
187 | angle = np.random.uniform(-self.max_delta, self.max_delta)
188 | image = self._rotate(image, angle, interpolation=cv2.INTER_LINEAR)
189 | mask = self._rotate(mask, angle, interpolation=cv2.INTER_NEAREST)
190 | return image, mask
191 |
192 | def _rotate(self, image, angle, interpolation=cv2.INTER_LANCZOS4):
193 | image_center = (image.shape[1] / 2, image.shape[0] / 2)
194 | rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
195 | result = cv2.warpAffine(
196 | image, rot_mat, (image.shape[1], image.shape[0]), flags=interpolation,
197 | borderMode=cv2.BORDER_REPLICATE)
198 | return result
199 |
200 |
201 | class RandomTranslate:
202 | def __init__(self, max_percent_delta=0.02):
203 | self.max_percent_delta = max_percent_delta
204 |
205 | def __call__(self, image, mask):
206 | delta_x = np.random.uniform(-self.max_percent_delta, self.max_percent_delta)
207 | delta_y = np.random.uniform(-self.max_percent_delta, self.max_percent_delta)
208 | image = self._translate(image, delta_x, delta_y)
209 | mask = self._translate(mask, delta_x, delta_y)
210 | return image, mask
211 |
212 | def _translate(self, image, delta_x, delta_y):
213 | height, width = image.shape[:2]
214 | translation_matrix = np.float32([
215 | [1, 0, int(width * delta_x)],
216 | [0, 1, int(height * delta_y)]
217 | ])
218 | translated_image = cv2.warpAffine(
219 | image, translation_matrix, (width, height),
220 | borderMode=cv2.BORDER_REPLICATE
221 | )
222 | return translated_image
223 |
224 |
225 | class RandomCropFixedSize:
226 | def __init__(self, size):
227 | self.height, self.width = size
228 |
229 | def __call__(self, image, mask):
230 | assert image.shape[:2] == mask.shape[:2]
231 | assert image.shape[0] >= self.height and image.shape[1] >= self.width
232 |         srow = np.random.randint(0, image.shape[0] - self.height + 1)
233 |         scol = np.random.randint(0, image.shape[1] - self.width + 1)
234 |         image_crop = image[srow:srow+self.height, scol:scol+self.width]
235 |         mask_crop = mask[srow:srow+self.height, scol:scol+self.width]
236 | return image_crop, mask_crop
237 |
238 |
239 | class RandomCrop:
240 | def __init__(self, scale_height=(0.8, 1.0), scale_width=(0.8, 1.0)):
241 | self.scale_height = scale_height
242 | self.scale_width = scale_width
243 |
244 | def __call__(self, image, mask):
245 | assert image.shape[:2] == mask.shape[:2]
246 | sh = np.random.uniform(*self.scale_height)
247 | sw = np.random.uniform(*self.scale_width)
248 | crop_height = int(sh * image.shape[0])
249 | crop_width = int(sw * image.shape[1])
250 | return RandomCropFixedSize((crop_height, crop_width))(image, mask)
251 |
252 |
253 | class RandomScale:
254 |     def __init__(self, scale_height=(0.8, 1.2), scale_width=(0.8, 1.2), aspect_range=(0.8, 1.2)):
255 | self.scale_height = scale_height
256 | self.scale_width = scale_width
257 | self.aspect_range = aspect_range
258 |
259 | def __call__(self, image, mask):
260 | hs = np.random.uniform(*self.scale_height)
261 | height = int(image.shape[0] * hs)
262 | lw = max(hs * self.aspect_range[0], self.scale_width[0])
263 | hw = min(hs * self.aspect_range[1], self.scale_width[1])
264 | ws = np.random.uniform(lw, hw)
265 | width = int(image.shape[1] * ws)
266 | return (
267 | Resize((height, width))(image),
268 | Resize((height, width), cv2.INTER_NEAREST)(mask)
269 | )
270 |
271 |
272 | class ScaleTo:
273 | def __init__(self, size, interpolation=cv2.INTER_LANCZOS4):
274 | self.height, self.width = size
275 | self.interpolation = interpolation
276 |
277 | def __call__(self, image, mask):
278 | scale = min(self.height / image.shape[0], self.width / image.shape[1])
279 | height = int(scale * image.shape[0])
280 | width = int(scale * image.shape[1])
281 | return (
282 | cv2.resize(image, (width, height),
283 | interpolation=self.interpolation),
284 | cv2.resize(mask, (width, height),
285 | interpolation=cv2.INTER_NEAREST)
286 | )
287 |
288 |
289 | class Concat:
290 | def __init__(self, transforms):
291 | self.transforms = transforms
292 |
293 | def __call__(self, *args):
294 | assert len(args) == len(self.transforms)
295 | return [
296 | tr(x) for tr, x in zip(self.transforms, args)
297 | ]
298 |
299 | class PadTo:
300 | def __init__(self, size, pad_value=0):
301 | self.height, self.width = size
302 | self.pad_value = pad_value
303 |
304 | def __call__(self, image):
305 | hpad = max(0, self.height - image.shape[0])
306 | wpad = max(0, self.width - image.shape[1])
307 | top_pad = hpad // 2
308 | bottom_pad = hpad // 2 + hpad % 2
309 | left_pad = wpad // 2
310 | right_pad = wpad // 2 + wpad % 2
311 | image = cv2.copyMakeBorder(
312 | image, top_pad, bottom_pad, left_pad,
313 | right_pad, cv2.BORDER_CONSTANT, value=self.pad_value
314 | )
315 | return image
316 |
317 |
318 | class Choice:
319 | def __init__(self, choices, p=None):
320 | self.choices = choices
321 | self.p = p
322 |         if self.p is None:
323 | self.p = np.ones(len(self.choices)) / len(self.choices)
324 |
325 | def __call__(self, *args):
326 | c = np.random.choice(self.choices, p=self.p)
327 | return c(*args)
328 |
329 |
330 | class RandomPerspective:
331 | def __init__(self, corner_offset=0.15):
332 | self.corner_offset = corner_offset
333 |
334 | def __call__(self, image, mask):
335 | height, width = image.shape[:2]
336 | lh = int(self.corner_offset * height)
337 | hh = int(height - self.corner_offset * height)
338 | lw = int(self.corner_offset * width)
339 | hw = int(width - self.corner_offset * width)
340 | x1, x3 = np.random.randint(0, lw, [2])
341 | y1, y2 = np.random.randint(0, lh, [2])
342 | x2, x4 = np.random.randint(hw, width, [2])
343 | y3, y4 = np.random.randint(hh, height, [2])
344 | mapped = np.float32([
345 | [x1, y1],
346 | [x2, y2],
347 | [x3, y3],
348 | [x4, y4]
349 | ])
350 | corners = np.float32([
351 | [0, 0],
352 | [width, 0],
353 | [0, height],
354 | [width, height]
355 | ])
356 | M = cv2.getPerspectiveTransform(mapped, corners)
357 | image = cv2.warpPerspective(
358 | image, M, (width, height), borderMode=cv2.BORDER_REFLECT)
359 | mask = cv2.warpPerspective(
360 | mask, M, (width, height), flags=cv2.INTER_NEAREST,
361 | borderMode=cv2.BORDER_REFLECT)
362 |
363 | return image, mask
364 |
365 | class RandomShear:
366 | def __init__(self, max_range=0.01):
367 | self.max_range = max_range
368 |
369 | def __call__(self, image, mask):
370 | height, width = image.shape[:2]
371 | cx = width / 2
372 | cy = height / 2
373 | pts = np.float32([
374 | [cx, cy],
375 | [cx + 0.1 * width, cy],
376 | [cx, cy + 0.1 * height]
377 | ])
378 | ru = lambda x: x * np.random.uniform(
379 | -self.max_range, self.max_range)
380 |
381 | mapped = np.float32([
382 | [pts[0][0] + ru(width), pts[0][1]],
383 | [pts[1][0] + ru(width), pts[1][1] + ru(height)],
384 | [pts[2][0], pts[2][1] + ru(height)]
385 | ])
386 |
387 | M = cv2.getAffineTransform(pts, mapped)
388 |
389 | image = cv2.warpAffine(
390 | image, M, (width, height), borderMode=cv2.BORDER_REFLECT)
391 | mask = cv2.warpAffine(
392 | mask, M, (width, height), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_REFLECT)
393 |
394 | return image, mask
395 |
396 |
397 | class RandomElasticTransform:
398 | """
399 | Adapted from
400 | https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
401 |
402 | """
403 | def __init__(self, sigma=(0.07, 0.15), alpha=(3, 3.5)):
404 | self.sigma = sigma
405 | self.alpha = alpha
406 |
407 | def __call__(self, image, mask):
408 | height, width = image.shape[:2]
409 | sigma = np.random.uniform(*self.sigma)
410 | x_sigma, y_sigma = sigma * width, sigma * height
411 | alpha = np.random.uniform(*self.alpha)
412 | x_alpha, y_alpha = alpha * width, alpha * height
413 | x_, y_ = np.meshgrid(np.arange(0, width), np.arange(0, height), indexing='xy')
414 | dx = gaussian_filter((np.random.rand(height, width) * 2 - 1), x_sigma) * x_alpha
415 | dy = gaussian_filter((np.random.rand(height, width) * 2 - 1), y_sigma) * y_alpha
416 | x = (x_ + dx).astype(np.float32)
417 | y = (y_ + dy).astype(np.float32)
418 | image = cv2.remap(
419 |             image, x, y, interpolation=cv2.INTER_LINEAR,
420 | borderMode=cv2.BORDER_REFLECT)
421 | mask = cv2.remap(
422 | mask, x, y, interpolation=cv2.INTER_NEAREST,
423 | borderMode=cv2.BORDER_REFLECT)
424 | return image, mask
425 |
426 |
427 | class RandomGaussianBlur:
428 | def __init__(self, kernel_sizes=(3, 5, 7), apply_prob=0.2):
429 | self.kernel_sizes = kernel_sizes
430 | self.apply_prob = apply_prob
431 |
432 | def __call__(self, image):
433 | if np.random.rand() < self.apply_prob:
434 | ksize = np.random.choice(self.kernel_sizes)
435 | image = cv2.GaussianBlur(image, (ksize,) * 2, 0)
436 | return image
437 |
--------------------------------------------------------------------------------
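The geometric transforms above take an `(image, mask)` pair and warp the mask with nearest-neighbor interpolation so label values stay categorical, while `Compose` threads the extra mask argument through each step. A minimal sketch:

```python
import numpy as np
from pysemseg.transforms import Compose, RandomHorizontalFlip, RandomRotate

augment = Compose([
    RandomHorizontalFlip(flip_probability=0.5),
    RandomRotate(max_delta=5.0),
])

image = np.random.rand(64, 64, 3).astype(np.float32)
mask = np.random.randint(0, 21, size=(64, 64), dtype=np.uint8)

# Both outputs receive the same random flip and rotation.
image, mask = augment(image, mask)
assert image.shape[:2] == mask.shape[:2]
```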
/pysemseg/utils.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | import sys
3 | import os
4 | import glob
5 | import re
6 | import numpy as np
7 | import torch
8 | import importlib
9 |
10 | def save(model, optimizer, lr_scheduler, model_dir,
11 | in_channels, n_classes, epoch, train_args):
12 | save_dict = {
13 | 'model': model.state_dict(),
14 | 'model_args': {
15 | 'in_channels': in_channels,
16 | 'n_classes': n_classes,
17 | **train_args.model_args,
18 | },
19 | 'transformer_args': train_args.transformer_args,
20 | 'epoch': epoch,
21 | 'optimizer': optimizer.state_dict(),
22 | 'lr_scheduler': lr_scheduler.state_dict(),
23 | 'train_args': train_args.__dict__
24 | }
25 | torch.save(
26 | save_dict,
27 | os.path.join(model_dir, 'checkpoint-{}'.format(epoch)))
28 |
29 |
30 | def prompt_delete_dir(directory):
31 | if os.path.exists(directory):
32 | answer = input(
33 | "{} exists. Do you want to delete it?[y/n]".format(directory))
34 | if answer == 'y':
35 | shutil.rmtree(directory)
36 | elif answer != 'n':
37 | sys.exit(1)
38 |
39 |
40 | def restore(checkpoint_path, model, optimizer=None, lr_scheduler=None,
41 | restore_cpu=False, strict=True, continue_training=False):
42 | checkpoint = torch.load(
43 | checkpoint_path,
44 | map_location=lambda storage, location: storage if restore_cpu else None)
45 | checkpoint['model'] = {
46 | re.sub('^module[.]', '', key): value
47 | for key, value in checkpoint['model'].items()
48 | }
49 | if not strict:
50 | msd = model.state_dict()
51 | checkpoint['model'] = {
52 | k: v for k, v in checkpoint['model'].items()
53 | if k in msd and msd[k].shape == v.shape
54 | }
55 | print("Loading {} parameters".format(len(checkpoint['model'])))
56 |
57 | model.load_state_dict(checkpoint['model'], strict=strict)
58 | if optimizer is not None and continue_training:
59 | optimizer.load_state_dict(checkpoint['optimizer'])
60 | if (lr_scheduler is not None and 'lr_scheduler' in checkpoint and
61 | continue_training):
62 | lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
63 | return checkpoint['epoch']
64 |
65 |
66 | def get_latest_checkpoint(model_dir):
67 | def key_fn(path):
68 | matches = re.match(r'checkpoint-(\d+)', os.path.basename(path))
69 | return int(matches.groups()[0])
70 | return sorted(
71 | glob.glob(os.path.join(model_dir, 'checkpoint-*')),
72 | key=key_fn
73 | )[-1]
74 |
75 |
76 | def tensor_to_numpy(tensor):
77 | if tensor.is_cuda:
78 | tensor = tensor.cpu()
79 | if isinstance(tensor, torch.autograd.Variable):
80 | tensor = tensor.data
81 | return tensor.numpy()
82 |
83 |
84 | def import_type(name, modules=[]):
85 | type_paths = [name] + [m + '.' + name for m in modules]
86 | for tp in type_paths:
87 | try:
88 | components = tp.split('.')
89 | mod = importlib.import_module(components[0])
90 | for comp in components[1:]:
91 | mod = getattr(mod, comp)
92 | return mod
93 |         except (ImportError, AttributeError):
94 | pass
95 | raise ImportError(name + ' not found')
96 |
97 |
98 | def flatten_dict(dict_obj):
99 | result = {}
100 | for key, value in dict_obj.items():
101 | if isinstance(value, dict):
102 | flattened = flatten_dict(value)
103 | result.update({
104 | "{}/{}".format(key, kk): fv
105 | for kk, fv in flattened.items()})
106 | else:
107 | result[key] = value
108 | return result
109 |
110 |
111 | def _get_palette_map(n_classes):
112 | def bitget(byteval, idx):
113 | return (byteval & (1 << idx)) != 0
114 | color_to_label = {}
115 | for k in range(0, n_classes):
116 | red = green = blue = 0
117 | cls = k
118 | for j in range(8):
119 | red = red | (bitget(cls, 0) << 7 - j)
120 | green = green | (bitget(cls, 1) << 7 - j)
121 | blue = blue | (bitget(cls, 2) << 7 - j)
122 | cls = cls >> 3
123 | color_to_label[(red, green, blue)] = k
124 | return color_to_label
125 |
126 |
127 | class ColorPalette:
128 | def __init__(self, colors):
129 | if isinstance(colors, int):
130 | self.color_to_label = _get_palette_map(colors)
131 | else:
132 | self.color_to_label = {v: k for k, v in enumerate(colors)}
133 | self.label_to_color = {v: k for k, v in self.color_to_label.items()}
134 |
135 | def encode_color(self, label_mask):
136 | """
137 | Encodes a label mask with its RGB color representation
138 | Arguments:
139 | label_mask: A numpy array with dimensions either
140 | (height, width), (height, width, 1), (batch_size, height, width)
141 | or (batch_size, height, width, 1)
142 |
143 | Returns: Color encoded representation
144 | """
145 | input_shape = label_mask.shape
146 | if len(input_shape) == 2:
147 | label_mask = np.reshape(label_mask, [1, *input_shape, 1])
148 | output_shape = input_shape + (3,)
149 | elif len(input_shape) == 3 and input_shape[-1] == 1:
150 | label_mask = np.expand_dims(label_mask, axis=0)
151 | output_shape = input_shape[:2] + (3,)
152 | elif len(input_shape) == 3:
153 | label_mask = np.expand_dims(label_mask, axis=-1)
154 | output_shape = input_shape + (3,)
155 | elif len(input_shape) == 4:
156 | output_shape = input_shape[:3] + (3,)
157 |
158 | palette_mask = np.zeros(label_mask.shape[:3] + (3,), dtype=np.uint8)
159 | for i, mask in self.label_to_color.items():
160 | palette_mask[np.where(np.all(label_mask == i, axis=-1))[:3]] = mask
161 |
162 | palette_mask = palette_mask.reshape(output_shape)
163 |
164 | return palette_mask
165 |
166 |
167 | def decode_color(self, palette_mask):
168 | label_mask = np.zeros(palette_mask.shape[:2], dtype=np.uint8)
169 | for mask, i in self.color_to_label.items():
170 | label_mask[np.where(np.all(palette_mask == mask, axis=-1))[:2]] = i
171 | return label_mask
172 |
173 |
174 | def load_model(
175 | checkpoint_path, model_cls, transformer_cls,
176 | model_args={}, transformer_args={}, device=None):
177 | checkpoint = torch.load(
178 | checkpoint_path,
179 | map_location=lambda storage, location: storage)
180 | model = model_cls(**{**checkpoint.get('model_args', {}), **model_args})
181 | if device is not None:
182 | model = model.to(device)
183 | transformer = transformer_cls(
184 | 'test',
185 | **{**checkpoint.get('transformer_args', {}), **transformer_args}
186 | )
187 | checkpoint['model'] = {
188 | re.sub('^module[.]', '', key): value
189 | for key, value in checkpoint['model'].items()
190 | }
191 | model.load_state_dict(checkpoint['model'], strict=True)
192 | model.eval()
193 | def predict(input_tensor):
194 | with torch.no_grad():
195 | transformed_image = transformer(
196 | input_tensor,
197 | torch.zeros(input_tensor.shape[1:], dtype=torch.long)
198 | )[0].unsqueeze(0)
199 | if device is not None:
200 | transformed_image = transformed_image.to(device)
201 | return model(transformed_image)
202 | return predict
203 |
--------------------------------------------------------------------------------
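A minimal sketch of `ColorPalette` round-tripping a label mask through the PASCAL-style palette built by `_get_palette_map`:

```python
import numpy as np
from pysemseg.utils import ColorPalette

palette = ColorPalette(21)  # 21 classes, PASCAL VOC bit-shuffle colors
labels = np.random.randint(0, 21, size=(4, 4)).astype(np.uint8)

colored = palette.encode_color(labels)  # (4, 4) -> (4, 4, 3) RGB
assert colored.shape == (4, 4, 3)
assert np.array_equal(palette.decode_color(colored), labels)
```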
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch>=0.4.4
2 | torchvision>=0.2.1
3 | tqdm>=4.19.5
4 | tensorboardX>=1.1
5 | ipython>=6.2.1
6 | opencv-python>=3.4.0.12
7 | Pillow>=5.0.0
8 | numpy>=1.14.0
9 | scikit-image>=0.13.1
10 | scipy
11 | PyYAML
12 | visdom>=0.1.8.5
13 | ConfigArgParse>=0.13.0
14 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | import io
3 | import re
4 | from setuptools import setup, find_packages
5 |
6 |
7 | def read(*names, **kwargs):
8 | with io.open(os.path.join(os.path.dirname(__file__), *names),
9 | encoding=kwargs.get("encoding", "utf8")) as fp:
10 | return fp.read()
11 |
12 |
13 | readme = read('README.rst')
14 |
15 | VERSION = '0.1.1'
16 |
17 | requirements = [
18 | "torch==1.0.0",
19 | "torchvision==0.2.1",
20 | "tqdm>=4.19.5",
21 | "tensorboardX>=1.1",
22 | "ipython>=6.2.1",
23 | "opencv-python>=3.4.0.12",
24 | "Pillow>=5.0.0",
25 | "numpy>=1.14.0",
26 | "scikit-image>=0.13.1",
27 |     "visdom>=0.1.8.5",
28 | "cython==0.29.1",
29 | "pycocotools==2.0.0",
30 | "ConfigArgParse==0.13.0"
31 | ]
32 |
33 | setup(
34 | # Metadata
35 | name='pysemseg',
36 | version=VERSION,
37 | author='Petko Nikolov',
38 | author_email='py.nikolov@gmail.com',
39 | url='https://github.com/petko-nikolov/pysemseg',
40 | description='Pytorch library for training Deep Learning models for Semantic Segmentation',
41 | long_description=readme,
42 | license='BSD',
43 |
44 | # Package info
45 | packages=find_packages(),
46 |
47 | zip_safe=True,
48 | install_requires=requirements,
49 | entry_points={
50 | 'console_scripts': [
51 | 'pysemseg-train=pysemseg.train:main'
52 | ]
53 | }
54 | )
55 |
--------------------------------------------------------------------------------