├── .gitignore
├── LICENSE.txt
├── demo.ipynb
├── docs
│ ├── Makefile
│ ├── cda.rst
│ ├── conf.py
│ ├── docbuild
│ │ ├── doctrees
│ │ │ ├── cda.doctree
│ │ │ ├── environment.pickle
│ │ │ ├── index.doctree
│ │ │ ├── modules.doctree
│ │ │ ├── pycda.doctree
│ │ │ └── start.doctree
│ │ └── html
│ │   ├── .buildinfo
│ │   ├── .nojekyll
│ │   ├── _images
│ │   │ ├── image1.bmp
│ │   │ └── plot1.png
│ │   ├── _modules
│ │   │ ├── index.html
│ │   │ ├── pycda.html
│ │   │ └── pycda
│ │   │   ├── classifiers.html
│ │   │   ├── detectors.html
│ │   │   ├── error_stats.html
│ │   │   ├── extractors.html
│ │   │   ├── predictions.html
│ │   │   ├── sample_data.html
│ │   │   └── util_functions.html
│ │   ├── _sources
│ │   │ ├── cda.rst.txt
│ │   │ ├── index.rst.txt
│ │   │ ├── modules.rst.txt
│ │   │ ├── pycda.rst.txt
│ │   │ └── start.rst.txt
│ │   ├── _static
│ │   │ ├── ajax-loader.gif
│ │   │ ├── alabaster.css
│ │   │ ├── basic.css
│ │   │ ├── classic.css
│ │   │ ├── comment-bright.png
│ │   │ ├── comment-close.png
│ │   │ ├── comment.png
│ │   │ ├── custom.css
│ │   │ ├── doctools.js
│ │   │ ├── documentation_options.js
│ │   │ ├── down-pressed.png
│ │   │ ├── down.png
│ │   │ ├── file.png
│ │   │ ├── jquery-3.2.1.js
│ │   │ ├── jquery.js
│ │   │ ├── minus.png
│ │   │ ├── plus.png
│ │   │ ├── pygments.css
│ │   │ ├── searchtools.js
│ │   │ ├── sidebar.js
│ │   │ ├── underscore-1.3.1.js
│ │   │ ├── underscore.js
│ │   │ ├── up-pressed.png
│ │   │ ├── up.png
│ │   │ └── websupport.js
│ │   ├── cda.html
│ │   ├── genindex.html
│ │   ├── index.html
│ │   ├── modules.html
│ │   ├── objects.inv
│ │   ├── py-modindex.html
│ │   ├── pycda.html
│ │   ├── search.html
│ │   ├── searchindex.js
│ │   └── start.html
│ ├── image1.bmp
│ ├── images
│ │ ├── sample1.bmp
│ │ └── sample1.png
│ ├── index.rst
│ ├── plot1.png
│ ├── pycda.rst
│ └── start.rst
├── manifest.in
├── pycda
│ ├── __init__.py
│ ├── classifiers.py
│ ├── detectors.py
│ ├── error_stats.py
│ ├── extractors.py
│ ├── models
│ │ ├── classifier_12x12_2.h5
│ │ ├── tinynet.h5
│ │ └── unet.h5
│ ├── predictions.py
│ ├── sample_data.py
│ ├── sample_imgs
│ │ ├── holdout_tile.pgm
│ │ ├── holdout_tile_l.png
│ │ ├── holdout_tile_labels.csv
│ │ ├── mercury.png
│ │ ├── rgb_sample.jpg
│ │ ├── selection0.png
│ │ ├── selection2.png
│ │ ├── selection3.png
│ │ ├── selection4.png
│ │ └── selection5.png
│ └── util_functions.py
├── readme.md
├── requirements.txt
├── setup.cfg
├── setup.py
└── test.py
/.gitignore:
--------------------------------------------------------------------------------
1 | pycda.egg-info
2 | pycda/__pycache__
3 | .ipynb_checkpoints
4 | sandbox.ipynb
5 | dist
6 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Michael Klear
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = PyCDA
8 | SOURCEDIR = .
9 | BUILDDIR = docbuild
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
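The Makefile above forwards every target to sphinx-build in "make mode", with SOURCEDIR=. and BUILDDIR=docbuild. As a rough sketch of what `make html` amounts to (assuming Sphinx is installed and the working directory is docs/):

    import subprocess

    # equivalent of `make html`: sphinx-build's make mode, output in docbuild/html
    subprocess.run(['sphinx-build', '-M', 'html', '.', 'docbuild'], check=True)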
/docs/cda.rst:
--------------------------------------------------------------------------------
1 | Crater Detection Pipeline
2 | =================================
3 |
4 | Pipeline and Image Objects
5 | --------------------------
6 |
7 | .. automodule:: pycda
8 | :members:
9 | :undoc-members:
10 | :show-inheritance:
11 |
12 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Configuration file for the Sphinx documentation builder.
4 | #
5 | # This file does only contain a selection of the most common options. For a
6 | # full list see the documentation:
7 | # http://www.sphinx-doc.org/en/stable/config
8 |
9 | # -- Path setup --------------------------------------------------------------
10 |
11 | # If extensions (or modules to document with autodoc) are in another directory,
12 | # add these directories to sys.path here. If the directory is relative to the
13 | # documentation root, use os.path.abspath to make it absolute, like shown here.
14 | #
15 | import os
16 | import sys
17 | sys.path.insert(0, os.path.abspath('../pycda'))
18 | sys.path.insert(0, os.path.abspath('../../'))
19 |
20 |
21 | # -- Project information -----------------------------------------------------
22 |
23 | project = 'PyCDA'
24 | copyright = '2018, Michael Klear'
25 | author = 'Michael Klear'
26 |
27 | # The short X.Y version
28 | version = '0.1'
29 | # The full version, including alpha/beta/rc tags
30 | release = '0.1.14'
31 |
32 |
33 | # -- General configuration ---------------------------------------------------
34 |
35 | # If your documentation needs a minimal Sphinx version, state it here.
36 | #
37 | # needs_sphinx = '1.0'
38 |
39 | # Add any Sphinx extension module names here, as strings. They can be
40 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
41 | # ones.
42 | extensions = [
43 | 'sphinx.ext.autodoc',
44 | 'sphinx.ext.doctest',
45 | 'sphinx.ext.coverage',
46 | 'sphinx.ext.viewcode',
47 | 'sphinx.ext.githubpages',
48 | ]
49 |
50 | # Add any paths that contain templates here, relative to this directory.
51 | templates_path = ['doctemplates']
52 |
53 | # The suffix(es) of source filenames.
54 | # You can specify multiple suffix as a list of string:
55 | #
56 | # source_suffix = ['.rst', '.md']
57 | source_suffix = '.rst'
58 |
59 | # The master toctree document.
60 | master_doc = 'index'
61 |
62 | # The language for content autogenerated by Sphinx. Refer to documentation
63 | # for a list of supported languages.
64 | #
65 | # This is also used if you do content translation via gettext catalogs.
66 | # Usually you set "language" from the command line for these cases.
67 | language = None
68 |
69 | # List of patterns, relative to source directory, that match files and
70 | # directories to ignore when looking for source files.
71 | # This pattern also affects html_static_path and html_extra_path .
72 | exclude_patterns = ['docbuild', 'Thumbs.db', '.DS_Store']
73 |
74 | # The name of the Pygments (syntax highlighting) style to use.
75 | pygments_style = 'sphinx'
76 |
77 |
78 | # -- Options for HTML output -------------------------------------------------
79 |
80 | # The theme to use for HTML and HTML Help pages. See the documentation for
81 | # a list of builtin themes.
82 | #
83 | html_theme = 'classic'
84 |
85 | # Theme options are theme-specific and customize the look and feel of a theme
86 | # further. For a list of options available for each theme, see the
87 | # documentation.
88 | #
89 | # html_theme_options = {}
90 |
91 | # Add any paths that contain custom static files (such as style sheets) here,
92 | # relative to this directory. They are copied after the builtin static files,
93 | # so a file named "default.css" will overwrite the builtin "default.css".
94 | html_static_path = ['docstatic']
95 |
96 | # Custom sidebar templates, must be a dictionary that maps document names
97 | # to template names.
98 | #
99 | # The default sidebars (for documents that don't match any pattern) are
100 | # defined by theme itself. Builtin themes are using these templates by
101 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
102 | # 'searchbox.html']``.
103 | #
104 | # html_sidebars = {}
105 |
106 |
107 | # -- Options for HTMLHelp output ---------------------------------------------
108 |
109 | # Output file base name for HTML help builder.
110 | htmlhelp_basename = 'PyCDAdoc'
111 |
112 |
113 | # -- Options for LaTeX output ------------------------------------------------
114 |
115 | latex_elements = {
116 | # The paper size ('letterpaper' or 'a4paper').
117 | #
118 | # 'papersize': 'letterpaper',
119 |
120 | # The font size ('10pt', '11pt' or '12pt').
121 | #
122 | # 'pointsize': '10pt',
123 |
124 | # Additional stuff for the LaTeX preamble.
125 | #
126 | # 'preamble': '',
127 |
128 | # Latex figure (float) alignment
129 | #
130 | # 'figure_align': 'htbp',
131 | }
132 |
133 | # Grouping the document tree into LaTeX files. List of tuples
134 | # (source start file, target name, title,
135 | # author, documentclass [howto, manual, or own class]).
136 | latex_documents = [
137 | (master_doc, 'PyCDA.tex', 'PyCDA Documentation',
138 | 'Michael Klear', 'manual'),
139 | ]
140 |
141 |
142 | # -- Options for manual page output ------------------------------------------
143 |
144 | # One entry per manual page. List of tuples
145 | # (source start file, name, description, authors, manual section).
146 | man_pages = [
147 | (master_doc, 'pycda', 'PyCDA Documentation',
148 | [author], 1)
149 | ]
150 |
151 |
152 | # -- Options for Texinfo output ----------------------------------------------
153 |
154 | # Grouping the document tree into Texinfo files. List of tuples
155 | # (source start file, target name, title, author,
156 | # dir menu entry, description, category)
157 | texinfo_documents = [
158 | (master_doc, 'PyCDA', 'PyCDA Documentation',
159 | author, 'PyCDA', 'One line description of project.',
160 | 'Miscellaneous'),
161 | ]
162 |
163 |
164 | # -- Options for Epub output -------------------------------------------------
165 |
166 | # Bibliographic Dublin Core info.
167 | epub_title = project
168 | epub_author = author
169 | epub_publisher = author
170 | epub_copyright = copyright
171 |
172 | # The unique identifier of the text. This can be a ISBN number
173 | # or the project homepage.
174 | #
175 | # epub_identifier = ''
176 |
177 | # A unique identification for the text.
178 | #
179 | # epub_uid = ''
180 |
181 | # A list of files that should not be packed into the epub file.
182 | epub_exclude_files = ['search.html']
183 |
184 |
185 | # -- Extension configuration -------------------------------------------------
186 |
--------------------------------------------------------------------------------
/docs/docbuild/doctrees/cda.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/doctrees/cda.doctree
--------------------------------------------------------------------------------
/docs/docbuild/doctrees/environment.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/doctrees/environment.pickle
--------------------------------------------------------------------------------
/docs/docbuild/doctrees/index.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/doctrees/index.doctree
--------------------------------------------------------------------------------
/docs/docbuild/doctrees/modules.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/doctrees/modules.doctree
--------------------------------------------------------------------------------
/docs/docbuild/doctrees/pycda.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/doctrees/pycda.doctree
--------------------------------------------------------------------------------
/docs/docbuild/doctrees/start.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/doctrees/start.doctree
--------------------------------------------------------------------------------
/docs/docbuild/html/.buildinfo:
--------------------------------------------------------------------------------
1 | # Sphinx build info version 1
2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
3 | config: d13ce8c784297a73757e2e29abf2d4a3
4 | tags: 645f666f9bcd5a90fca523b33c5a78b7
5 |
--------------------------------------------------------------------------------
/docs/docbuild/html/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/.nojekyll
--------------------------------------------------------------------------------
/docs/docbuild/html/_images/image1.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_images/image1.bmp
--------------------------------------------------------------------------------
/docs/docbuild/html/_images/plot1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_images/plot1.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_modules/index.html:
--------------------------------------------------------------------------------
[Sphinx-generated HTML page; markup stripped in this dump. Recoverable text:]

Overview: module code — PyCDA 0.1.14 documentation
All modules for which code is available
--------------------------------------------------------------------------------
/docs/docbuild/html/_modules/pycda/sample_data.html:
--------------------------------------------------------------------------------
[Sphinx viewcode page, "pycda.sample_data — PyCDA 0.1.14 documentation"; markup stripped in this dump. The embedded module source recovers to:]

from pycda import load_image
import pandas as pd
import pkg_resources
import random
import os


def get_sample_image(filename='holdout_tile.pgm', choose=False):
    """Retrieves sample data from the in-package directory.
    if choose=True, randomly selects sample photo from package.
    """
    path = pkg_resources.resource_filename('pycda', 'sample_imgs/')
    if choose:
        choices = os.listdir(path)
        filename = random.choice(choices)
        while filename[-4:] == '.csv':
            filename = random.choice(choices)
    file = path + filename
    img = load_image(file)
    return img


def get_sample_csv(filename='holdout_tile_labels.csv'):
    """Retrieves hand-labeled crater annotations for image
    holdout_tile.pgm (default returned by get_sample_image()).
    Returns pandas dataframe.
    """
    path = pkg_resources.resource_filename('pycda', 'sample_imgs/{}'.format(filename))
    df = pd.read_csv(path)
    return df
--------------------------------------------------------------------------------
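The recovered module above exposes two small helpers. A minimal usage sketch (names and defaults taken from the source above):

    from pycda.sample_data import get_sample_image, get_sample_csv

    img = get_sample_image()                 # default: holdout_tile.pgm
    labels = get_sample_csv()                # hand-labeled craters for that tile
    other = get_sample_image(choose=True)    # random non-.csv file from sample_imgs/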
/docs/docbuild/html/_sources/cda.rst.txt:
--------------------------------------------------------------------------------
1 | Crater Detection Pipeline
2 | =================================
3 |
4 | Pipeline and Image Objects
5 | --------------------------
6 |
7 | .. automodule:: pycda
8 | :members:
9 | :undoc-members:
10 | :show-inheritance:
11 |
12 |
--------------------------------------------------------------------------------
/docs/docbuild/html/_sources/index.rst.txt:
--------------------------------------------------------------------------------
1 | .. PyCDA documentation master file, created by
2 | sphinx-quickstart on Tue Mar 20 17:26:56 2018.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | PyCDA Documentation: Simple Crater Detection
7 | ============================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 |
12 | start.rst
13 | cda.rst
14 | pycda.rst
15 |
16 |
--------------------------------------------------------------------------------
/docs/docbuild/html/_sources/modules.rst.txt:
--------------------------------------------------------------------------------
1 | pycda
2 | =====
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | pycda
8 |
--------------------------------------------------------------------------------
/docs/docbuild/html/_sources/pycda.rst.txt:
--------------------------------------------------------------------------------
1 | PyCDA Submodules
2 | ================
3 |
4 | Detectors
5 | ----------------------
6 |
7 | .. automodule:: pycda.detectors
8 | :members:
9 | :undoc-members:
10 | :show-inheritance:
11 |
12 | Extractors
13 | -----------------------
14 |
15 | .. automodule:: pycda.extractors
16 | :members:
17 | :undoc-members:
18 | :show-inheritance:
19 |
20 | Classifiers
21 | ------------------------
22 |
23 | .. automodule:: pycda.classifiers
24 | :members:
25 | :undoc-members:
26 | :show-inheritance:
27 |
28 |
29 | Predictions
30 | ------------------------
31 |
32 | .. automodule:: pycda.predictions
33 | :members:
34 | :undoc-members:
35 | :show-inheritance:
36 |
37 | Error Stats
38 | -------------------------
39 |
40 | .. automodule:: pycda.error_stats
41 | :members:
42 | :undoc-members:
43 | :show-inheritance:
44 |
45 |
46 | Sample Data
47 | -------------------------
48 |
49 | .. automodule:: pycda.sample_data
50 | :members:
51 | :undoc-members:
52 | :show-inheritance:
53 |
54 |
55 |
--------------------------------------------------------------------------------
/docs/docbuild/html/_sources/start.rst.txt:
--------------------------------------------------------------------------------
1 | Getting Started
2 | =================================
3 |
4 | Installation
5 | ------------
6 |
7 | The latest version of PyCDA is available via pip; as the package is improving rapidly, you'll want the most recent release.
8 |
9 | PyCDA currently supports Python 3.6 only; we recommend using a virtual environment (such as `conda `_ or `virtualenv `_) to keep your dependencies straight. Because PyCDA has not been tested against ranges of dependency versions, pip may upgrade packages in the environment into which you install PyCDA; this can cause dependency conflicts elsewhere.
10 |
11 | From your Python 3.6 environment, run:
12 |
13 | ``pip install pycda``
14 |
15 | pip will install the dependencies for you. You've installed PyCDA!
16 |
17 | Make Detections
18 | ---------------
19 |
20 | To use PyCDA, you'll need to chop your image data into reasonably-sized pieces; 2,000 x 2,000 pixels per segment is a comfortable default, and machines with more RAM can handle much larger segments.
21 |
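One way to do the chopping is plain numpy slicing; here is a minimal sketch (it assumes numpy and Pillow, which are not part of PyCDA's API)::

    import numpy as np
    from PIL import Image

    def tile_image(path, out_dir, size=2000):
        """Split a large image into size x size tiles (edge tiles may be smaller)."""
        arr = np.array(Image.open(path))
        for i in range(0, arr.shape[0], size):
            for j in range(0, arr.shape[1], size):
                tile = arr[i:i + size, j:j + size]
                Image.fromarray(tile).save('{}/tile_{}_{}.png'.format(out_dir, i, j))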
22 | Put your image segments into a directory and note its path. For this example, we'll save this 1592 x 1128 pixel image (taken from the Mars Express HRSC instrument):
23 |
24 | .. image:: image1.bmp
25 |
26 | to the path:
27 |
28 | ``/data/image1.bmp``
29 |
30 | Now we're ready to begin. Open your python environment::
31 |
32 | -> % python3
33 | Python 3.6.4 |Anaconda, Inc.| (default, Jan 16 2018, 18:10:19)
34 | [GCC 7.2.0] on linux
35 | Type "help", "copyright", "credits" or "license" for more information.
36 | >>> from pycda import CDA, load_image
37 | >>> cda = CDA()
38 | >>> image = load_image('/data/image1.bmp')
39 | >>> prediction = cda.get_prediction(image)
40 |
41 | If you'd like to see the progress of your detection, pass the verbose=True keyword argument to the .get_prediction call, like::
42 |
43 | >>> prediction = cda.get_prediction(image, verbose=True)
44 |
45 | The CDA object returns a prediction object, assigned here to the variable "prediction". You can now save your results with::
46 |
47 | >>> prediction.to_csv('/data/results1.csv')
48 |
49 | To see your detections plotted over the input image, call::
50 |
51 | >>> prediction.show()
52 |
53 | This should open a popup window on your system with the plot. For our Mars image, it will look like this:
54 |
55 | .. image:: plot1.png
56 |
57 | You'll see that the detector performs well but isn't perfect; the large crater in the lower left corner is conspicuously missed. The model is designed to detect craters of 80 pixels in diameter or less; to capture larger craters, reduce the image resolution.
58 |
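One way to reduce the resolution is to downsample with Pillow before loading the segment; this is a sketch, not a PyCDA API::

    from PIL import Image

    img = Image.open('/data/image1.bmp')
    # halving the resolution brings ~160-pixel craters under the ~80-pixel limit
    img.resize((img.width // 2, img.height // 2)).save('/data/image1_half.bmp')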
59 | Set the image scale and save the plot for later reference::
60 |
61 | >>> prediction.set_scale(12.5)
62 | >>> prediction.show(save_plot='/data/myplot1.png')
63 |
64 | And you've begun. Happy crater hunting!
65 |
66 | Read about the submodules to learn how to modify your CDA pipeline and quantify detection errors; for a more in-depth example, see the `demo notebook `_.
67 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/ajax-loader.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/ajax-loader.gif
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/classic.css:
--------------------------------------------------------------------------------
1 | /*
2 | * classic.css_t
3 | * ~~~~~~~~~~~~~
4 | *
5 | * Sphinx stylesheet -- classic theme.
6 | *
7 | * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | @import url("basic.css");
13 |
14 | /* -- page layout ----------------------------------------------------------- */
15 |
16 | body {
17 | font-family: sans-serif;
18 | font-size: 100%;
19 | background-color: #11303d;
20 | color: #000;
21 | margin: 0;
22 | padding: 0;
23 | }
24 |
25 | div.document {
26 | background-color: #1c4e63;
27 | }
28 |
29 | div.documentwrapper {
30 | float: left;
31 | width: 100%;
32 | }
33 |
34 | div.bodywrapper {
35 | margin: 0 0 0 230px;
36 | }
37 |
38 | div.body {
39 | background-color: #ffffff;
40 | color: #000000;
41 | padding: 0 20px 30px 20px;
42 | }
43 |
44 | div.footer {
45 | color: #ffffff;
46 | width: 100%;
47 | padding: 9px 0 9px 0;
48 | text-align: center;
49 | font-size: 75%;
50 | }
51 |
52 | div.footer a {
53 | color: #ffffff;
54 | text-decoration: underline;
55 | }
56 |
57 | div.related {
58 | background-color: #133f52;
59 | line-height: 30px;
60 | color: #ffffff;
61 | }
62 |
63 | div.related a {
64 | color: #ffffff;
65 | }
66 |
67 | div.sphinxsidebar {
68 | }
69 |
70 | div.sphinxsidebar h3 {
71 | font-family: 'Trebuchet MS', sans-serif;
72 | color: #ffffff;
73 | font-size: 1.4em;
74 | font-weight: normal;
75 | margin: 0;
76 | padding: 0;
77 | }
78 |
79 | div.sphinxsidebar h3 a {
80 | color: #ffffff;
81 | }
82 |
83 | div.sphinxsidebar h4 {
84 | font-family: 'Trebuchet MS', sans-serif;
85 | color: #ffffff;
86 | font-size: 1.3em;
87 | font-weight: normal;
88 | margin: 5px 0 0 0;
89 | padding: 0;
90 | }
91 |
92 | div.sphinxsidebar p {
93 | color: #ffffff;
94 | }
95 |
96 | div.sphinxsidebar p.topless {
97 | margin: 5px 10px 10px 10px;
98 | }
99 |
100 | div.sphinxsidebar ul {
101 | margin: 10px;
102 | padding: 0;
103 | color: #ffffff;
104 | }
105 |
106 | div.sphinxsidebar a {
107 | color: #98dbcc;
108 | }
109 |
110 | div.sphinxsidebar input {
111 | border: 1px solid #98dbcc;
112 | font-family: sans-serif;
113 | font-size: 1em;
114 | }
115 |
116 |
117 |
118 | /* -- hyperlink styles ------------------------------------------------------ */
119 |
120 | a {
121 | color: #355f7c;
122 | text-decoration: none;
123 | }
124 |
125 | a:visited {
126 | color: #355f7c;
127 | text-decoration: none;
128 | }
129 |
130 | a:hover {
131 | text-decoration: underline;
132 | }
133 |
134 |
135 |
136 | /* -- body styles ----------------------------------------------------------- */
137 |
138 | div.body h1,
139 | div.body h2,
140 | div.body h3,
141 | div.body h4,
142 | div.body h5,
143 | div.body h6 {
144 | font-family: 'Trebuchet MS', sans-serif;
145 | background-color: #f2f2f2;
146 | font-weight: normal;
147 | color: #20435c;
148 | border-bottom: 1px solid #ccc;
149 | margin: 20px -20px 10px -20px;
150 | padding: 3px 0 3px 10px;
151 | }
152 |
153 | div.body h1 { margin-top: 0; font-size: 200%; }
154 | div.body h2 { font-size: 160%; }
155 | div.body h3 { font-size: 140%; }
156 | div.body h4 { font-size: 120%; }
157 | div.body h5 { font-size: 110%; }
158 | div.body h6 { font-size: 100%; }
159 |
160 | a.headerlink {
161 | color: #c60f0f;
162 | font-size: 0.8em;
163 | padding: 0 4px 0 4px;
164 | text-decoration: none;
165 | }
166 |
167 | a.headerlink:hover {
168 | background-color: #c60f0f;
169 | color: white;
170 | }
171 |
172 | div.body p, div.body dd, div.body li, div.body blockquote {
173 | text-align: justify;
174 | line-height: 130%;
175 | }
176 |
177 | div.admonition p.admonition-title + p {
178 | display: inline;
179 | }
180 |
181 | div.admonition p {
182 | margin-bottom: 5px;
183 | }
184 |
185 | div.admonition pre {
186 | margin-bottom: 5px;
187 | }
188 |
189 | div.admonition ul, div.admonition ol {
190 | margin-bottom: 5px;
191 | }
192 |
193 | div.note {
194 | background-color: #eee;
195 | border: 1px solid #ccc;
196 | }
197 |
198 | div.seealso {
199 | background-color: #ffc;
200 | border: 1px solid #ff6;
201 | }
202 |
203 | div.topic {
204 | background-color: #eee;
205 | }
206 |
207 | div.warning {
208 | background-color: #ffe4e4;
209 | border: 1px solid #f66;
210 | }
211 |
212 | p.admonition-title {
213 | display: inline;
214 | }
215 |
216 | p.admonition-title:after {
217 | content: ":";
218 | }
219 |
220 | pre {
221 | padding: 5px;
222 | background-color: #eeffcc;
223 | color: #333333;
224 | line-height: 120%;
225 | border: 1px solid #ac9;
226 | border-left: none;
227 | border-right: none;
228 | }
229 |
230 | code {
231 | background-color: #ecf0f3;
232 | padding: 0 1px 0 1px;
233 | font-size: 0.95em;
234 | }
235 |
236 | th {
237 | background-color: #ede;
238 | }
239 |
240 | .warning code {
241 | background: #efc2c2;
242 | }
243 |
244 | .note code {
245 | background: #d6d6d6;
246 | }
247 |
248 | .viewcode-back {
249 | font-family: sans-serif;
250 | }
251 |
252 | div.viewcode-block:target {
253 | background-color: #f4debf;
254 | border-top: 1px solid #ac9;
255 | border-bottom: 1px solid #ac9;
256 | }
257 |
258 | div.code-block-caption {
259 | color: #efefef;
260 | background-color: #1c4e63;
261 | }
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/comment-bright.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/comment-bright.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/comment-close.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/comment-close.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/comment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/comment.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/custom.css:
--------------------------------------------------------------------------------
1 | /* This file intentionally left blank. */
2 |
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/doctools.js:
--------------------------------------------------------------------------------
1 | /*
2 | * doctools.js
3 | * ~~~~~~~~~~~
4 | *
5 | * Sphinx JavaScript utilities for all documentation.
6 | *
7 | * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | /**
13 | * select a different prefix for underscore
14 | */
15 | $u = _.noConflict();
16 |
17 | /**
18 | * make the code below compatible with browsers without
19 | * an installed firebug like debugger
20 | if (!window.console || !console.firebug) {
21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
23 | "profile", "profileEnd"];
24 | window.console = {};
25 | for (var i = 0; i < names.length; ++i)
26 | window.console[names[i]] = function() {};
27 | }
28 | */
29 |
30 | /**
31 | * small helper function to urldecode strings
32 | */
33 | jQuery.urldecode = function(x) {
34 | return decodeURIComponent(x).replace(/\+/g, ' ');
35 | };
36 |
37 | /**
38 | * small helper function to urlencode strings
39 | */
40 | jQuery.urlencode = encodeURIComponent;
41 |
42 | /**
43 | * This function returns the parsed url parameters of the
44 | * current request. Multiple values per key are supported,
45 | * it will always return arrays of strings for the value parts.
46 | */
47 | jQuery.getQueryParameters = function(s) {
48 | if (typeof s === 'undefined')
49 | s = document.location.search;
50 | var parts = s.substr(s.indexOf('?') + 1).split('&');
51 | var result = {};
52 | for (var i = 0; i < parts.length; i++) {
53 | var tmp = parts[i].split('=', 2);
54 | var key = jQuery.urldecode(tmp[0]);
55 | var value = jQuery.urldecode(tmp[1]);
56 | if (key in result)
57 | result[key].push(value);
58 | else
59 | result[key] = [value];
60 | }
61 | return result;
62 | };
63 |
64 | /**
65 | * highlight a given string on a jquery object by wrapping it in
66 | * span elements with the given class name.
67 | */
68 | jQuery.fn.highlightText = function(text, className) {
69 | function highlight(node, addItems) {
70 | if (node.nodeType === 3) {
71 | var val = node.nodeValue;
72 | var pos = val.toLowerCase().indexOf(text);
73 | if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
74 | var span;
75 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
76 | if (isInSVG) {
77 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
78 | } else {
79 | span = document.createElement("span");
80 | span.className = className;
81 | }
82 | span.appendChild(document.createTextNode(val.substr(pos, text.length)));
83 | node.parentNode.insertBefore(span, node.parentNode.insertBefore(
84 | document.createTextNode(val.substr(pos + text.length)),
85 | node.nextSibling));
86 | node.nodeValue = val.substr(0, pos);
87 | if (isInSVG) {
88 | var bbox = span.getBBox();
89 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
90 | rect.x.baseVal.value = bbox.x;
91 | rect.y.baseVal.value = bbox.y;
92 | rect.width.baseVal.value = bbox.width;
93 | rect.height.baseVal.value = bbox.height;
94 | rect.setAttribute('class', className);
95 | var parentOfText = node.parentNode.parentNode;
96 | addItems.push({
97 | "parent": node.parentNode,
98 | "target": rect});
99 | }
100 | }
101 | }
102 | else if (!jQuery(node).is("button, select, textarea")) {
103 | jQuery.each(node.childNodes, function() {
104 | highlight(this, addItems);
105 | });
106 | }
107 | }
108 | var addItems = [];
109 | var result = this.each(function() {
110 | highlight(this, addItems);
111 | });
112 | for (var i = 0; i < addItems.length; ++i) {
113 | jQuery(addItems[i].parent).before(addItems[i].target);
114 | }
115 | return result;
116 | };
117 |
118 | /*
119 | * backward compatibility for jQuery.browser
120 | * This will be supported until firefox bug is fixed.
121 | */
122 | if (!jQuery.browser) {
123 | jQuery.uaMatch = function(ua) {
124 | ua = ua.toLowerCase();
125 |
126 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
127 | /(webkit)[ \/]([\w.]+)/.exec(ua) ||
128 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
129 | /(msie) ([\w.]+)/.exec(ua) ||
130 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
131 | [];
132 |
133 | return {
134 | browser: match[ 1 ] || "",
135 | version: match[ 2 ] || "0"
136 | };
137 | };
138 | jQuery.browser = {};
139 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
140 | }
141 |
142 | /**
143 | * Small JavaScript module for the documentation.
144 | */
145 | var Documentation = {
146 |
147 | init : function() {
148 | this.fixFirefoxAnchorBug();
149 | this.highlightSearchWords();
150 | this.initIndexTable();
151 |
152 | },
153 |
154 | /**
155 | * i18n support
156 | */
157 | TRANSLATIONS : {},
158 | PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
159 | LOCALE : 'unknown',
160 |
161 | // gettext and ngettext don't access this so that the functions
162 | // can safely bound to a different name (_ = Documentation.gettext)
163 | gettext : function(string) {
164 | var translated = Documentation.TRANSLATIONS[string];
165 | if (typeof translated === 'undefined')
166 | return string;
167 | return (typeof translated === 'string') ? translated : translated[0];
168 | },
169 |
170 | ngettext : function(singular, plural, n) {
171 | var translated = Documentation.TRANSLATIONS[singular];
172 | if (typeof translated === 'undefined')
173 | return (n == 1) ? singular : plural;
174 | return translated[Documentation.PLURALEXPR(n)];
175 | },
176 |
177 | addTranslations : function(catalog) {
178 | for (var key in catalog.messages)
179 | this.TRANSLATIONS[key] = catalog.messages[key];
180 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
181 | this.LOCALE = catalog.locale;
182 | },
183 |
184 | /**
185 | * add context elements like header anchor links
186 | */
187 | addContextElements : function() {
188 | $('div[id] > :header:first').each(function() {
189 | $('').
190 | attr('href', '#' + this.id).
191 | attr('title', _('Permalink to this headline')).
192 | appendTo(this);
193 | });
194 | $('dt[id]').each(function() {
195 | $('').
196 | attr('href', '#' + this.id).
197 | attr('title', _('Permalink to this definition')).
198 | appendTo(this);
199 | });
200 | },
201 |
202 | /**
203 | * workaround a firefox stupidity
204 | * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
205 | */
206 | fixFirefoxAnchorBug : function() {
207 | if (document.location.hash && $.browser.mozilla)
208 | window.setTimeout(function() {
209 | document.location.href += '';
210 | }, 10);
211 | },
212 |
213 | /**
214 | * highlight the search words provided in the url in the text
215 | */
216 | highlightSearchWords : function() {
217 | var params = $.getQueryParameters();
218 | var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
219 | if (terms.length) {
220 | var body = $('div.body');
221 | if (!body.length) {
222 | body = $('body');
223 | }
224 | window.setTimeout(function() {
225 | $.each(terms, function() {
226 | body.highlightText(this.toLowerCase(), 'highlighted');
227 | });
228 | }, 10);
229 |       $('<p class="highlight-link"><a href="javascript:Documentation.' +
230 |         'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
231 |           .appendTo($('#searchbox'));
232 | }
233 | },
234 |
235 | /**
236 | * init the domain index toggle buttons
237 | */
238 | initIndexTable : function() {
239 | var togglers = $('img.toggler').click(function() {
240 | var src = $(this).attr('src');
241 | var idnum = $(this).attr('id').substr(7);
242 | $('tr.cg-' + idnum).toggle();
243 | if (src.substr(-9) === 'minus.png')
244 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
245 | else
246 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
247 | }).css('display', '');
248 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
249 | togglers.click();
250 | }
251 | },
252 |
253 | /**
254 | * helper function to hide the search marks again
255 | */
256 | hideSearchWords : function() {
257 | $('#searchbox .highlight-link').fadeOut(300);
258 | $('span.highlighted').removeClass('highlighted');
259 | },
260 |
261 | /**
262 | * make the url absolute
263 | */
264 | makeURL : function(relativeURL) {
265 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
266 | },
267 |
268 | /**
269 | * get the current relative url
270 | */
271 | getCurrentURL : function() {
272 | var path = document.location.pathname;
273 | var parts = path.split(/\//);
274 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
275 | if (this === '..')
276 | parts.pop();
277 | });
278 | var url = parts.join('/');
279 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
280 | },
281 |
282 | initOnKeyListeners: function() {
283 | $(document).keyup(function(event) {
284 | var activeElementType = document.activeElement.tagName;
285 | // don't navigate when in search box or textarea
286 | if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') {
287 | switch (event.keyCode) {
288 | case 37: // left
289 | var prevHref = $('link[rel="prev"]').prop('href');
290 | if (prevHref) {
291 | window.location.href = prevHref;
292 | return false;
293 | }
294 | case 39: // right
295 | var nextHref = $('link[rel="next"]').prop('href');
296 | if (nextHref) {
297 | window.location.href = nextHref;
298 | return false;
299 | }
300 | }
301 | }
302 | });
303 | }
304 | };
305 |
306 | // quick alias for translations
307 | _ = Documentation.gettext;
308 |
309 | $(document).ready(function() {
310 | Documentation.init();
311 | });
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/documentation_options.js:
--------------------------------------------------------------------------------
1 | var DOCUMENTATION_OPTIONS = {
2 | URL_ROOT: '',
3 | VERSION: '0.1.14',
4 | LANGUAGE: 'None',
5 | COLLAPSE_INDEX: false,
6 | FILE_SUFFIX: '.html',
7 | HAS_SOURCE: true,
8 | SOURCELINK_SUFFIX: '.txt'
9 | };
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/down-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/down-pressed.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/down.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/down.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/file.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/file.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/minus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/minus.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/plus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/plus.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/pygments.css:
--------------------------------------------------------------------------------
1 | .highlight .hll { background-color: #ffffcc }
2 | .highlight { background: #eeffcc; }
3 | .highlight .c { color: #408090; font-style: italic } /* Comment */
4 | .highlight .err { border: 1px solid #FF0000 } /* Error */
5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */
6 | .highlight .o { color: #666666 } /* Operator */
7 | .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */
8 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */
9 | .highlight .cp { color: #007020 } /* Comment.Preproc */
10 | .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */
11 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */
12 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
13 | .highlight .gd { color: #A00000 } /* Generic.Deleted */
14 | .highlight .ge { font-style: italic } /* Generic.Emph */
15 | .highlight .gr { color: #FF0000 } /* Generic.Error */
16 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
17 | .highlight .gi { color: #00A000 } /* Generic.Inserted */
18 | .highlight .go { color: #333333 } /* Generic.Output */
19 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
20 | .highlight .gs { font-weight: bold } /* Generic.Strong */
21 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
22 | .highlight .gt { color: #0044DD } /* Generic.Traceback */
23 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */
24 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */
25 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */
26 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */
27 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */
28 | .highlight .kt { color: #902000 } /* Keyword.Type */
29 | .highlight .m { color: #208050 } /* Literal.Number */
30 | .highlight .s { color: #4070a0 } /* Literal.String */
31 | .highlight .na { color: #4070a0 } /* Name.Attribute */
32 | .highlight .nb { color: #007020 } /* Name.Builtin */
33 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */
34 | .highlight .no { color: #60add5 } /* Name.Constant */
35 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */
36 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */
37 | .highlight .ne { color: #007020 } /* Name.Exception */
38 | .highlight .nf { color: #06287e } /* Name.Function */
39 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */
40 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */
41 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */
42 | .highlight .nv { color: #bb60d5 } /* Name.Variable */
43 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */
44 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */
45 | .highlight .mb { color: #208050 } /* Literal.Number.Bin */
46 | .highlight .mf { color: #208050 } /* Literal.Number.Float */
47 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */
48 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */
49 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */
50 | .highlight .sa { color: #4070a0 } /* Literal.String.Affix */
51 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */
52 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */
53 | .highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */
54 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */
55 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */
56 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */
57 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */
58 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */
59 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */
60 | .highlight .sr { color: #235388 } /* Literal.String.Regex */
61 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */
62 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */
63 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */
64 | .highlight .fm { color: #06287e } /* Name.Function.Magic */
65 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */
66 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */
67 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */
68 | .highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */
69 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/sidebar.js:
--------------------------------------------------------------------------------
1 | /*
2 | * sidebar.js
3 | * ~~~~~~~~~~
4 | *
5 | * This script makes the Sphinx sidebar collapsible.
6 | *
7 | * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds
8 | * in .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton
9 | * used to collapse and expand the sidebar.
10 | *
11 | * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden
12 | * and the width of the sidebar and the margin-left of the document
13 | * are decreased. When the sidebar is expanded the opposite happens.
14 | * This script saves a per-browser/per-session cookie used to
15 | * remember the position of the sidebar among the pages.
16 | * Once the browser is closed the cookie is deleted and the position
17 | * reset to the default (expanded).
18 | *
19 | * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
20 | * :license: BSD, see LICENSE for details.
21 | *
22 | */
23 |
24 | $(function() {
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 | // global elements used by the functions.
34 | // the 'sidebarbutton' element is defined as global after its
35 | // creation, in the add_sidebar_button function
36 | var bodywrapper = $('.bodywrapper');
37 | var sidebar = $('.sphinxsidebar');
38 | var sidebarwrapper = $('.sphinxsidebarwrapper');
39 |
40 | // for some reason, the document has no sidebar; do not run into errors
41 | if (!sidebar.length) return;
42 |
43 | // original margin-left of the bodywrapper and width of the sidebar
44 | // with the sidebar expanded
45 | var bw_margin_expanded = bodywrapper.css('margin-left');
46 | var ssb_width_expanded = sidebar.width();
47 |
48 | // margin-left of the bodywrapper and width of the sidebar
49 | // with the sidebar collapsed
50 | var bw_margin_collapsed = '.8em';
51 | var ssb_width_collapsed = '.8em';
52 |
53 | // colors used by the current theme
54 | var dark_color = $('.related').css('background-color');
55 | var light_color = $('.document').css('background-color');
56 |
57 | function sidebar_is_collapsed() {
58 | return sidebarwrapper.is(':not(:visible)');
59 | }
60 |
61 | function toggle_sidebar() {
62 | if (sidebar_is_collapsed())
63 | expand_sidebar();
64 | else
65 | collapse_sidebar();
66 | }
67 |
68 | function collapse_sidebar() {
69 | sidebarwrapper.hide();
70 | sidebar.css('width', ssb_width_collapsed);
71 | bodywrapper.css('margin-left', bw_margin_collapsed);
72 | sidebarbutton.css({
73 | 'margin-left': '0',
74 | 'height': bodywrapper.height()
75 | });
76 | sidebarbutton.find('span').text('»');
77 | sidebarbutton.attr('title', _('Expand sidebar'));
78 | document.cookie = 'sidebar=collapsed';
79 | }
80 |
81 | function expand_sidebar() {
82 | bodywrapper.css('margin-left', bw_margin_expanded);
83 | sidebar.css('width', ssb_width_expanded);
84 | sidebarwrapper.show();
85 | sidebarbutton.css({
86 | 'margin-left': ssb_width_expanded-12,
87 | 'height': bodywrapper.height()
88 | });
89 | sidebarbutton.find('span').text('«');
90 | sidebarbutton.attr('title', _('Collapse sidebar'));
91 | document.cookie = 'sidebar=expanded';
92 | }
93 |
94 | function add_sidebar_button() {
95 | sidebarwrapper.css({
96 | 'float': 'left',
97 | 'margin-right': '0',
98 | 'width': ssb_width_expanded - 28
99 | });
100 | // create the button
101 | sidebar.append(
102 | ''
103 | );
104 | var sidebarbutton = $('#sidebarbutton');
105 | light_color = sidebarbutton.css('background-color');
106 | // find the height of the viewport to center the '<<' in the page
107 | var viewport_height;
108 | if (window.innerHeight)
109 | viewport_height = window.innerHeight;
110 | else
111 | viewport_height = $(window).height();
112 | sidebarbutton.find('span').css({
113 | 'display': 'block',
114 | 'margin-top': (viewport_height - sidebar.position().top - 20) / 2
115 | });
116 |
117 | sidebarbutton.click(toggle_sidebar);
118 | sidebarbutton.attr('title', _('Collapse sidebar'));
119 | sidebarbutton.css({
120 | 'color': '#FFFFFF',
121 | 'border-left': '1px solid ' + dark_color,
122 | 'font-size': '1.2em',
123 | 'cursor': 'pointer',
124 | 'height': bodywrapper.height(),
125 | 'padding-top': '1px',
126 | 'margin-left': ssb_width_expanded - 12
127 | });
128 |
129 | sidebarbutton.hover(
130 | function () {
131 | $(this).css('background-color', dark_color);
132 | },
133 | function () {
134 | $(this).css('background-color', light_color);
135 | }
136 | );
137 | }
138 |
139 | function set_position_from_cookie() {
140 | if (!document.cookie)
141 | return;
142 | var items = document.cookie.split(';');
143 |     for(var k=0; k<items.length; k++) {
      [remainder of sidebar.js truncated in this dump]
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/underscore-1.3.1.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/underscore-1.3.1.js
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/up-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/up-pressed.png
--------------------------------------------------------------------------------
/docs/docbuild/html/_static/up.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/_static/up.png
--------------------------------------------------------------------------------
/docs/docbuild/html/cda.html:
--------------------------------------------------------------------------------
[Sphinx-built page "Crater Detection Pipeline — PyCDA 0.1.14 documentation"; markup stripped. The page documents pycda.CDA, pycda.CDAImage, and pycda.load_image, duplicating the docstrings in /pycda/__init__.py below.]
--------------------------------------------------------------------------------
/docs/docbuild/html/index.html:
--------------------------------------------------------------------------------
[Sphinx-built page "PyCDA Documentation: Simple Crater Detection — PyCDA 0.1.14 documentation"; markup stripped, mirrors /docs/index.rst below.]
--------------------------------------------------------------------------------
/docs/docbuild/html/modules.html:
--------------------------------------------------------------------------------
[Sphinx-built page "pycda — PyCDA 0.1.13 documentation"; markup stripped, module table of contents only.]
--------------------------------------------------------------------------------
/docs/docbuild/html/objects.inv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/docbuild/html/objects.inv
--------------------------------------------------------------------------------
/docs/docbuild/html/py-modindex.html:
--------------------------------------------------------------------------------
[Sphinx-built page "Python Module Index — PyCDA 0.1.14 documentation"; generated module index, markup stripped.]
--------------------------------------------------------------------------------
/docs/docbuild/html/search.html:
--------------------------------------------------------------------------------
[Sphinx-built page "Search — PyCDA 0.1.14 documentation"; generated search-page boilerplate omitted.]
--------------------------------------------------------------------------------
/docs/docbuild/html/searchindex.js:
--------------------------------------------------------------------------------
[Sphinx-generated search index; machine-built term mappings omitted.]
--------------------------------------------------------------------------------
/docs/docbuild/html/start.html:
--------------------------------------------------------------------------------
[Sphinx-built page "Getting Started — PyCDA 0.1.14 documentation"; markup stripped, content duplicates /docs/start.rst below.]
--------------------------------------------------------------------------------
/docs/image1.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/image1.bmp
--------------------------------------------------------------------------------
/docs/images/sample1.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/images/sample1.bmp
--------------------------------------------------------------------------------
/docs/images/sample1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/images/sample1.png
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. PyCDA documentation master file, created by
2 | sphinx-quickstart on Tue Mar 20 17:26:56 2018.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | PyCDA Documentation: Simple Crater Detection
7 | ============================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 |
12 | start.rst
13 | cda.rst
14 | pycda.rst
15 |
16 |
--------------------------------------------------------------------------------
/docs/plot1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/docs/plot1.png
--------------------------------------------------------------------------------
/docs/pycda.rst:
--------------------------------------------------------------------------------
1 | PyCDA Submodules
2 | ================
3 |
4 | Detectors
5 | ----------------------
6 |
7 | .. automodule:: pycda.detectors
8 | :members:
9 | :undoc-members:
10 | :show-inheritance:
11 |
12 | Extractors
13 | -----------------------
14 |
15 | .. automodule:: pycda.extractors
16 | :members:
17 | :undoc-members:
18 | :show-inheritance:
19 |
20 | Classifiers
21 | ------------------------
22 |
23 | .. automodule:: pycda.classifiers
24 | :members:
25 | :undoc-members:
26 | :show-inheritance:
27 |
28 |
29 | Predictions
30 | ------------------------
31 |
32 | .. automodule:: pycda.predictions
33 | :members:
34 | :undoc-members:
35 | :show-inheritance:
36 |
37 | Error Stats
38 | -------------------------
39 |
40 | .. automodule:: pycda.error_stats
41 | :members:
42 | :undoc-members:
43 | :show-inheritance:
44 |
45 |
46 | Sample Data
47 | -------------------------
48 |
49 | .. automodule:: pycda.sample_data
50 | :members:
51 | :undoc-members:
52 | :show-inheritance:
53 |
54 |
55 |
--------------------------------------------------------------------------------
/docs/start.rst:
--------------------------------------------------------------------------------
1 | Getting Started
2 | =================================
3 |
4 | Installation
5 | ------------
6 |
7 | The latest version of PyCDA is available via pip. The project is improving rapidly, so you'll want the latest version.
8 |
9 | PyCDA currently supports Python 3.6 only; we recommend a virtual environment (such as `conda `_ or `virtualenv `_) to keep your dependencies straight. PyCDA has not been tested across dependency version ranges, so pip may upgrade packages in the environment where you install it; installing into a shared environment can cause dependency conflicts elsewhere.
10 |
11 | From your Python 3.6 environment, run:
12 |
13 | ``pip install pycda``
14 |
15 | pip will install the dependencies for you. You've installed PyCDA!
16 |
17 | Make Detections
18 | ---------------
19 |
20 | To use PyCDA, you'll need to chop your image data into reasonably sized pieces; 2,000 x 2,000 pixels per segment is very manageable, and better machines can handle much bigger. The practical limit depends on your available RAM.
21 |
22 | Put your image segments into a directory and denote its path. For this example, we'll save this 1592 x 1128 pixel image (taken from the Mars Express HRSC instrument):
23 |
24 | .. image:: image1.bmp
25 |
26 | to the path:
27 |
28 | ``/data/image1.bmp``
29 |
30 | Now we're ready to begin. Open your python environment::
31 |
32 | -> % python3
33 | Python 3.6.4 |Anaconda, Inc.| (default, Jan 16 2018, 18:10:19)
34 | [GCC 7.2.0] on linux
35 | Type "help", "copyright", "credits" or "license" for more information.
36 | >>>from pycda import CDA, load_image
37 | >>>cda = CDA()
38 | >>>image = load_image('/data/image1.bmp')
39 | >>>prediction = cda.get_prediction(image)
40 |
41 | If you'd like to see the progress of your detection, pass the verbose=True keyword argument to the .get_prediction call, like::
42 |
43 | >>>prediction = cda.get_prediction(image, verbose=True)
44 |
45 | The CDA object will return a prediction object which here is assigned to the alias "prediction." You can now save your results with::
46 |
47 | >>>prediction.to_csv('/data/results1.csv')
48 |
49 | To see your detections plotted over the input image, call::
50 |
51 | >>>prediction.show()
52 |
53 | This should open a popup window on your system with the plot. For our Mars image, it will look like this:
54 |
55 | .. image:: plot1.png
56 |
57 | You'll see the detector performs well but isn't perfect: the large crater in the lower left corner is conspicuously missed. The model is designed to detect craters of 80 pixels in diameter or less; to capture larger craters, reduce the image resolution.
58 |
59 | Set the image scale and save the plot for later reference::
60 |
61 | >>>prediction.set_scale(12.5)
62 | >>>prediction.show(save_plot='/data/myplot1.png')
63 |
64 | And you've begun. Happy crater hunting!
65 |
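For reference, the whole workflow above condenses to a short script (paths are illustrative)::

    from pycda import CDA, load_image

    cda = CDA()
    image = load_image('/data/image1.bmp')
    prediction = cda.get_prediction(image, verbose=True)
    prediction.set_scale(12.5)
    prediction.to_csv('/data/results1.csv')
    prediction.show(save_plot='/data/myplot1.png')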
66 | Read about the submodules to learn how to modify your CDA pipeline and quantify detection errors; for a more in-depth example, see the `demo notebook `_.
67 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/manifest.in:
--------------------------------------------------------------------------------
1 | include pycda/models/*.h5
2 | include pycda/sample_imgs/*.csv
3 | include pycda/sample_imgs/*.pgm
4 | include pycda/sample_imgs/*.jpg
5 | include pycda/sample_imgs/*.png
6 | include pycda/sample_imgs/*.bmp
7 |
--------------------------------------------------------------------------------
/pycda/__init__.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from skimage import io
4 | import matplotlib.pyplot as plt
5 | from PIL import Image
6 | import gc
7 | from pycda import detectors
8 | from pycda import extractors
9 | from pycda import classifiers
10 | from pycda import predictions
11 | from pycda import util_functions
12 | from pycda.util_functions import update_progress, resolve_color_channels, get_steps, get_crop_specs, crop_array, remove_ticks, make_batch
13 |
14 | class CDA(object):
15 | """CDA is the crater detection model pipeline. Its three
16 | main components are the detector, the extractor, and
17 | the classifier; with no keywords, will initialize with
18 | some basic models with general applicability and fast
19 | performance.
20 |
21 | Attributes:
22 | detector (str, detector object): The detector model used by
23 | pipeline. Accepted text arguments are:
24 | 'tiny' : default, fast, performant detector
25 | 'unet' : similar model to tiny, more robust, slower
26 | 'dummy': does not make detections, used for testing.
27 |
28 | extractor (str, extractor object): The extractor model used by
29 | pipeline. Accepted text arguments are:
30 | 'fast_circle' : default, converts detections into circles
31 | 'watershed' : uses watershed segmentation to generate proposals
32 | 'dummy' : does not make extractions, used for testing
33 |
34 | classifier (str, classifier object): The classifier model used
35 | by pipeline. Accepted string arguments are:
36 | 'convolution' : default, uses a convolutional neural net model
37 | 'none' : use no classifier (None type also accepted)
38 | 'dummy' : assigns random likelihoods, used for testing.
39 |
40 | """
41 | def __init__(self, detector='tiny', extractor='fast_circle', classifier='convolution'):
42 | """detector, extractor, and classifier keywords can accept initialized
43 | models or certain strings. See CDA class docstring
44 | """
45 | #initialize the models.
46 | self.detector = detectors.get(detector)
47 | if isinstance(extractor, list):
48 | self.extractor = [extractors.get(ex) for ex in extractor]
49 | else:
50 | self.extractor = [extractors.get(extractor)]
51 | self.classifier = classifiers.get(classifier)
52 | #track previous predictions.
53 | self.predictions = []
54 |
55 | def _get_prediction(self, input_image):
56 | """Checks to see if prediction has been made on the same image.
57 | If not found, creates a new prediction object.
58 | """
59 | for prediction in self.predictions:
60 | if np.array_equal(prediction.input_image, input_image):
61 | return prediction
62 |
63 |
64 | new_prediction = predictions.Prediction(input_image, len(self.predictions), self)
65 | self.predictions.append(new_prediction)
66 | return new_prediction
67 |
68 | def _prepare_detector(self, prediction):
69 | """Gets prediction object ready for use by
70 | the detector by populating coordinate lists.
71 | """
72 | if prediction.verbose:
73 | print('Preparing detection steps...')
74 |
75 | #Calculate latitude steps
76 | height = prediction.input_image.shape[0]
77 | yin = self.detector.input_dims[0]
78 | yout = self.detector.output_dims[0]
79 | y_steps_in, y_steps_out = get_steps(height, yin, yout)
80 |
81 | #Calculate longitude steps
82 | width = prediction.input_image.shape[1]
83 | xin = self.detector.input_dims[1]
84 | xout = self.detector.output_dims[1]
85 | x_steps_in, x_steps_out = get_steps(width, xin, xout)
86 |
87 | #iterate through every step and record in prediction object
88 | for ystep in zip(y_steps_in, y_steps_out):
89 | for xstep in zip(x_steps_in, x_steps_out):
90 | #Record ordered positional steps for input (lat, long)
91 | prediction.image_split_coords.append((ystep[0], xstep[0]))
92 | #and for output (lat, long)
93 | prediction.det_split_coords.append((ystep[1], xstep[1]))
94 |
95 | #set all predictions status to False
96 | prediction.detections_made = np.full((len(prediction.image_split_coords)), False, dtype=bool)
97 | if prediction.verbose:
98 | print('Done!\nDetection will require {} steps'.format(len(prediction.detections_made)))
99 | return prediction
100 |
101 | def _batch_detect(self, prediction, batch_size=None, verbose=False):
102 | """Generates batches to feed to detector,
103 | gets detection maps, handles bookkeeping.
104 | Returned prediction object has a completed detection
105 | map.
106 | """
107 | if verbose:
108 | print('Performing detections...')
109 | #determine batch size
110 | if batch_size is None:
111 | batch_size = self.detector.rec_batch_size
112 | image = util_functions.resolve_color_channels(prediction, self.detector)
113 | crop_dims = self.detector.input_dims
114 | #rescale color values for model if necessary
115 | if image.dtype == np.uint8:
116 | image = image/255
117 | while any(~prediction.detections_made):
118 | if verbose:
119 | progress = prediction.detections_made.sum()
120 | progress = progress/len(prediction.detections_made)
121 | update_progress(progress)
122 | #Find next index range for detection
123 | first_index = prediction.detections_made.sum()
124 | remaining_predictions = len(prediction.detections_made) - first_index
125 | last_index = min(first_index+batch_size, first_index+remaining_predictions)
126 | #Record index range in slice object
127 | indices = slice(first_index, last_index)
128 | #Get cropping coordinates
129 | crop_coords = prediction.image_split_coords[indices]
130 | #Build batch and predict
131 | batch = make_batch(image, crop_dims, crop_coords)
132 | results = self.detector.predict(batch)
133 | #Record detections to prediction object
134 | indices_enumerated = range(indices.start, indices.stop)
135 | prediction._batch_record_detection(results, indices_enumerated)
136 | prediction.detections_made[indices] = True
137 | #delete duplicate image for memory management.
138 | if verbose:
139 | update_progress(1)
140 | del image
141 | return prediction
142 |
143 | def _batch_classify(self, prediction, batch_size=None, verbose=False):
144 | """Performs batch classifications on crater proposals.
145 | Updates the likelihood values for prediction proposals."""
146 | if verbose:
147 | print('performing classifications...')
148 | #determine batch size
149 | if batch_size is None:
150 | batch_size = self.classifier.rec_batch_size
151 | dim = self.classifier.input_dims
152 | df = prediction.proposals
153 | iter_row = df.iterrows()
154 | image = resolve_color_channels(prediction, self.classifier)
155 | #tracks all results
156 | likelihoods = []
157 | #Will switch when iteration is through
158 | done = False
159 | while not done:
160 | #records cropping coords for batch maker
161 | crops = []
162 | crop_dims = []
163 | while len(crops) < batch_size:
164 | try:
165 | i, row = next(iter_row)
166 | if verbose:
167 | progress = i/len(df)
168 | update_progress(progress)
169 | except StopIteration:
170 | done = True
171 | break
172 | proposal = row[['lat', 'long', 'diameter']].values
173 | crop_orgn, crop_dim = get_crop_specs(proposal, self.classifier)
174 | crops.append(crop_orgn)
175 | crop_dims.append(crop_dim)
176 | batch = make_batch(image, crop_dims, crops, out_dims=dim)
177 | results = self.classifier.predict(batch)
178 | likelihoods += [result[0] for result in results]
179 | prediction.proposals['likelihood'] = likelihoods
180 | #delete temporary image from memory
181 | del image
182 | return prediction
183 |
184 | def _predict(self, input_image, verbose=False):
185 | """Calls a series of functions to perform prediction on input
186 | image. Returns a prediction object.
187 | """
188 | if isinstance(input_image, CDAImage):
189 | input_image = input_image.as_array()
190 | elif isinstance(input_image, type(np.array([0]))):
191 | pass
192 | else:
193 | input_image = np.array(input_image)
194 | prediction = self._get_prediction(input_image)
195 | if np.all(prediction.detections_made):
196 | if verbose:
197 | print('detections already made!')
198 | prediction.proposals = pd.DataFrame(columns=['lat', 'long', 'diameter', 'likelihood'])
199 | else:
200 | prediction = self._prepare_detector(prediction)
201 | prediction = self._batch_detect(prediction, verbose=verbose)
202 | for ext in self.extractor:
203 | result = ext(prediction.detection_map, verbose=verbose)
204 | prediction.proposals = pd.concat([prediction.proposals, result], axis=0)
205 | #Reset proposal indices
206 | prediction.proposals.index = range(len(prediction.proposals))
207 | if verbose:
208 | print(
209 | len(prediction.proposals),
210 | ' proposals extracted from detection map.\n'
211 | )
212 | prediction = self._batch_classify(prediction, verbose=verbose)
213 | if verbose:
214 | print(
215 | '\n',
216 | np.where(prediction.proposals.likelihood > prediction.threshold, 1, 0).sum(),
217 | ' objects classified as craters.\n'
218 | )
219 | return prediction
220 |
221 | def predict(self, input_image, threshold=.5, verbose=False):
222 | """Performs a detection on input_image. Returns a pandas
223 | dataframe with detections.
224 | """
225 | prediction = self._predict(input_image, verbose=verbose)
226 | return prediction._predict(threshold=threshold)
227 |
228 | def get_prediction(self, input_image, verbose=False):
229 | """Performs a detection on input_image. Returns a
230 | prediction object.
231 | """
232 | return self._predict(input_image, verbose=verbose)
233 |
234 | class CDAImage(object):
235 | """CDA image object; image stored as array; .show()
236 | method allows for easy viewing.
237 | """
238 | def __init__(self, image):
239 | #Common use case: numpy array passed in; stored as-is
240 | if isinstance(image, type(np.array([0]))):
241 | self.image = image
242 | #In case another CDAImage object passed in
243 | elif isinstance(image, type(self)):
244 | self.image = image.image
245 | #In case PIL image passed in
246 | elif isinstance(image, type(Image.new('1', (1,1)))):
247 | self.image = np.array(image)
248 | else:
249 | raise Exception('Image object constructor does not'
250 | ' understand input image type.')
251 |
252 | def show(self, show_ticks=False):
253 | """Displays the input image by plotting raster"""
254 | fig, ax = plt.subplots();
255 | ax.imshow(self.image, cmap='Greys_r');
256 | if not show_ticks:
257 | ax = remove_ticks(ax)
258 | plt.show();
259 | return None
260 |
261 | def as_array(self):
262 | """Returns the image as a numpy array."""
263 | return self.image
264 |
265 | def load_image(filename):
266 | """load an image from the input filename path.
267 | returns a CDAImage object.
268 | """
269 | image = io.imread(filename)
270 | try:
271 | assert isinstance(image, type(np.array([0])))
272 | except AssertionError:
273 | raise Exception('Could not load file into numpy array.')
274 | return CDAImage(image)
275 |
276 |
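The constructor above accepts keyword strings or initialized model objects, and the extractor argument may be a list, in which case each extractor's proposals are concatenated. A minimal usage sketch (the image path is illustrative):

    from pycda import CDA, load_image

    # Strings or initialized model objects are accepted; a list of
    # extractors runs each one and concatenates their proposals.
    cda = CDA(detector='tiny', extractor=['fast_circle', 'watershed'],
              classifier='convolution')
    image = load_image('/data/image1.bmp')  # illustrative path
    detections = cda.predict(image, threshold=0.5, verbose=True)  # pandas dataframe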
--------------------------------------------------------------------------------
/pycda/classifiers.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pkg_resources
3 |
4 | class ClassifierBaseClass(object):
5 | """Base object for crater classifier. Classifiers
6 | make a binary prediction on a crater proposal and return
7 | a value between zero and one; one represents a true crater
8 | and zero represents a false proposal.
9 | """
10 | def __init__(self):
11 | #Specify input dimensions for crater classifier
12 | self.input_dims = (0, 0)
13 | #specify the diameter of the crater candidate
14 | #should have in the model input, in pixels
15 | self.crater_pixels = 0
16 | #Specify number of input color channels
17 | self.input_channels = 1
18 | #Recommend batch size reasonable for modest computer
19 | self.rec_batch_size = 32
20 |
21 | def predict(self, batch):
22 | """Prediction call should return an array of predictions
23 | of length of batch.
24 | """
25 | raise Exception('Base classifier cannot make predictions.')
26 |
27 | class ConvolutionalClassifier(ClassifierBaseClass):
28 | """12x12 pixel classifier using 2D convolution
29 | implemented with Keras on a TensorFlow backend. Fast.
30 | """
31 |
32 | def __init__(self):
33 | import tensorflow as tf
34 | from keras.models import load_model
35 | path = pkg_resources.resource_filename('pycda', 'models/classifier_12x12_2.h5')
36 | self.model = load_model(path)
37 | self.input_dims = (12, 12)
38 | self.crater_pixels = 4
39 | self.input_channels = 1
40 | self.rec_batch_size = 128
41 |
42 | def predict(self, batch):
43 | """Performs prediction on batch."""
44 | return self.model.predict(batch)
45 |
46 | class CustomClassifier(ClassifierBaseClass):
47 | """This class allows a user to load a custom classifier
48 | into PyCDA. PyCDA will automatically detect input
49 | dimensions. Provide crater_pixels, the number of pixels
50 | the crater candidate diameter should occupy in the
51 | cropped image. All models are channels-last;
52 | channels-first is not currently supported.
53 | You should specify recommended batch size.
54 | (if not specified, set to 24.)
55 | Use model_path argument to specify path to keras model.
56 | """
57 |
58 | def __init__(self, model_path, crater_pixels, rec_batch_size = 24):
59 | import tensorflow as tf
60 | from keras.models import load_model
61 | self.model = load_model(model_path)
62 | #Get input shape from input layer
63 | input_layer = self.model.layers[0]
64 | self.input_dims = input_layer.input_shape[1:3]
65 | #Get color channels
66 | self.input_channels = input_layer.input_shape[3]
67 | self.crater_pixels = crater_pixels
68 | self.rec_batch_size = rec_batch_size
69 |
70 | def predict(self, batch):
71 | """Performs prediction on batch."""
72 | return self.model.predict(batch)
73 |
74 | class NullClassifier(ClassifierBaseClass):
75 | """For use when classifier is not wanted. Returns a likelihood of 1
76 | for every proposal."""
77 |
78 | def __init__(self, input_dims = (1, 1), n_channels=1):
79 | self.input_dims = input_dims
80 | self.crater_pixels = 1
81 | self.rec_batch_size = 1000
82 | self.input_channels = n_channels
83 |
84 | def predict(self, batch):
85 | """Returns an array of randomly-generated predictions of length
86 | of batch."""
87 | batch_size = batch.shape[0]
88 | predictions = [[1] for x in range(batch_size)]
89 | return np.array(predictions)
90 |
91 | class _DummyClassifier(ClassifierBaseClass):
92 | """Dummy classifier for testing."""
93 |
94 | def __init__(self, input_dims=(20, 20), n_channels=1, npx = 8):
95 | self.input_dims = input_dims
96 | self.crater_pixels = npx
97 | self.input_channels = n_channels
98 | self.rec_batch_size = 32
99 |
100 | def predict(self, batch):
101 | """Returns an array of randomly-generated predictions of length
102 | of batch."""
103 | try:
104 | assert (batch.shape[1], batch.shape[2]) == self.input_dims
105 | except AssertionError:
106 | raise Exception('input image shape must match classifier.input_dims')
107 | batch_size = batch.shape[0]
108 | predictions = []
109 | for i in range(batch_size):
110 | prediction = np.random.rand()
111 | prediction = np.expand_dims(prediction, axis=-1)
112 | predictions.append(prediction)
113 | return np.array(predictions)
114 |
115 | def get(identifier):
116 | """handles argument to CDA pipeline for classifier specification.
117 | returns an initialized classifier.
118 | """
119 | model_dictionary = {
120 | 'convolution': ConvolutionalClassifier,
121 | 'dummy': _DummyClassifier,
122 | 'none': NullClassifier
123 | }
124 | if identifier is None:
125 | return NullClassifier()
126 | if isinstance(identifier, ClassifierBaseClass):
127 | model = identifier
128 | return model
129 | elif identifier in model_dictionary:
130 | return model_dictionary[identifier]()
131 | elif callable(identifier):
132 | if isinstance(identifier(), ClassifierBaseClass):
133 | return identifier()
134 | else:
135 | raise Exception('custom classifiers must inherit '
136 | 'from ClassifierBaseClass, which can be '
137 | 'imported from classifiers.py')
138 |
139 | else:
140 | raise ValueError('Could not interpret '
141 | 'classifier identifier: {} \n'
142 | 'try one of these keywords: {}'
143 | .format(identifier, list(model_dictionary.keys())))
144 |
145 |
146 |
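Since get() passes initialized classifiers through unchanged, a custom Keras model can be plugged straight into the pipeline. A minimal sketch ('my_classifier.h5' is a hypothetical model path):

    from pycda import CDA
    from pycda.classifiers import CustomClassifier

    # 'my_classifier.h5' is a hypothetical saved Keras model; input
    # dimensions and channel count are read from its first layer.
    clf = CustomClassifier('my_classifier.h5', crater_pixels=4, rec_batch_size=64)
    cda = CDA(classifier=clf)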
--------------------------------------------------------------------------------
/pycda/detectors.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pkg_resources
3 | from pycda.util_functions import crop_array
4 |
5 | class DetectorBaseClass(object):
6 | """This is the base class for a detector object.
7 | Attributes in the __init__ method should be specified
8 | by child detectors.
9 | """
10 | def __init__(self):
11 | """Detectors must specify these attributes."""
12 | #Specify the dimensions of input image (rows, columns)
13 | self.input_dims = (0, 0)
14 | #Three channels if RGB image; 1 if greyscale
15 | self.input_channels = 3
16 | #dimensions of prediction (rows, columns)
17 | self.output_dims = (0, 0)
18 | #set to "center" if the output pixels map directly to input
19 | self.in_out_map = None
20 | #Recommended batch size should be doable for modest machines
21 | self.rec_batch_size = 1
22 | #Set to "pixel-wise" if detector makes per-pixel prediction
23 | self.prediction_type = None
24 |
25 | def predict(self, batch):
26 | """Predict method takes a batch and returns a batch of predictions.
27 | Output should be a batch of single-channel images of shape:
28 | (batch_size, self.output_dims[0], self.output_dims[1], 1)
29 | """
30 | raise Exception('Error: the detector base class cannot make predictions.')
31 |
32 |
33 | class UnetDetector(DetectorBaseClass):
34 | """U-net convolutional model to generate pixel-wise
35 | prediction. Its output is a per-pixel likelihood that
36 | a given pixel is a part of a crater surface feature.
37 | Single color channel (grayscale).
38 | """
39 | def __init__(self):
40 | import tensorflow as tf
41 | from keras.models import load_model
42 | self.input_dims = (256, 256)
43 | #one color channel; greyscale
44 | self.input_channels = 1
45 | self.output_dims = (172, 172)
46 | self.in_out_map = 'center'
47 | #small batches due to high memory requirements.
48 | self.rec_batch_size = 3
49 | self.prediction_type = 'pixel-wise'
50 | path = pkg_resources.resource_filename('pycda', 'models/unet.h5')
51 | self.model = load_model(path)
52 |
53 | def predict(self, batch):
54 | """returns a batch of random-pixel images with appropriate shape."""
55 | return self.model.predict(batch)
56 |
57 | class TinyDetector(DetectorBaseClass):
58 | """A tiny version of U-Net downsized for speed.
59 | Its output is a per-pixel likelihood that
60 | a given pixel is a part of a crater surface feature.
61 | Single color channel (grayscale).
62 | """
63 | def __init__(self):
64 | import tensorflow as tf
65 | from keras.models import load_model
66 | self.input_dims = (256, 256)
67 | #one color channel; greyscale
68 | self.input_channels = 1
69 | self.output_dims = (172, 172)
70 | self.in_out_map = 'center'
71 | #bigger batches due to smaller model size
72 | self.rec_batch_size = 12
73 | self.prediction_type = 'pixel-wise'
74 | path = pkg_resources.resource_filename('pycda', 'models/tinynet.h5')
75 | self.model = load_model(path)
76 |
77 | def predict(self, batch):
78 | """returns a batch of random-pixel images with appropriate shape."""
79 | return self.model.predict(batch)
80 |
81 | class CustomDetector(DetectorBaseClass):
82 | """This class allows a user to load a custom detection
83 | model into PyCDA. PyCDA will automatically detect input
84 | and output dimensions. All models are channels-last;
85 | channels-first is not currently supported.
86 | You should specify recommended batch size.
87 | (if not specified, set to 1.)
88 |
89 | Use model_path argument to specify the path to your keras model.
90 | """
91 | def __init__(self, model_path, rec_batch_size = 1, in_out_map = 'center'):
92 | import tensorflow as tf
93 | from keras.models import load_model
94 | self.model = load_model(model_path)
95 | #Get input shape from input layer
96 | input_layer = self.model.layers[0]
97 | self.input_dims = input_layer.input_shape[1:3]
98 | #Get color channels
99 | self.input_channels = input_layer.input_shape[3]
100 | #Get output dimensions
101 | output_layer = self.model.layers[-1]
102 | self.output_dims = output_layer.output_shape[1:3]
103 |
104 | self.rec_batch_size = rec_batch_size
105 | self.in_out_map = in_out_map
106 | self.prediction_type = 'pixel-wise'
107 |
108 | def predict(self, batch):
109 | """returns a batch of random-pixel images with appropriate shape."""
110 | return self.model.predict(batch)
111 |
112 | class _DummyDetector(DetectorBaseClass):
113 | """The dummy detector is used for testing. It returns predictions
114 | when called; the predictions contain random values between 0 and 1,
115 | in the dimensions specified at initialization.
116 | """
117 | def __init__(self, input_dims=(256, 256), output_dims=(172, 172), n_channels=1, batch_size=5):
118 | self.input_dims = input_dims
119 | self.input_channels = n_channels
120 | self.output_dims = output_dims
121 | self.in_out_map = 'center'
122 | self.rec_batch_size = batch_size
123 | self.prediction_type = 'pixel-wise'
124 |
125 | def predict(self, batch):
126 | """returns a batch of random-pixel images with appropriate shape."""
127 | try:
128 | assert (batch.shape[1], batch.shape[2]) == self.input_dims
129 | except AssertionError:
130 | raise Exception('input image shape must match detector.input_dims')
131 | predictions = []
132 | ypadding = (self.input_dims[0] - self.output_dims[0])//2
133 | xpadding = (self.input_dims[1] - self.output_dims[1])//2
134 | for img in batch:
135 | image = img[:, :, 0]
136 | prediction = crop_array(image, self.output_dims[0], self.output_dims[1], orgn=(ypadding,xpadding))
137 | #prediction = np.random.rand(self.output_dims[0], self.output_dims[1])
138 | prediction = np.expand_dims(prediction, axis=-1)
139 | predictions.append(prediction)
140 | return np.array(predictions)
141 |
142 |
143 | def get(identifier):
144 | """handles argument to CDA pipeline for detector specification.
145 | returns an initialized detector.
146 | """
147 | model_dictionary = {
148 | 'dummy': _DummyDetector,
149 | 'unet': UnetDetector,
150 | 'tiny': TinyDetector
151 | }
152 | if identifier is None:
153 | raise Exception('You must specify a detector model.')
154 | if isinstance(identifier, DetectorBaseClass):
155 | model = identifier
156 | return model
157 | elif identifier in model_dictionary:
158 | return model_dictionary[identifier]()
159 | elif callable(identifier):
160 | if isinstance(identifier(), DetectorBaseClass):
161 | return identifier()
162 | else:
163 | raise Exception('custom detectors must inherit '
164 | 'from DetectorBaseClass, which can be '
165 | 'imported from detectors.py')
166 | else:
167 | raise ValueError('Could not interpret '
168 | 'detection model identifier: {} \n'
169 | 'try one of these keywords: {}'
170 | .format(identifier, list(model_dictionary.keys())))
171 |
172 |
173 |
174 |
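The same pass-through applies to detectors, so a custom pixel-wise Keras model can replace the bundled networks. A minimal sketch ('my_unet.h5' is a hypothetical model path):

    from pycda import CDA
    from pycda.detectors import CustomDetector

    # 'my_unet.h5' is a hypothetical saved Keras model; input/output
    # dimensions and channels are read from its first and last layers.
    det = CustomDetector('my_unet.h5', rec_batch_size=4, in_out_map='center')
    cda = CDA(detector=det)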
--------------------------------------------------------------------------------
/pycda/error_stats.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import matplotlib.pyplot as plt
4 | import matplotlib.patches as mpatches
5 | from sklearn.neighbors import NearestNeighbors
6 | from sklearn.metrics import completeness_score
7 |
8 | class ErrorAnalyzer(object):
9 | """Error Analyzer is used to measure predictive performance
10 | of cda. It is intended for use on images where all craters
11 | have been hand labeled and so are "known". The PyCDA prediction
12 | object accepts known craters as a pandas dataframe under the
13 | attribute prediction.known_craters; the columns 'lat', 'long',
14 | and 'diameter' should be populated with appropriate values for
15 | known crater objects for use with ErrorAnalyzer.
16 | """
17 | def __init__(self):
18 | """An analyzer tracks predicted and known craters after
19 | performing analysis. Will reset after new call to .analyze
20 | """
21 | self.predicted = None
22 | self.known = None
23 | self.fp = 0
24 | self.tp = 0
25 | self.fn = 0
26 | self.D = None
27 | self.B = None
28 | self.Q = None
29 | self.done = False
30 | self.prediction_object = None
31 | pass
32 |
33 | def _match_predictions(self, prop, known, verbose=True):
34 | """Uses nearest neighbors algorithm to match
35 | known craters to proposals.
36 | """
37 | threshold = self.prediction_object.threshold
38 | #Will search through ten nearest proposals
39 | #unless there are fewer than ten.
40 | kn = min(10, len(prop))
41 | nnb_search = NearestNeighbors(
42 | n_neighbors=kn,
43 | algorithm='ball_tree',
44 | metric='l2'
45 | )
46 | #fit to proposals to search this list
47 | nnb_search.fit(prop[['lat', 'long', 'diameter']])
48 | distances, indices = nnb_search.kneighbors(known[['lat', 'long', 'diameter']])
49 |
50 | #will keep results organized
51 | results = known.copy()
52 | cols1 = ['neighbor{}'.format(n) for n in range(kn)]
53 | distances = pd.DataFrame(columns = cols1, data=distances)
54 | cols2 = ['id{}'.format(n) for n in range(kn)]
55 | ids = pd.DataFrame(columns = cols2, data=indices)
56 | results = pd.concat([results, distances, ids], axis=1)
57 |
58 | #Scale distances by known crater diameter
59 | for col in cols1:
60 | results[col] = results[col]/results['diameter']
61 |
62 | #These copies will be our outputs
63 | known_copy = known[['lat', 'long', 'diameter']].copy()
64 | prop_copy = prop[['lat', 'long', 'diameter', 'likelihood']].copy()
65 |
66 | #initialize truth values
67 | known_copy['detected'] = False
68 | prop_copy['positive'] = False
69 |
70 | #iterate over neighbors, starting with nearest
71 | for n in range(kn):
72 | results_col = 'neighbor{}'.format(n)
73 | prop_col = 'id{}'.format(n)
74 | #order by vicinity and iterate in ascending order
75 | for i, row in results.sort_values(results_col).iterrows():
76 | prop_id = int(row.loc[prop_col])
77 | #stop iteration once we hit threshold
78 | if row[results_col] > .4:
79 | break
80 | #if crater/proposal haven't been matched, match them
81 | if not known_copy.at[i, 'detected'] and not prop_copy.at[prop_id, 'positive']:
82 | #iff proposal was accepted by classifier
83 | if prop_copy.at[prop_id, 'likelihood'] > threshold:
84 | known_copy.at[i, 'detected'] = True
85 | prop_copy.at[prop_id, 'positive'] = True
86 |
87 | if verbose:
88 | print('{} craters were properly detected.'.format(len(known_copy[known_copy['detected']])))
89 | return prop_copy, known_copy
90 |
91 | def _compute_results(self):
92 | """Computes descriptive statistics about model performance.
93 | """
94 | if not self.done:
95 | print('No results to compute!')
96 | return None
97 | self.tp = len(self.predicted[self.predicted.positive])
98 | self.fp = np.where(self.predicted[~self.predicted.positive].likelihood > self.prediction_object.threshold, 1, 0).sum()
99 | self.fn = len(self.known[~self.known.detected])
100 | self.D = 100 * self.tp/(self.tp + self.fn)
101 | self.R = self.tp/(self.tp + self.fn)
102 | self.P = self.tp/(self.tp + self.fp)
103 | self.F1 = 2/((1/self.P) + (1/self.R))
104 | self.FD = 1 - self.P
105 | self.FNR = self.fn/(self.tp + self.fn)
106 | try:
107 | self.B = self.fp/self.tp
108 | except ZeroDivisionError:
109 | self.B = self.fp/.00001
110 | self.Q = 100 * self.tp / (self.tp + self.fp + self.fn)
111 | return None
112 |
113 | def print_report(self):
114 | """Prints performance statistics for prediction."""
115 | if not self.done:
116 | print('Call .analyze() on a prediction to get stats.')
117 | return None
118 | print('='*50)
119 | print('\nDetection Percentage: {}%'.format(round(self.D, 1)))
120 | print('\nPrecision: {}'.format(round(self.P, 2)))
121 | print('\nRecall: {}'.format(round(self.R, 2)))
122 | print('\nF1-Score: {}'.format(round(self.F1, 2)))
123 | print('\nFalse Discovery Rate: {}'.format(round(self.FD, 2)))
124 | print('\nFalse Negative Rate: {}'.format(round(self.FNR, 2)))
125 | print('\nBranching Factor: ', round(self.B, 2))
126 | print('\nQuality Percentage: {}%'.format(round(self.Q, 1)), '\n')
127 | print('='*50)
128 | return
129 |
130 |
131 | def analyze(self, prediction, verbose=True):
132 | """Takes a prediction object and performs analysis on it.
133 | Raises an exception if no known crater labels are attributed
134 | to the input prediction object.
135 | """
136 | if len(prediction.known_craters) == 0:
137 | raise Exception('Known crater statistics are required to perform '
138 | 'error analysis. Please populate the prediction object '
139 | '.known_craters attribute with known crater locations '
140 | "(pandas dataframe with columns 'lat', long', 'diameter')")
141 | elif not isinstance(prediction.known_craters, type(pd.DataFrame())):
142 | #If data is passed in as an array, create
143 | #pandas dataframe for compatibility. Assumes
144 | #data is ordered as in df; x(0), y(1), diameter(2)
145 | known_craters = prediction.known_craters
146 | df = pd.DataFrame(columns=['lat', 'long', 'diameter'])
147 | df['lat'] = known_craters[:, 0]
148 | df['long'] = known_craters[:, 1]
149 | df['diameter'] = known_craters[:, 2]
150 | craters = df
151 | elif isinstance(prediction.known_craters, type(pd.DataFrame())):
152 | craters = prediction.known_craters
153 | cols = craters.columns
154 | #attempts to format unlabeled/poorly labeled dataframe
155 | if 'lat' not in cols or 'long' not in cols or 'diameter' not in cols:
156 | if verbose:
157 | print('Warning: crater annotations not properly labeled. '
158 | 'If there is a problem, please reorder crater annotations '
159 | 'as: x position, y position, crater diameter (pixels) '
160 | 'and label columns in pandas dataframe.')
161 | craters.columns = ['lat', 'long', 'diameter']
162 | else:
163 | raise Exception('Sorry, labeled craters datatype is not understood. '
164 | 'Please populate the prediction object '
165 | '.known_craters attribute with known crater locations '
166 | "(pandas dataframe with columns 'lat', 'long', 'diameter')")
167 | self.prediction_object = prediction
168 | self.predicted, self.known = self._match_predictions(
169 | prediction.proposals,
170 | craters,
171 | verbose=verbose
172 | )
173 | if verbose:
174 | print('Matching complete!\n')
175 | self.done = True
176 | self._compute_results()
177 | if verbose:
178 | self.print_report()
179 | return
180 |
181 | def plot_densities(self, verbose=True):
182 | """Generates histogram plot with predicted and actual
183 | crater densities by size."""
184 | predictions = self.predicted[self.predicted.likelihood > self.prediction_object.threshold]
185 | min_ = min(min(self.known.diameter), min(predictions.diameter))
186 | max_ = max(max(self.known.diameter), max(predictions.diameter))
187 | bins = [n*(max_-min_)/20 + min_ for n in range(21)]
188 | fig, ax = plt.subplots();
189 | ax.hist(self.known.diameter.astype(np.float32), bins=bins, color='b', label='known crater density', alpha=.5);
190 | ax.hist(predictions.diameter.astype(np.float32), bins=bins, color='r', label='detected crater density', alpha=.5);
191 | ax.set_xlabel('crater diameter (pixels)');
192 | ax.set_ylabel('density over image area');
193 | plt.legend();
194 | plt.show();
195 | if verbose:
196 | print('known crater count in image: ', len(self.known))
197 | print('detected crater count in image: ', len(predictions))
198 | return
199 |
200 | def show(self):
201 | """Displays the input image with predictions and
202 | known craters displayed by colors.
203 | """
204 | image = self.prediction_object.input_image
205 | name = self.prediction_object.__name__
206 | threshold = self.prediction_object.threshold
207 | predictions = self.predicted[self.predicted.likelihood > threshold]
208 | fig, ax = plt.subplots(figsize=(7, 7))
209 | ax.imshow(image, cmap='Greys_r')
210 | ax.set_title('Detection performance for {}'.format(name))
211 | for i, crater in self.known.iterrows():
212 | if not crater.detected:
213 | y = crater[0]
214 | x = crater[1]
215 | r = crater[2]/2
216 | circle = plt.Circle((x, y), r, fill=False, color='red');
217 | ax.add_artist(circle);
218 | for i, proposal in predictions.iterrows():
219 | if not proposal.positive:
220 | y = proposal[0]
221 | x = proposal[1]
222 | r = proposal[2]/2
223 | circle = plt.Circle((x, y), r, fill=False, color='yellow');
224 | ax.add_artist(circle);
225 | elif proposal.positive:
226 | y = proposal[0]
227 | x = proposal[1]
228 | r = proposal[2]/2
229 | circle = plt.Circle((x, y), r, fill=False, color='green');
230 | ax.add_artist(circle);
231 | handles = []
232 | handles.append(mpatches.Patch(color='green', label='properly detected craters'))
233 | handles.append(mpatches.Patch(color='red', label='undetected craters'))
234 | handles.append(mpatches.Patch(color='yellow', label='false detections (noncraters detected as craters)'))
235 |         plt.legend(handles=handles)
236 |         plt.show()
237 | return
238 |
239 | def return_results(self):
240 |         """Returns a tuple of two dataframes: the predictions, with an added
241 |         boolean column (positive); and the known craters, with an added
242 |         boolean column (detected).
243 | """
244 | return self.predicted, self.known
245 |
246 | def return_stats(self):
247 |         """Returns scoring stats: true positive count, false positive count,
248 |         false negative count, precision, recall, f1-score, and related binary error statistics."""
249 | stats_dict = {
250 | 'true_positives': self.tp,
251 | 'false_positives': self.fp,
252 | 'false_negatives': self.fn,
253 | 'detection_percentage': self.D,
254 | 'precision': self.P,
255 | 'recall': self.R,
256 | 'f1_score': self.F1,
257 | 'false_detections': self.DF,
258 | 'false_negative_rate': self.FNR
259 | }
260 | return stats_dict
261 |
262 |
263 |
264 |
265 |
266 |
--------------------------------------------------------------------------------
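A worked example of the binary detection statistics that `return_stats` reports, using the standard definitions behind the dict keys above (the counts here are made up for illustration):

```
# Hypothetical counts from a scored prediction.
tp, fp, fn = 80, 10, 20

precision = tp / (tp + fp)                          # 0.889: fraction of detections that are real craters
recall = tp / (tp + fn)                             # 0.800: fraction of known craters that were found
f1 = 2 * precision * recall / (precision + recall)  # 0.842: harmonic mean of the two
print(precision, recall, f1)
```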
/pycda/extractors.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from skimage import measure
4 | from scipy import ndimage as ndi
5 | from scipy.ndimage import find_objects
6 | from skimage.morphology import watershed
7 | from skimage.feature import peak_local_max
8 | from math import sqrt, pi
9 | from pycda.util_functions import update_progress
10 |
11 | class ExtractorBaseClass(object):
12 | """Base class for an extractor object. The extractor
13 | converts a prediction map into a list of crater candidates.
14 | """
15 | def __init__(self):
16 | pass
17 |
18 | def __call__(self):
19 | raise Exception('Extractor base class cannot perform extraction.')
20 |
21 |
22 | class FastCircles(ExtractorBaseClass):
23 | """Circle Extractor assumes all objects in detection map are
24 | circles. It identifies groups of pixels, computes
25 | their mean location (centroid), and diameter based on
26 | the number of pixels in the group. Takes a prediction object
27 | and returns a list of proposals.
28 | """
29 | def __init__(self, sensitivity=.5):
30 | """sensitivity is a hyperparameter that adjusts the extractor's
31 | sensitivity to pixels with smaller values; a higher sensitivity
32 |         tends to yield larger crater candidates, with a risk of merging
33 | adjacent craters. A lower sensitivity can exclude weak detections.
34 | """
35 | self.threshold = 1 - sensitivity
36 |
37 | def _get_label_map(self, detection_map, threshold=.5, verbose=False):
38 | """Takes a pixel-wise prediction map and returns a matrix
39 | of unique objects on the map. Threshold is a hyperparameter
40 | for crater/non-crater pixel determination. Higher threshold
41 | may help distinguish merged crater detections.
42 | """
43 | if verbose:
44 | print('getting label map...')
45 | filtered = np.where(detection_map > threshold, 1, 0)
46 | labels = measure.label(filtered, neighbors=4, background=0)
47 | if verbose:
48 | print('done!')
49 | return labels
50 |
51 | def _get_crater_pixels(self, label_matrix, idx):
52 | """Takes a label matrix and a number and gets all the
53 | pixel locations from that crater object.
54 | """
55 |         result = np.argwhere(label_matrix == idx)
56 | return result
57 |
58 | def _get_pixel_objects(self, label_matrix, verbose=False):
59 | """Takes the label matrix and returns a list of objects.
60 | Each element in the list is a unique object, defined
61 | by an array of pixel locations belonging to it.
62 | """
63 | objects = find_objects(label_matrix)
64 | result = []
65 |         for i, prop in enumerate(objects):
66 |             slice_ = np.argwhere(label_matrix[prop] == i + 1)
67 | slice_[:, 0] += prop[0].start
68 | slice_[:, 1] += prop[1].start
69 | result.append(slice_)
70 | return result
71 |
72 | def _get_crater_proposals(self, detection_map, verbose=False):
73 | """Takes a pixel-wise prediction map and returns a list of
74 | crater proposals as lat, long, diameter.
75 | """
76 |         label_matrix = self._get_label_map(detection_map, threshold=self.threshold, verbose=verbose)
77 | proposals = self._get_pixel_objects(label_matrix, verbose=verbose)
78 | result = []
79 | if verbose:
80 | print('Defining proposals as circles...')
81 | for proposal in proposals:
82 | area = len(proposal)
83 | y_locs = [x[0] for x in proposal]
84 | x_locs = [x[1] for x in proposal]
85 | y_mean = round(np.mean(y_locs))
86 | x_mean = round(np.mean(x_locs))
87 | d = 2*sqrt(area/pi)
88 | if d > 4:
89 | result.append((y_mean, x_mean, d))
90 | if verbose:
91 | print('done!')
92 | return result
93 |
94 |
95 | def __call__(self, detection_map, verbose=False):
96 | cols = ['lat', 'long', 'diameter']
97 | result = self._get_crater_proposals(detection_map, verbose=verbose)
98 |         proposals = pd.DataFrame(columns=cols, data=result)
99 | proposals['likelihood'] = 1
100 | return proposals
101 |
102 | class WatershedCircles(ExtractorBaseClass):
103 |     """Takes a prediction object after detection and returns a
104 |     list of crater proposals (by calling the object on the prediction).
105 |     Performs a 'watershed' analysis:
106 |     -transforms the image to binary at some threshold (default .5)
107 |     -transforms pixel values to the minimum distance to a background (0) pixel
108 |     -uses local maxima as points for 'water sources'
109 |     -uses negative distance values to build 'basins'
110 |     -fills the 'basins' with water from the sources
111 |     -uses the boundaries where 'waters meet' as segment boundaries
112 |     -converts objects to circles: the center is the centroid of the
113 |     object's bounding box, and the diameter is the mean of the bounding
114 |     box width and height.
115 | """
116 | def __init__(self, sensitivity=.5):
117 | """sensitivity is a hyperparameter that adjusts the extractor's
118 | sensitivity to pixels with smaller values.
119 | """
120 | self.threshold = 1 - sensitivity
121 |
122 | def _get_labels(self, detection_map, verbose=False):
123 | """Handles transformations and extracts labels
124 | for each object identified.
125 | """
126 | #Convert to binary pixel values
127 | binary = np.where(detection_map > self.threshold, 1, 0)
128 | #Distance transform
129 | distance = ndi.distance_transform_edt(binary)
130 | #Identify local maxima
131 | local_maxi = peak_local_max(distance, indices=False,
132 | labels=binary)
133 | #Get object labels
134 | markers = ndi.label(local_maxi)[0]
135 | labels = watershed(-distance, markers, mask=binary)
136 |
137 | return labels
138 |
139 | def _get_crater_proposals(self, detection_map, verbose=False):
140 | """Converts labeled objects into circle figures.
141 | """
142 | labels = self._get_labels(detection_map, verbose=verbose)
143 | objs = find_objects(labels)
144 | proposals = []
145 | for obj in objs:
146 | centy = (obj[0].stop+obj[0].start)/2
147 | centx = (obj[1].stop+obj[1].start)/2
148 | proposal = [centy, centx]
149 | dy = obj[0].stop - obj[0].start
150 | dx = obj[1].stop - obj[1].start
151 | diameter = np.mean([dy, dx])
152 | proposal.append(diameter)
153 | if diameter > 4:
154 | proposals.append(proposal)
155 | return proposals
156 |
157 | def __call__(self, detection_map, verbose=False):
158 | cols = ['lat', 'long', 'diameter']
159 | result = self._get_crater_proposals(detection_map, verbose=verbose)
160 |         proposals = pd.DataFrame(columns=cols, data=result)
161 | proposals['likelihood'] = 1
162 | return proposals
163 |
164 | class _DummyExtractor(ExtractorBaseClass):
165 | """Dummy Extractor takes an input image and returns a list of
166 | random predictions for testing. Proposals are a list of tuples.
167 | Each tuple in the list has the crater proposal as (in pixels):
168 | (x position, y position, diameter).
169 | """
170 | def __call__(self, image, verbose=False):
171 |         height = image.shape[0]
172 |         width = image.shape[1]
173 |         n_proposals = np.random.randint(2,50)
174 |         proposals = []
175 |         for prop in range(n_proposals):
176 |             x_pos = np.random.randint(0, width)
177 |             y_pos = np.random.randint(0, height)
178 |             diameter = np.random.randint(2, (width+height)//20)
179 |             likelihood = 1
180 |             proposals.append((x_pos, y_pos, diameter, likelihood))
181 | proposals = pd.DataFrame(columns=['x', 'y', 'diameter', 'likelihood'], data=proposals)
182 | if verbose:
183 | print('I am a dummy extractor!')
184 | return proposals
185 |
186 |
187 | def get(identifier):
188 | """handles argument to CDA pipeline for extractor specification.
189 | returns an initialized extractor.
190 | """
191 | model_dictionary = {
192 | 'dummy': _DummyExtractor,
193 | 'fast_circle': FastCircles,
194 | 'watershed': WatershedCircles
195 | }
196 | if identifier is None:
197 | raise Exception('You must specify a proposal extractor.')
198 | if isinstance(identifier, ExtractorBaseClass):
199 | model = identifier
200 | return model
201 | elif identifier in model_dictionary:
202 | return model_dictionary[identifier]()
203 | elif callable(identifier):
204 |         if isinstance(identifier(), ExtractorBaseClass):
205 | return identifier()
206 | else:
207 |             raise Exception('custom extractors must inherit '
208 |                             'from ExtractorBaseClass, which can be '
209 |                             'imported from extractors.py')
210 | else:
211 | raise ValueError('Could not interpret '
212 | 'extractor identifier: {} \n'
213 | 'try one of these keywords: {}'
214 | .format(identifier, list(model_dictionary.keys())))
215 |
216 |
--------------------------------------------------------------------------------
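A minimal sketch of the FastCircles rule above: a blob of N crater pixels becomes a circle of equal area, so the proposed diameter is d = 2*sqrt(N/pi). The synthetic detection map here is made up for illustration; `extractors.get('fast_circle')` is the dispatch documented at the bottom of the module:

```
import numpy as np
from pycda import extractors

# Synthetic detection map: a filled disk of radius 10 on a blank 100x100 tile.
yy, xx = np.mgrid[:100, :100]
detection_map = ((yy - 50)**2 + (xx - 50)**2 <= 10**2).astype(float)

extractor = extractors.get('fast_circle')   # initialized FastCircles
proposals = extractor(detection_map)

# The disk covers ~pi * 10^2 pixels, so the proposed diameter is ~20.
print(proposals)   # columns: lat, long, diameter, likelihood
```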
/pycda/models/classifier_12x12_2.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/models/classifier_12x12_2.h5
--------------------------------------------------------------------------------
/pycda/models/tinynet.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/models/tinynet.h5
--------------------------------------------------------------------------------
/pycda/models/unet.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/models/unet.h5
--------------------------------------------------------------------------------
/pycda/predictions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import matplotlib.pyplot as plt
4 | from pycda import util_functions
5 |
6 | class Prediction(object):
7 | """A prediction object is a specialized data
8 | handler for pycda. It tracks the progress of predictions
9 | on an input image, helps the pipeline track information,
10 | and can perform auxiliary functions that help the user
11 | inspect the prediction, save the results, export csv files,
12 | and modify hyperparameters.
13 | """
14 | def __init__(self, image, id_no, cda):
15 | """prediction objects are initialized by the cda pipeline itself."""
16 | #the prediction object stores the input image in memory.
17 | self.input_image = image
18 | self.__name__ = 'prediction_{}'.format(id_no)
19 | self.cda = cda
20 |         self.verbose = False
21 | #image_split_coords is a list of (x, y) coordinates
22 | #that map to every split necessary for the detector
23 | self.image_split_coords = []
24 | #In the case that the detector output is different
25 | #from the input, destination coordinates for the output
26 | #are stored as det_split_coords
27 | self.det_split_coords = []
28 | #list of bools recording which predictions have been made.
29 | self.detections_made = np.array([False])
30 | #prediction map will record the outputs of detector
31 | self.detection_map = np.zeros((self.input_image.shape[0], self.input_image.shape[1]))
32 | #proposals will be stored here.
33 | self.proposals = pd.DataFrame(columns=['lat', 'long', 'diameter', 'likelihood'])
34 |         #threshold is a likelihood value below which proposals
35 |         #are rejected. Lowering this value will include more proposals
36 |         #in the prediction, while raising it will make it more selective.
37 | self.threshold = .5
38 | #add ground truth labels for errors module
39 | self.known_craters = pd.DataFrame(columns=['lat', 'long', 'diameter'])
40 | #optional scale if user wants metric crater sizes
41 | self.scale = None
42 |
43 | def __str__(self):
44 | return self.__name__
45 |
46 | def _record_detection(self, detection, index):
47 | """Records a detection in the prediction map.
48 | Uses index to determine location of detection.
49 | """
50 | ymin = self.det_split_coords[index][0]
51 | ymax = min(ymin+detection.shape[0], self.detection_map.shape[0])
52 | xmin = self.det_split_coords[index][1]
53 | xmax = min(xmin+detection.shape[1], self.detection_map.shape[1])
54 | self.detection_map[ymin:ymax, xmin:xmax] = detection
55 |
56 | def _batch_record_detection(self, batch, indices):
57 | """Takes a batch of detections and a slice object
58 | that contains first, last index of batch. Records
59 | detections into detection map.
60 | """
61 | for i, index in enumerate(indices):
62 | detection = batch[i, :, :, 0]
63 | self._record_detection(detection, index)
64 | return
65 |
66 |     def _predict(self, threshold=.5):
67 | """Returns a dataframe of detected craters.
68 | Threshold determines a cutoff for proposal likelihood.
69 | """
70 | df = self.proposals[self.proposals.likelihood >= threshold]
71 | df = df[['lat', 'long', 'diameter']].copy()
72 | return df
73 |
74 | def get_proposals(self):
75 | """Returns a dataframe with crater proposals and
76 | likelihoods from classifier.
77 | """
78 | return self.proposals
79 |
80 | def set_scale(self, scale):
81 | """User can set scale for statistics in meters.
82 | scale should be meters per pixel; pass scale as float or int
83 | as argument, saves scale to prediction object.
84 | """
85 | self.scale = scale
86 | return
87 |
88 |
89 | def show(self, threshold=.5, include_ticks=True, save_plot=False):
90 | """Displays the input image with the predicted craters
91 | overlaid.
92 | """
93 | fig, ax = plt.subplots(figsize=(7, 7))
94 | ax.imshow(self.input_image, cmap='Greys_r')
95 | ax.set_title('Crater detections for {}'.format(self.__name__))
96 | if include_ticks:
97 |             if self.scale is None:
98 | message = '(in pixels, resolution unspecified)'
99 | else:
100 | message = '@ {} meters/pixel'.format(self.scale)
101 |             ax.set_ylabel('vertical distance {}'.format(message))
102 |             ax.set_xlabel('horizontal distance {}'.format(message))
103 | else:
104 | ax = util_functions.remove_ticks(ax)
105 | for i, crater in self.proposals.iterrows():
106 | if crater.likelihood > threshold:
107 | x = crater[1]
108 | y = crater[0]
109 | r = crater[2]/2
110 |                 circle = plt.Circle((x, y), r, fill=False, color='r')
111 |                 ax.add_artist(circle)
112 |         if save_plot:
113 |             try:
114 |                 ax.set_title('')
115 |                 plt.axis('off')
116 |                 fig.savefig(save_plot, bbox_inches='tight')
117 |             except Exception:
118 |                 print('could not save. please pass a filepath as '
119 |                       'the keyword argument save_plot.')
120 |             ax.set_title('Crater detections for {}'.format(self.__name__))
121 |             plt.axis('on')
122 |         plt.show()
123 |
124 | def show_detection(self, remove_ticks=True):
125 | """Plots the detection map alongside the input image."""
126 | fig, ax = plt.subplots(ncols=2, figsize=(9, 6))
127 | ax[0].imshow(self.input_image, cmap='Greys_r')
128 | ax[1].imshow(self.detection_map, cmap='CMRmap')
129 | if remove_ticks:
130 | ax[0], ax[1] = util_functions.remove_ticks(ax[0]), util_functions.remove_ticks(ax[1])
131 |         plt.show()
132 |
133 | def to_csv(self, filepath, likelihoods=False, index=False):
134 | """Creates a csv file with predictions. If likelihoods
135 | is True, a likelihoods column is added to the csv file.
136 |         Saves the csv to filepath using the pd.to_csv method."""
137 | if len(self.proposals) == 0:
138 | print('Cannot export csv. No predictions made!')
139 | return
140 | df = self.proposals
141 | if not likelihoods:
142 | df = df[['lat', 'long', 'diameter']]
143 | df.to_csv(filepath, index=index)
144 | return
145 |
--------------------------------------------------------------------------------
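A usage sketch for the Prediction object's public helpers above. The image path and scale value are made up, and `cda.predictions` holding the pipeline's Prediction objects is inferred from the test suite rather than documented API:

```
from pycda import CDA, load_image

cda = CDA()
image = load_image('my_image_filepath.png')   # hypothetical path
detections = cda.predict(image)               # dataframe of lat, long, diameter

# The pipeline appends a Prediction object per input image.
prediction = cda.predictions[-1]
prediction.set_scale(12.5)                    # meters per pixel, if known
prediction.show(threshold=.5)                 # input image with detections overlaid
prediction.show_detection()                   # raw detection map beside the input
prediction.to_csv('detections.csv', likelihoods=True)
```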
/pycda/sample_data.py:
--------------------------------------------------------------------------------
1 | from pycda import load_image
2 | import pandas as pd
3 | import pkg_resources
4 | import random
5 | import os
6 |
7 | def get_sample_image(filename='holdout_tile.pgm', choose=False):
8 | """Retrieves sample data from the in-package directory.
9 | if choose=True, randomly selects sample photo from package.
10 | """
11 | path = pkg_resources.resource_filename('pycda', 'sample_imgs/')
12 | if choose:
13 | choices = os.listdir(path)
14 | filename = random.choice(choices)
15 | while filename[-4:] == '.csv':
16 | filename = random.choice(choices)
17 | file = path + filename
18 | img = load_image(file)
19 | return img
20 |
21 | def get_sample_csv(filename='holdout_tile_labels.csv'):
22 | """Retrieves hand-labeled crater annotations for image
23 | holdout_tile.pgm (default returned by get_sample_image()).
24 | Returns pandas dataframe.
25 | """
26 | path = pkg_resources.resource_filename('pycda', 'sample_imgs/{}'.format(filename))
27 | df = pd.read_csv(path)
28 | return df
29 |
--------------------------------------------------------------------------------
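The two helpers above pair an image with its hand-labeled annotations, which is what the error analysis tooling consumes (a sketch; only the default tile ships with a matching label file):

```
from pycda.sample_data import get_sample_image, get_sample_csv

image = get_sample_image()    # holdout_tile.pgm by default
labels = get_sample_csv()     # dataframe with columns long, lat, diameter
print(len(labels), 'labeled craters')

# Or grab a random bundled image (these have no label files):
other = get_sample_image(choose=True)
```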
/pycda/sample_imgs/holdout_tile.pgm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/sample_imgs/holdout_tile.pgm
--------------------------------------------------------------------------------
/pycda/sample_imgs/holdout_tile_l.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/sample_imgs/holdout_tile_l.png
--------------------------------------------------------------------------------
/pycda/sample_imgs/holdout_tile_labels.csv:
--------------------------------------------------------------------------------
1 | long,lat,diameter
2 | 1621.9,716.86,4.3318
3 | 171.63,567.58,4.8
4 | 147.79,1150.5,4.8
5 | 125.26,950.62,4.8
6 | 18.359,1694.8,4.8
7 | 1464.5,999.86,4.8
8 | 209.06,320.39,4.8
9 | 159.88,1135.1,6.4
10 | 49.533,1435.8,6.4
11 | 161.68,1274.9,6.4
12 | 50.736,1418,6.4
13 | 144.74,1221,6.4
14 | 142.49,1350.5,6.4
15 | 131.8,1508.7,6.4
16 | 308.12,1144.5,6.4
17 | 1349.4,1073.1,6.4
18 | 714.01,144.08,6.4
19 | 294.83,1656.7,6.4
20 | 128.19,1468.8,6.4
21 | 1340.8,1053.7,6.4
22 | 1382.8,275.44,6.4
23 | 847.82,1229.3,6.4
24 | 839.06,1052.6,6.4
25 | 243.38,915.36,6.4
26 | 1624.8,613.48,7.6772
27 | 138.91,589.45,8
28 | 27.874,602.49,8
29 | 84.564,1194.1,8
30 | 191.4,970.97,8
31 | 5.3434,604.67,8
32 | 24.967,613.39,8
33 | 122.36,959.34,8
34 | 28.219,1492.3,8
35 | 148.12,1324,8
36 | 24.279,1204.7,8
37 | 107.59,1122.5,8
38 | 122.79,1291.4,8
39 | 36.663,1121.9,8
40 | 152.4,1662.7,8
41 | 118.9,1686.8,8
42 | 354.74,91.485,8
43 | 388.36,50.882,8
44 | 1172.6,1559.8,8
45 | 1620.6,1321.8,8
46 | 1564.4,1015.1,8
47 | 91.38,91.055,8
48 | 732.95,73.634,8
49 | 234.24,1449.5,8
50 | 171.82,1533.7,8
51 | 1126,909.06,8
52 | 1105.1,550.94,8
53 | 1319.1,1133.9,8
54 | 1186.3,32.56,8
55 | 312.33,95.194,8
56 | 253.04,1660.6,8
57 | 1120.4,733.96,8
58 | 512.19,268.37,8
59 | 999.04,1056.3,8
60 | 1222.4,632.98,8
61 | 1626.5,798.41,8.2482
62 | 1603.5,7.0817,8.2984
63 | 98.781,1090.2,8.8
64 | 1044.1,641.68,8.8
65 | 1698.7,590.63,8.9673
66 | 15.25,364.16,9.6
67 | 68.207,1592.3,9.6
68 | 23.029,90.042,9.6
69 | 190.38,576.24,9.6
70 | 4.2738,486.07,9.6
71 | 195.76,866.31,9.6
72 | 135.28,1203.5,9.6
73 | 9.7042,1044.4,9.6
74 | 130.67,1220.5,9.6
75 | 150.37,1565,9.6
76 | 404.05,390.53,9.6
77 | 373.54,22.151,9.6
78 | 738.25,30.458,9.6
79 | 185.76,1220.9,9.6
80 | 13.059,1223.9,9.6
81 | 40.327,1233,9.6
82 | 1075.4,913.87,9.6
83 | 965.38,962.05,9.6
84 | 1223.9,824.75,9.6
85 | 1337.9,898.62,9.6
86 | 1629.4,1425.4,9.6
87 | 1246.8,1129,9.6
88 | 1206.8,1061.6,9.6
89 | 1177.2,782.63,9.6
90 | 311.38,5.3073,9.6
91 | 555.22,3.8729,9.6
92 | 214.32,1608.9,9.6
93 | 362.53,1529.6,9.6
94 | 347.71,1507.1,9.6
95 | 518.4,748.78,9.6
96 | 874.12,438.48,9.6
97 | 467.24,223.81,9.6
98 | 1011.5,581.56,9.6
99 | 1651.6,626.92,9.6
100 | 1657.3,608.18,9.6
101 | 1620.9,775.29,9.6
102 | 407.01,68.927,9.6
103 | 1688.7,616.22,9.7545
104 | 1576.5,744.74,10.004
105 | 1577.4,28.169,10.08
106 | 59.902,1114.1,10.218
107 | 1636.9,1455.7,10.364
108 | 1609.7,172.63,11.097
109 | 52.605,477.41,11.2
110 | 81.656,787.82,11.2
111 | 73.079,1627,11.2
112 | 1156.8,1274.2,11.2
113 | 244.21,493.16,11.2
114 | 46.751,638.86,11.2
115 | 1496.4,1199.5,11.2
116 | 261.92,945.29,11.2
117 | 510.43,915.22,11.2
118 | 1349.4,979.62,11.2
119 | 142.13,81.208,11.2
120 | 1635.3,1618.9,11.2
121 | 1641.6,1394.6,11.2
122 | 1468.9,1100.6,11.2
123 | 548.52,1337.8,11.2
124 | 1670.5,651.77,11.396
125 | 1070.4,499.86,11.452
126 | 561.89,45.486,11.672
127 | 1699.3,1064.4,11.755
128 | 1399.1,1009.4,12.205
129 | 1149.9,680.9,12.257
130 | 1076.9,561.71,12.48
131 | 309.87,175.59,12.716
132 | 168.72,1564.6,12.8
133 | 24.655,1432.9,12.8
134 | 107.68,348.69,12.8
135 | 177.4,543.05,12.8
136 | 42.902,717.32,12.8
137 | 185.08,1303,12.8
138 | 306.3,915.75,12.8
139 | 428.02,1286.1,12.8
140 | 283.61,1690.1,12.8
141 | 246.38,735.13,12.8
142 | 304.34,1692.2,12.8
143 | 88.965,1566.5,12.8
144 | 852.93,1248.8,12.8
145 | 1326.7,843.22,12.8
146 | 1487.1,933.72,12.8
147 | 357.75,1193,12.8
148 | 496.41,837.23,12.8
149 | 1392.9,573.3,12.8
150 | 326.3,1072.3,12.8
151 | 210.06,868.71,12.8
152 | 372.6,948.49,12.8
153 | 382.87,153.12,12.8
154 | 345.67,1405.2,12.816
155 | 1661.7,1239.3,13.106
156 | 413.88,43.788,13.207
157 | 1610.3,1442.4,13.255
158 | 127.02,1429.4,13.255
159 | 98.657,973.65,13.28
160 | 821.94,14.288,13.303
161 | 221.96,405.77,13.399
162 | 1579.3,824.58,13.436
163 | 1519,1621.7,13.541
164 | 525.88,859.88,13.6
165 | 452.47,329.47,13.634
166 | 181.9,1283.6,13.634
167 | 1682.9,36.118,13.678
168 | 1582.3,1279.4,13.849
169 | 38.371,572.54,13.866
170 | 38.461,1142.8,13.912
171 | 1093.4,648.37,13.92
172 | 231.96,553.09,14.003
173 | 98.91,641.03,14.048
174 | 498.72,169.18,14.08
175 | 111.36,1132.9,14.139
176 | 422.34,68.278,14.184
177 | 1614.4,1063.7,14.373
178 | 133.34,558.83,14.4
179 | 169.26,1398.6,14.4
180 | 71.281,1136.5,14.4
181 | 40.482,588.28,14.4
182 | 97.993,1611.7,14.4
183 | 19.823,530.88,14.4
184 | 1653.3,1503,14.4
185 | 523.28,641.48,14.4
186 | 112.45,1357.4,14.4
187 | 492.8,922.55,14.4
188 | 106.23,704.31,14.4
189 | 1662.9,1586.3,14.4
190 | 1174.8,1131.3,14.4
191 | 21.497,1129,14.4
192 | 251.96,293.3,14.4
193 | 391.79,1468.2,14.4
194 | 1014.4,924.31,14.4
195 | 1411,849.64,14.4
196 | 77.575,192.73,14.4
197 | 237.7,112.83,14.4
198 | 183.09,1319.3,14.494
199 | 392.7,519.13,14.538
200 | 412.83,1395.8,14.72
201 | 208.01,1590.4,14.842
202 | 370.81,433.52,14.842
203 | 298.91,1358.5,14.927
204 | 20.734,114.9,15.012
205 | 526.99,657.75,15.054
206 | 1274.9,1211.3,15.054
207 | 344,555.56,15.2
208 | 1102.7,777.37,15.2
209 | 1093.4,605.84,15.2
210 | 1518.6,1332.9,15.418
211 | 1583.2,172.57,15.43
212 | 370.31,863.38,15.513
213 | 1363.8,594.72,15.554
214 | 23.382,1396.7,15.595
215 | 329.9,1297.4,15.635
216 | 510.32,964.78,15.958
217 | 270.01,1037.8,15.998
218 | 43.667,800.54,16
219 | 13.651,426.19,16
220 | 35.869,728.22,16
221 | 22.952,696.32,16
222 | 315.64,1037,16
223 | 1161.2,1169.8,16
224 | 622.48,131.65,16
225 | 1326.7,1357.7,16
226 | 1301.7,828.02,16
227 | 475.96,49.453,16
228 | 236.69,1492,16
229 | 259.18,1490.4,16
230 | 385.13,654.1,16
231 | 325.43,434.94,16
232 | 631.15,1247.5,16
233 | 1201.2,21.085,16
234 | 181.81,348.6,16
235 | 471.55,865.44,16
236 | 481.96,789.74,16
237 | 419.41,1403.9,16
238 | 459.12,209.24,16
239 | 389.46,10.421,16
240 | 28,499.3,16.037
241 | 141.62,1356,16.116
242 | 256.21,1649.4,16.156
243 | 338.92,870.03,16.391
244 | 190.51,361.08,16.545
245 | 190.06,717.68,16.622
246 | 1617.6,157.52,16.646
247 | 64.152,1658,16.888
248 | 243.31,866.93,17.113
249 | 301.26,72.626,17.298
250 | 437.35,500.65,17.334
251 | 468.51,732.84,17.371
252 | 13.475,646.66,17.6
253 | 1292.4,1535.4,17.6
254 | 136.73,1233.4,17.6
255 | 308.51,1029,17.6
256 | 1695,1654.2,17.6
257 | 1115.1,1267.7,17.626
258 | 310.19,15.448,17.77
259 | 1104,161.31,18.16
260 | 241.92,327.23,18.369
261 | 1649.3,110.54,18.678
262 | 155.27,1205,18.678
263 | 77.502,410.33,18.712
264 | 380.33,751.47,18.72
265 | 8.0318,829.88,18.982
266 | 296.2,1498.2,18.982
267 | 160.78,1466.6,19.182
268 | 43.921,112.68,19.2
269 | 38.837,1367.9,19.2
270 | 162.25,49.647,19.2
271 | 173.07,194.64,19.2
272 | 9.7042,786.36,19.2
273 | 439.94,147.84,19.2
274 | 1132.6,846.96,19.2
275 | 278.77,1492.7,19.2
276 | 1427.5,867.49,19.2
277 | 30.943,1334.3,19.2
278 | 484.62,972.59,19.2
279 | 458.26,47.156,19.2
280 | 356.16,1057.6,19.2
281 | 444.13,700.72,19.2
282 | 112.61,1521.3,19.2
283 | 1016.6,420.94,19.2
284 | 557.38,670.29,19.2
285 | 1204.5,40.688,19.2
286 | 1482.1,1426.6,19.2
287 | 1521.4,1460.2,19.2
288 | 1348,678.05,19.2
289 | 248.93,687.83,19.216
290 | 60.08,1026.6,19.577
291 | 320.7,1648.8,19.739
292 | 1053.9,608.76,19.835
293 | 1234.4,1325.9,19.867
294 | 251.09,806.5,20.09
295 | 1654.5,667.53,20.312
296 | 259.69,654.35,20.776
297 | 885.97,1463.4,20.8
298 | 248.85,553.93,20.8
299 | 48.114,1304.9,20.8
300 | 971.46,1364.9,20.8
301 | 132.59,1269,20.8
302 | 1264.6,653.4,20.8
303 | 84.961,254.75,20.8
304 | 1012.8,357.06,20.8
305 | 640.15,146.2,20.8
306 | 376.13,1604,20.8
307 | 1072.2,725.99,20.8
308 | 1067.3,367.24,20.8
309 | 1090,1155.1,21.08
310 | 29.343,171.73,21.2
311 | 315.72,810.42,21.409
312 | 400.43,1629.4,21.439
313 | 123.32,995.89,21.528
314 | 181.8,745.04,21.617
315 | 745.64,82.447,21.675
316 | 158.16,416.3,21.763
317 | 216.27,66.798,21.763
318 | 9.3387,665.78,21.851
319 | 26.233,17.549,22.284
320 | 1158.2,920.64,22.398
321 | 34.415,857.59,22.4
322 | 196.43,94.449,22.4
323 | 112.22,140.02,22.4
324 | 58.932,150.84,22.4
325 | 299.87,652.52,22.4
326 | 1290.8,1247.2,22.4
327 | 76.958,1558,22.4
328 | 1156.3,996.91,22.4
329 | 617.1,664.22,22.4
330 | 556.17,723.15,22.4
331 | 197.95,1422.3,22.4
332 | 1346.5,1079.5,22.4
333 | 1695.8,1580.3,22.4
334 | 1537.5,112.4,22.4
335 | 191.11,419.71,22.454
336 | 101.4,1080.8,22.454
337 | 420.47,99.936,22.82
338 | 439.01,738.43,22.876
339 | 89.222,124.29,22.959
340 | 61.983,976.35,23.042
341 | 1528.7,637.77,23.294
342 | 491.69,776.55,23.68
343 | 376.55,1126.4,23.776
344 | 260.6,1423.8,24
345 | 1415,452.98,24
346 | 163.55,872.13,24.358
347 | 1313.1,494.3,24.515
348 | 116.27,880.2,24.515
349 | 593.45,1619.6,24.747
350 | 31.407,1676.1,24.747
351 | 1294.7,917.74,25.003
352 | 68.293,869.86,25.181
353 | 301.53,260.36,25.357
354 | 1546.3,724.27,25.588
355 | 349.57,72.504,25.6
356 | 398.46,452.26,25.6
357 | 563.92,750.48,25.6
358 | 312.22,1589.9,25.6
359 | 465.36,883.71,25.6
360 | 1612,1367.5,25.6
361 | 80.48,543.93,25.6
362 | 112.24,264.87,25.6
363 | 378.6,766.18,26.4
364 | 1033.8,540.45,26.56
365 | 165.01,154.7,26.631
366 | 342.44,1685.8,26.963
367 | 1290.2,1351.2,27.034
368 | 1584.5,1442.3,27.2
369 | 1628.3,1266.5,27.593
370 | 74.173,940.17,27.686
371 | 1010.9,1121.7,27.709
372 | 1408.2,754.26,28
373 | 393.72,266.06,28.051
374 | 1437.7,715.64,28.3
375 | 1490.2,1414.5,28.8
376 | 651.38,727.24,28.989
377 | 260.55,981.04,30.923
378 | 47.559,31.524,32
379 | 1526.6,1029.3,32
380 | 69.066,212.13,32
381 | 382.39,72.066,32
382 | 1208.3,905.94,32
383 | 283.54,1183.2,32
384 | 891.48,1022.4,32
385 | 975.02,891.39,32
386 | 1307.8,20.129,32
387 | 887.03,1203,32
388 | 1236.4,1667.6,32.742
389 | 606.6,99.408,33.6
390 | 39.586,210.54,33.833
391 | 1676.6,118.58,34.503
392 | 1396,770.5,34.88
393 | 1405.6,983.5,35.2
394 | 291.96,759.81,35.2
395 | 199.84,505.68,35.2
396 | 1691.5,1084.9,37.651
397 | 38.361,428.25,37.78
398 | 1388.3,1639.7,38.032
399 | 1652.5,147.16,38.098
400 | 23.126,300.91,41.6
401 | 116.38,779.43,41.6
402 | 1152.3,1075.4,41.6
403 | 69.833,206.09,43.963
404 | 264.69,1289.4,46.4
405 | 319.41,405.58,49.482
406 | 1234.8,174.2,56
407 | 189.26,667.69,64
408 | 119.68,201.61,69.92
409 | 363.88,322.7,78.136
410 | 1427.8,1323.3,78.501
411 |
--------------------------------------------------------------------------------
/pycda/sample_imgs/mercury.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/sample_imgs/mercury.png
--------------------------------------------------------------------------------
/pycda/sample_imgs/rgb_sample.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/sample_imgs/rgb_sample.jpg
--------------------------------------------------------------------------------
/pycda/sample_imgs/selection0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/sample_imgs/selection0.png
--------------------------------------------------------------------------------
/pycda/sample_imgs/selection2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/sample_imgs/selection2.png
--------------------------------------------------------------------------------
/pycda/sample_imgs/selection3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/sample_imgs/selection3.png
--------------------------------------------------------------------------------
/pycda/sample_imgs/selection4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/sample_imgs/selection4.png
--------------------------------------------------------------------------------
/pycda/sample_imgs/selection5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AlliedToasters/PyCDA/4b1a361f9171b91f3c42cafd88ba5380f25e31e0/pycda/sample_imgs/selection5.png
--------------------------------------------------------------------------------
/pycda/util_functions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import PIL.Image as Image
3 | from skimage import color
4 | import time, sys
5 |
6 | def get_steps(length, input_dimension, output_dimension):
7 | """Calculates each step along a dimension of the tile.
8 |     length is the total length of the dimension, input_dimension is the
9 |     detector's expected input length, and output_dimension is the detector's
10 |     output length; their difference determines the padding (zero if none).
11 | """
12 | steps = []
13 | padding = (input_dimension - output_dimension)//2
14 | remainder = (input_dimension - output_dimension)%2
15 | step_size = output_dimension - remainder
16 |     current_step = -padding
17 | steps.append(current_step)
18 | current_step += step_size
19 | #Iterate until final prediction "falls off" edge of input image
20 | while (steps[-1]+(2*step_size+padding)) < length:
21 | steps.append(current_step)
22 | current_step += step_size
23 | #Situation with no overlap on final tile or small length;
24 | if current_step+step_size+padding == length or length <= output_dimension:
25 | return steps, [step+padding-remainder for step in steps]
26 | else:
27 | final_step = length - step_size - padding - remainder
28 | steps.append(final_step)
29 | return steps, [step+padding for step in steps]
30 |
31 | def crop_array(input_array, ylength, xlength=None, orgn=(0,0)):
32 | """Crops an image in numpy array format. Pads crops outside
33 | of input image with zeros if necessary. If no y dimension
34 | is specified, outputs a square image.
35 | """
36 |     if xlength is None:
37 | xlength = ylength
38 | ylength = int(ylength)
39 | xlength = int(xlength)
40 | orgn = (int(orgn[0]), int(orgn[1]))
41 | target = np.zeros((ylength, xlength))
42 | #slice ranges
43 | ymin = max(orgn[0], 0)
44 | xmin = max(orgn[1], 0)
45 | ymax = min(orgn[0] + ylength, input_array.shape[0])
46 | xmax = min(orgn[1] + xlength, input_array.shape[1])
47 | yslice = slice(ymin, ymax)
48 | xslice = slice(xmin, xmax)
49 | #top, left, bottom, right pads
50 | tp = max(-orgn[0], 0)
51 | lp = max(-orgn[1], 0)
52 | bp = max((ylength + orgn[0] - tp - input_array.shape[0]), 0)
53 | rp = max((xlength + orgn[1] - lp - input_array.shape[1]), 0)
54 | #insert slice into the right spot.
55 | target[tp:(ylength-bp),lp:(xlength-rp)] = input_array[yslice, xslice]
56 | return target
57 |
58 | def make_batch(image, crop_dims, crops, out_dims=None):
59 | """Assembles a batch for model."""
60 | if not isinstance(crop_dims, list):
61 | crop_dims = [crop_dims for x in range(len(crops))]
62 | batch = []
63 | for i, crop_coords in enumerate(crops):
64 | next_image = crop_array(image, crop_dims[i][0], crop_dims[i][1], crop_coords)
65 |         if out_dims is not None:
66 | if next_image.shape != out_dims:
67 | resized = Image.fromarray(next_image).resize((out_dims[1], out_dims[0]))
68 | next_image = np.array(resized)
69 | if len(next_image.shape) == 2:
70 | #add color channel to greyscale image
71 | next_image = np.expand_dims(next_image, axis=-1)
72 | if next_image.dtype == np.dtype('uint8'):
73 | #Rescale pixel values
74 | next_image = next_image/255
75 | batch.append(next_image)
76 | batch = np.array(batch)
77 | return batch
78 |
79 | def get_crop_specs(proposal, classifier):
80 | """Converts a crater proposal into cropping function
81 | arguments.
82 | """
83 | lat = proposal[0]
84 | long = proposal[1]
85 | px = classifier.crater_pixels
86 | dim = classifier.input_dims
87 | #"Radius" of image
88 | r_im = proposal[2]*min(dim)/(2*px)
89 | #get four parameters of image box
90 | upper = lat - r_im
91 | left = long - r_im
92 | return (round(upper), round(left)), (round(2 * r_im), round(2 * r_im))
93 |
94 | def resolve_color_channels(prediction, model):
95 | """Converts an image to the desired number of color
96 | channels. Returns converted image.
97 | """
98 | image = prediction.input_image.copy()
99 | desired = model.input_channels
100 | if len(image.shape) == 2:
101 | image_channels = 1
102 | else:
103 | image_channels = image.shape[2]
104 | if image_channels == desired:
105 | return image
106 | elif image_channels == 3 and desired == 1:
107 | return color.rgb2grey(image)
108 | elif image_channels == 1 and desired > 1:
109 | print('Working on feature to convert greyscale to RGB. '
110 | 'Try using a greyscale detector.')
111 |     raise Exception('The color channels of the input image are '
112 |                     'not compatible with this model. '
113 |                     'Look for a model with the proper number of '
114 |                     'color channels for your image.')
115 | return image
116 |
117 | def remove_ticks(ax_obj):
118 | """takes an ax object from matplotlib and removes ticks."""
119 | ax_obj.tick_params(
120 | axis='both',
121 | which='both',
122 | bottom=False,
123 | top=False,
124 | labelbottom=False,
125 | right=False,
126 | left=False,
127 | labelleft=False
128 | )
129 | return ax_obj
130 |
131 |
132 | def update_progress(progress):
133 | """Displays or updates a console progress bar
134 | Accepts a float between 0 and 1. Any int will be converted to a float.
135 | A value under 0 represents a 'halt'.
136 | A value at 1 or bigger represents 100%
137 | """
138 | barLength = 25 # Modify this to change the length of the progress bar
139 | status = ""
140 | if isinstance(progress, int):
141 | progress = float(progress)
142 | if not isinstance(progress, float):
143 | progress = 0
144 | status = "error: progress var must be float\r\n"
145 | if progress < 0:
146 | progress = 0
147 | status = "Halt...\r\n"
148 | if progress >= 1:
149 | progress = 1
150 | status = "Done...\r\n"
151 | block = int(round(barLength*progress))
152 | text = "\rProgress: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), round(progress*100), status)
153 | sys.stdout.write(text)
154 | sys.stdout.flush()
--------------------------------------------------------------------------------
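A quick demonstration of the tiling helpers above: `crop_array` zero-pads any part of a crop that falls outside the input, and `get_steps` computes overlapping window origins for a detector whose output is smaller than its input (a sketch; the dimensions are arbitrary, and the behavior shown matches the assertions in test.py):

```
import numpy as np
from pycda.util_functions import get_steps, crop_array

image = np.random.rand(150, 200)

# A 100x100 crop whose origin lies above and left of the image;
# the out-of-bounds band comes back as zeros.
crop = crop_array(image, 100, orgn=(-26, -10))
assert np.array_equal(crop[:26, :10], np.zeros((26, 10)))

# Window origins for a 241px dimension, 160px detector input, 120px output:
steps_in, steps_out = get_steps(241, 160, 120)
print(steps_in, steps_out)   # three matched input/output origins
```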
/readme.md:
--------------------------------------------------------------------------------
1 | # PyCDA: Simple Crater Detection
2 | Go from image to crater annotations in minutes.
3 |
4 | PyCDA is a crater detection algorithm (CDA) written in Python.
5 |
6 | Inspired by research in applying convolutional neural networks to crater detection (Benedix et al.) and crater candidate classification (Cohen et al.), PyCDA is aimed at making CDA research modular and usable.
7 | The current release, pre-alpha "fun" 0.1.14, is a conceptual demonstration; its performance on some datasets is still too poor for real use, but it will yield crater detections.
8 |
9 | ## Getting Started
10 |
11 | At its most basic level, PyCDA is built to be easy to use, and that starts with installation; pre-alpha "fun" version 0.1.14 is now available via PyPI with:
12 |
13 | ```
14 | pip install pycda
15 | ```
16 |
17 | ### Prerequisites
18 |
19 | PyCDA currently supports Python 3.6; we recommend using a virtual environment or an environment manager such as conda, as PyCDA has not been tested against older versions of its dependencies.
20 |
21 | ### Installing
22 |
23 | PyCDA's current release, "fun" 0.1.14, is a prototype pre-release. However, it is available for download via PyPI for the adventurous.
24 | From your python 3.6 environment, install with pip via the command line:
25 |
26 | ```
27 | pip install pycda
28 | ```
29 |
30 | ### Using PyCDA
31 |
32 | For a quick prediction "out of the box," use the commands:
33 |
34 | ```
35 | from pycda import CDA, load_image
36 |
37 | cda = CDA()
38 | image = load_image('my_image_filepath.png')
39 | detections = cda.predict(image)
40 | ```
41 |
42 | The output of the call to .predict is a pandas dataframe with columns 'lat' (crater location from the top of the image), 'long' (crater location from the left edge of the image), and 'diameter' (crater diameter in pixels).
43 |
44 | PyCDA currently handles images using PIL; image files on disk must therefore be in a format that PIL supports. Numpy arrays of raster images are also supported; pass them in as you would an image object.
45 |
46 | PyCDA provides visualization and error analysis tools as well; check out the demo notebook for a peek at these features!
47 |
48 | Documentation on the entire project is available here.
49 |
50 | ## Running the tests
51 |
52 | Test your installation with test.py, available from this repo. With wget:
53 |
54 | ```
55 | wget https://raw.githubusercontent.com/AlliedToasters/PyCDA/master/test.py
56 | ```
57 |
58 | Then, run
59 |
60 | ```
61 | python test.py
62 | ```
63 |
64 |
65 | ## Versioning
66 |
67 | PyCDA follows something like [SemVer](http://semver.org/) guidelines; the current release is "fun" 0.1.14 and is still in early development. I fixed the data file loading issues that came with 'super top secret pre-alpha release 0.1.1', and we finally have something that does something "out of the box."
68 |
69 | ## Authors
70 |
71 | * **Michael Klear** - *Initial work* - [AlliedToasters](https://github.com/AlliedToasters)
72 |
73 | ## Contributing
74 |
75 | PyCDA is a community project and we welcome anybody in the CDA research community, planetary scientists, or Python developers to the fold. Please reach out to Michael Klear at:
76 |
77 | michael.klear@colorado.edu
78 |
79 | -or-
80 |
81 | michael.r.klear@gmail.com
82 |
83 | to contribute!
84 |
85 |
86 | ## License
87 |
88 | This project is licensed under the MIT License - see the [LICENSE.txt](LICENSE.txt) file for details
89 |
--------------------------------------------------------------------------------
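For a slightly fuller quick-start than the readme snippet above, the bundled sample data can feed the same pipeline (a sketch; assumes the packaged models load, and `cda.predictions` is inferred from the test suite rather than documented API):

```
from pycda import CDA
from pycda.sample_data import get_sample_image

cda = CDA()
image = get_sample_image()        # bundled sample tile, holdout_tile.pgm
detections = cda.predict(image)   # dataframe: lat, long, diameter
print(detections.head())

prediction = cda.predictions[-1]
prediction.show()                 # detections drawn over the sample tile
```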
/requirements.txt:
--------------------------------------------------------------------------------
1 | pandas==0.22.0
2 | numpy==1.14.1
3 | scikit-image==0.13.1
4 | scikit-learn==0.19.1
5 | h5py==2.7.1
6 | -e git+https://github.com/AlliedToasters/PyCDA.git@2a5c85c4563cac3dfe0bd05c4b4866f3f7978e87#egg=pycda
7 | tensorflow==1.12.1
8 | Keras==2.1.5
9 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = readme.md
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name='pycda',
5 | version='0.1.16',
6 | description='Python Crater Detection Algorithm (PyCDA) is a pipeline for crater detection; go from image to annotated crater stats in minutes.',
7 | url='https://github.com/AlliedToasters/PyCDA',
8 | download_url='https://github.com/AlliedToasters/PyCDA/archive/0.1.16.tar.gz',
9 | keywords = ['crater detection', 'astronomy', 'planetary science', 'planetary geology'],
10 | author='Michael Klear',
11 | author_email='michael.klear@colorado.edu',
12 | license='MIT',
13 | packages=find_packages(),
14 | classifiers=[],
15 | data_files = [
16 | 'pycda/models/tinynet.h5',
17 | 'pycda/models/unet.h5',
18 | 'pycda/models/classifier_12x12_2.h5',
19 | 'pycda/sample_imgs/holdout_tile_labels.csv',
20 | 'pycda/sample_imgs/holdout_tile.pgm',
21 | 'pycda/sample_imgs/rgb_sample.jpg',
22 | 'pycda/sample_imgs/mercury.png',
23 | 'pycda/sample_imgs/selection0.png',
24 | 'pycda/sample_imgs/selection2.png',
25 | 'pycda/sample_imgs/selection3.png',
26 | 'pycda/sample_imgs/selection4.png',
27 | 'pycda/sample_imgs/selection5.png'
28 | ],
29 | include_package_data=True,
30 | install_requires=[
31 | 'numpy',
32 | 'pandas',
33 | 'scikit-image',
34 | 'scikit-learn',
35 | 'h5py',
36 | 'tensorflow',
37 | 'Keras'
38 | ]
39 | )
40 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import pycda
3 | from pycda import error_stats as es
4 | from pycda import predictions as pr
5 | from pycda.detectors import _DummyDetector
6 | from pycda.extractors import _DummyExtractor
7 | from pycda.classifiers import _DummyClassifier, ConvolutionalClassifier
8 | from pycda.sample_data import get_sample_image, get_sample_csv
9 | from pycda.util_functions import get_steps, crop_array, make_batch
10 | import numpy as np
11 | import pandas as pd
12 | import matplotlib.pyplot as plt
13 |
14 | class TestUtilFuncs(unittest.TestCase):
15 |
16 | def setUp(self):
17 | return
18 |
19 | def test_get_steps(self):
20 | steps_in, steps_out = get_steps(100, 160, 120)
21 | assert len(steps_in) == len(steps_out)
22 | assert len(steps_in) == 1
23 | steps_in, steps_out = get_steps(130, 160, 120)
24 | assert len(steps_in) == len(steps_out)
25 | assert len(steps_in) == 2
26 | steps_in, steps_out = get_steps(241, 160, 120)
27 | assert len(steps_in) == len(steps_out)
28 | assert len(steps_in) == 3
29 |
30 | def test_crop_array(self):
31 | test_image = np.random.rand(150, 200)
32 | test1 = crop_array(test_image, 100)
33 | assert np.array_equal(test1, test_image[:100, :100])
34 | test2 = crop_array(test_image, 100, orgn=(-26, -10))
35 | assert np.array_equal(test2[:26, :10], np.zeros((26, 10)))
36 | assert np.array_equal(test2[26:, 10:], test_image[:74, :90])
37 | test3 = crop_array(test_image, 100, orgn=(100, 100))
38 | assert np.array_equal(test3[50:, :], np.zeros((50, 100)))
39 | assert np.array_equal(test3[:50, :], test_image[100:, 100:])
40 | test4 = crop_array(test_image, 100, 20, orgn=(140, 190))
41 | assert np.array_equal(test4[10:, 10:], np.zeros((90, 10)))
42 | assert np.array_equal(test4[:10, :10], test_image[140:, 190:])
43 |
44 | def test_make_batch(self):
45 | img_height = np.random.randint(200, 1000)
46 | img_width = np.random.randint(200, 1000)
47 | test_image = np.random.rand(img_height, img_width)
48 | shape = test_image.shape
49 | height, width = shape[0], shape[1]
50 | in1 = np.random.randint(2, height)
51 | in2 = np.random.randint(2, width)
52 | crops = [
53 | (0, 0),
54 | (1, 1),
55 | (2, 2)
56 | ]
57 | try:
58 | batch = make_batch(test_image, (in1, in2), crops)
59 |         except Exception:
60 | print('problem batch out dimensions: ', in1, in2)
61 | raise Exception('Problem building batch.')
62 | assert batch.shape == (3, in1, in2, 1)
63 |
64 | class TestImageFlow(unittest.TestCase):
65 |
66 | def setUp(self):
67 | print('\n')
68 | self.cda = pycda.CDA(
69 | detector=_DummyDetector(),
70 | extractor=_DummyExtractor(),
71 | classifier=_DummyClassifier()
72 | )
73 | img_height = np.random.randint(200, 1000)
74 | img_width = np.random.randint(200, 1000)
75 | self.test_image = np.random.rand(img_height, img_width)
76 | self.prediction = pr.Prediction(self.test_image, 'test1', self.cda)
77 | self.cda.predictions.append(self.prediction)
78 |
79 | def test_get_prediction(self):
80 | prediction = self.cda._get_prediction(self.test_image)
81 | assert prediction == self.prediction
82 | img_height = np.random.randint(2, 1000)
83 | img_width = np.random.randint(2, 1000)
84 | new_test_image = np.random.rand(img_height, img_width)
85 | new_prediction = self.cda._get_prediction(new_test_image)
86 | assert new_prediction != self.prediction
87 |
88 | def test_split_image(self):
89 | self.prediction = self.cda._prepare_detector(self.prediction)
90 | try:
91 | assert len(self.prediction.image_split_coords) > 0
92 | assert len(self.prediction.det_split_coords) > 0
93 | if self.test_image.shape[0] > self.cda.detector.output_dims[0]:
94 | assert self.prediction.det_split_coords[-1][0] + self.cda.detector.output_dims[0] \
95 | == self.test_image.shape[0]
96 | else:
97 | assert self.prediction.det_split_coords[-1][0] == 0
98 | if self.test_image.shape[1] > self.cda.detector.output_dims[1]:
99 | assert self.prediction.det_split_coords[-1][1] + self.cda.detector.output_dims[1] \
100 | == self.test_image.shape[1]
101 | else:
102 | assert self.prediction.det_split_coords[-1][1] == 0
103 | except AssertionError:
104 | print('input img dims: ', self.test_image.shape)
105 | raise AssertionError()
106 |
107 | def test_batch_detect(self):
108 | batch_size = np.random.randint(1, 5)
109 | prediction = self.cda.predictions[0]
110 | prediction = self.cda._prepare_detector(prediction)
111 | self.cda._batch_detect(prediction, batch_size)
112 | plt.imshow(self.test_image)
113 | assert self.test_image.shape == prediction.detection_map.shape
114 | assert np.array_equal(self.test_image, prediction.detection_map)
115 |
116 | def test_batch_classify(self):
117 | batch_size = np.random.randint(1, 100)
118 | prediction = self.cda.predictions[0]
119 | prediction.proposals = get_sample_csv()
120 | prediction.input_image = np.array(get_sample_image().image)
121 | self.cda._batch_classify(prediction)
122 |
123 | class TestDetector(unittest.TestCase):
124 |
125 | def setUp(self):
126 | in0 = np.random.randint(150, 250)
127 | in1 = np.random.randint(150, 250)
128 | out0 = np.random.randint(50, 150)
129 | out1 = np.random.randint(50, 150)
130 | self.detector = _DummyDetector(input_dims=(in0, in1), output_dims=(out0, out1))
131 |
132 | def test_dummy_detector(self):
133 | test_img = np.random.rand(self.detector.input_dims[0], self.detector.input_dims[1])
134 | batch = np.array([np.expand_dims(test_img, axis=-1)])
135 | prediction = self.detector.predict(batch)
136 | offsety = (self.detector.input_dims[0] - self.detector.output_dims[0])//2
137 | offsetx = (self.detector.input_dims[1] - self.detector.output_dims[1])//2
138 | yfin = offsety+self.detector.output_dims[0]
139 | xfin = offsetx+self.detector.output_dims[1]
140 | assert np.array_equal(prediction[0, :, :, 0], test_img[offsety:yfin, offsetx:xfin])
141 |
142 | class TestPrediction(unittest.TestCase):
143 |
144 | def setUp(self):
145 | self.cda = pycda.CDA(
146 | detector=_DummyDetector(),
147 | extractor=_DummyExtractor(),
148 | classifier=_DummyClassifier()
149 | )
150 | img_height = np.random.randint(500, 1500)
151 | img_width = np.random.randint(500, 1500)
152 | self.test_image = np.random.rand(img_height, img_width)
153 | self.prediction = pr.Prediction(self.test_image, 'test1', self.cda)
154 | self.cda.predictions.append(self.prediction)
155 |
156 | def test_record_detection(self):
157 | assert self.prediction.detection_map.shape == self.test_image.shape
158 | ins_y = np.random.randint(5, self.test_image.shape[0])
159 | ins_x = np.random.randint(5, self.test_image.shape[1])
160 | self.prediction.det_split_coords.append((ins_y, ins_x))
161 | try:
162 | det_y = np.random.randint(5, self.test_image.shape[0]-ins_y)
163 | det_x = np.random.randint(5, self.test_image.shape[1]-ins_x)
164 | except ValueError:
165 | det_y, det_x = 5, 5
166 | detection = np.random.rand(det_y, det_x)
167 | self.prediction._record_detection(detection, 0)
168 | pred_map_slice = self.prediction.detection_map[ins_y:ins_y+det_y, ins_x:ins_x+det_x]
169 | assert np.array_equal(detection, pred_map_slice)
170 |
171 | def test_batch_record_detection(self):
172 | assert self.prediction.detection_map.shape == self.test_image.shape
173 | batch = []
174 | batch_size = np.random.randint(2, 10)
175 | pred_map_slices = []
176 | indices = []
177 | det_y = None
178 | det_x = None
179 | for n in range(batch_size):
180 | indices.append(n)
181 |             if det_y is None:
182 | ins_y = np.random.randint(5, self.test_image.shape[0])
183 | ins_x = np.random.randint(5, self.test_image.shape[1])
184 | else:
185 | ins_y = np.random.randint(5, self.test_image.shape[0]-det_y)
186 | ins_x = np.random.randint(5, self.test_image.shape[1]-det_x)
187 | self.prediction.det_split_coords.append((ins_y, ins_x))
188 |             if det_y is None:
189 | try:
190 | det_y = np.random.randint(5, self.test_image.shape[0]-ins_y)
191 | det_x = np.random.randint(5, self.test_image.shape[1]-ins_x)
192 | except ValueError:
193 | det_y = self.test_image.shape[0]-ins_y
194 |                     det_x = self.test_image.shape[1]-ins_x
195 | detection = np.expand_dims(np.random.rand(det_y, det_x), axis=-1)
196 | batch.append(detection)
197 | batch = np.array(batch)
198 | try:
199 | self.prediction._batch_record_detection(batch, indices)
200 |         except Exception:
201 | raise Exception('Error calling ._batch_record_detection')
202 |
203 | if __name__ == "__main__":
204 | unittest.main()
205 |
--------------------------------------------------------------------------------