├── .gitattributes ├── .gitignore ├── .gitmodules ├── .readthedocs.yml ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── JOSS_figure_workflow.png ├── LICENSE ├── README.md ├── corebreakout ├── __init__.py ├── column.py ├── datasets │ ├── __init__.py │ └── polygondataset.py ├── defaults.py ├── segmenter.py ├── utils.py └── viz.py ├── docs ├── Makefile ├── _build │ ├── doctrees │ │ ├── creating_datasets.doctree │ │ ├── environment.pickle │ │ ├── index.doctree │ │ ├── layout_parameters.doctree │ │ ├── model_building.doctree │ │ ├── scripts_reference.doctree │ │ └── source │ │ │ ├── corebreakout.datasets.doctree │ │ │ ├── corebreakout.doctree │ │ │ └── modules.doctree │ └── html │ │ ├── .buildinfo │ │ ├── _images │ │ ├── JOSS_figure_workflow.png │ │ ├── endpts_auto.png │ │ ├── endpts_auto_all.png │ │ ├── endpts_explicit.png │ │ ├── endpts_tray.png │ │ └── labelme1.png │ │ ├── _sources │ │ ├── creating_datasets.rst.txt │ │ ├── index.rst.txt │ │ ├── layout_parameters.rst.txt │ │ ├── model_building.rst.txt │ │ ├── scripts_reference.rst.txt │ │ └── source │ │ │ ├── corebreakout.datasets.rst.txt │ │ │ ├── corebreakout.rst.txt │ │ │ └── modules.rst.txt │ │ ├── _static │ │ ├── basic.css │ │ ├── classic.css │ │ ├── css │ │ │ ├── badge_only.css │ │ │ └── theme.css │ │ ├── doctools.js │ │ ├── documentation_options.js │ │ ├── file.png │ │ ├── fonts │ │ │ ├── Inconsolata-Bold.ttf │ │ │ ├── Inconsolata-Regular.ttf │ │ │ ├── Inconsolata.ttf │ │ │ ├── Lato-Bold.ttf │ │ │ ├── Lato-Regular.ttf │ │ │ ├── Lato │ │ │ │ ├── lato-bold.eot │ │ │ │ ├── lato-bold.ttf │ │ │ │ ├── lato-bold.woff │ │ │ │ ├── lato-bold.woff2 │ │ │ │ ├── lato-bolditalic.eot │ │ │ │ ├── lato-bolditalic.ttf │ │ │ │ ├── lato-bolditalic.woff │ │ │ │ ├── lato-bolditalic.woff2 │ │ │ │ ├── lato-italic.eot │ │ │ │ ├── lato-italic.ttf │ │ │ │ ├── lato-italic.woff │ │ │ │ ├── lato-italic.woff2 │ │ │ │ ├── lato-regular.eot │ │ │ │ ├── lato-regular.ttf │ │ │ │ ├── lato-regular.woff │ │ │ │ └── lato-regular.woff2 │ │ │ ├── RobotoSlab-Bold.ttf │ │ │ ├── RobotoSlab-Regular.ttf │ │ │ ├── RobotoSlab │ │ │ │ ├── roboto-slab-v7-bold.eot │ │ │ │ ├── roboto-slab-v7-bold.ttf │ │ │ │ ├── roboto-slab-v7-bold.woff │ │ │ │ ├── roboto-slab-v7-bold.woff2 │ │ │ │ ├── roboto-slab-v7-regular.eot │ │ │ │ ├── roboto-slab-v7-regular.ttf │ │ │ │ ├── roboto-slab-v7-regular.woff │ │ │ │ └── roboto-slab-v7-regular.woff2 │ │ │ ├── fontawesome-webfont.eot │ │ │ ├── fontawesome-webfont.svg │ │ │ ├── fontawesome-webfont.ttf │ │ │ ├── fontawesome-webfont.woff │ │ │ └── fontawesome-webfont.woff2 │ │ ├── jquery-3.4.1.js │ │ ├── jquery.js │ │ ├── js │ │ │ ├── modernizr.min.js │ │ │ └── theme.js │ │ ├── language_data.js │ │ ├── minus.png │ │ ├── plus.png │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── sidebar.js │ │ ├── underscore-1.3.1.js │ │ └── underscore.js │ │ ├── creating_datasets.html │ │ ├── genindex.html │ │ ├── index.html │ │ ├── layout_parameters.html │ │ ├── model_building.html │ │ ├── objects.inv │ │ ├── py-modindex.html │ │ ├── scripts_reference.html │ │ ├── search.html │ │ ├── searchindex.js │ │ └── source │ │ ├── corebreakout.datasets.html │ │ ├── corebreakout.html │ │ └── modules.html ├── conf.py ├── creating_datasets.rst ├── images │ ├── JOSS_figure_data.png │ ├── JOSS_figure_workflow.png │ ├── endpts_auto.png │ ├── endpts_auto_all.png │ ├── endpts_explicit.png │ ├── endpts_tray.png │ ├── example_display_instances.png │ └── labelme1.png ├── index.rst ├── layout_parameters.rst ├── make.bat ├── model_building.rst ├── rtd_requirements.txt ├── 
scripts_reference.rst └── source │ ├── corebreakout.datasets.rst │ ├── corebreakout.rst │ └── modules.rst ├── notebooks ├── column_demo.ipynb ├── inspect_dataset.ipynb ├── segmenter_demo.ipynb ├── select_model.ipynb └── train_mrcnn_model.ipynb ├── paper.bib ├── paper.md ├── requirements.txt ├── scripts ├── get_ocr_depths.py ├── post_labeling_tools │ ├── README.md │ ├── join_xml_labels.py │ ├── picks_table_to_row_labels.py │ ├── split_npy_image.py │ └── stack_partial_xml.ipynb ├── process_directory.py ├── prune_imageData.py └── train_mrcnn_model.py ├── setup.py └── tests ├── __init__.py ├── data ├── column1.jpeg ├── column2.jpeg ├── column3.jpeg ├── example_labels.npy ├── example_masks.npy └── two_image_dataset │ ├── S00101409.jpeg │ ├── S00101409.json │ ├── S00111582.jpeg │ ├── S00111582.json │ └── dummy_depths.csv ├── notebooks ├── make_endpts_data.ipynb ├── test_inference.ipynb └── test_plotting.ipynb ├── test_column.py ├── test_dataset.py ├── test_segmenter.py └── test_utils.py /.gitattributes: -------------------------------------------------------------------------------- 1 | *.ipynb linguist-language=Python 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *ipynb_checkpoints* 2 | *__pycache__* 3 | *pytest_cache* 4 | 5 | # exclude assets 6 | assets/ 7 | 8 | *.h5 9 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "Mask_RCNN"] 2 | path = Mask_RCNN 3 | url = https://github.com/matterport/Mask_RCNN 4 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Build documentation in the docs/ directory with Sphinx 9 | sphinx: 10 | configuration: docs/conf.py 11 | 12 | # Build documentation with MkDocs 13 | #mkdocs: 14 | # configuration: mkdocs.yml 15 | 16 | # Optionally build your docs in additional formats such as PDF and ePub 17 | formats: all 18 | 19 | submodules: 20 | include: all 21 | 22 | # Optionally set the version of Python and requirements required to build your docs 23 | python: 24 | version: 3.6 25 | install: 26 | - requirements: docs/rtd_requirements.txt 27 | - method: pip 28 | path: Mask_RCNN 29 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) 6 | and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
7 | 8 | ## 0.4 [Unreleased] 9 | 10 | ### To-Do 11 | 12 | - Refactor `PolygonDataset` to use `labelme`'s `imageData` field rather than require jpegs 13 | 14 | ## 0.3 15 | 16 | ### Changed 17 | 18 | - Increased flexibility of included scripts 19 | - Converted `docs/*.md` files to `*.rst` 20 | - Minor paper and bibliography corrections/fixes 21 | 22 | ### Added 23 | 24 | - Built `sphinx`-generated html docs, hosted on `readthedocs` 25 | - Implemented `CoreColumn.iter_chunks()` 26 | - Added demo/inspection notebooks: 27 | - `column_demo.ipynb` 28 | - `segmenter_demo.ipynb` 29 | - `inspect_dataset.ipynb` 30 | - Added explanatory Markdown to `select_model.ipynb` 31 | - Added restriction in `setup.py` to require `tensorflow` before install 32 | 33 | ## 0.2 34 | 35 | ### Changed 36 | 37 | - Modified code format using `black` 38 | - `README.md` installation instructions 39 | - moved paper files to root directory 40 | 41 | ### Added 42 | 43 | - `requirements.txt` (except `imgaug`, which is included in `setup.py`) 44 | - CONTRIBUTING, LICENSE, and CODE_OF_CONDUCT files 45 | - `tests/notebooks` 46 | - tests for `CoreColumn` saving and loading 47 | 48 | ### Removed 49 | 50 | - superfluous notebooks and scripts 51 | 52 | ## 0.1 53 | 54 | Initial private version. 55 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at ross.meyer@utexas.edu. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | 1. Fork it! 4 | 2. Create your feature branch 5 | 3. Pass all `pytest` tests and verify `tests/notebooks/` run without errors 6 | 4. Update `CHANGELOG.md` 7 | - Describe changes under `[Unreleased]` section 8 | 5. Submit a pull request 9 | -------------------------------------------------------------------------------- /JOSS_figure_workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/JOSS_figure_workflow.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | CoreBreakout 2 | 3 | The MIT License (MIT) 4 | 5 | Copyright (c) 2019 Ross Meyer 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy 8 | of this software and associated documentation files (the "Software"), to deal 9 | in the Software without restriction, including without limitation the rights 10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | copies of the Software, and to permit persons to whom the Software is 12 | furnished to do so, subject to the following conditions: 13 | 14 | The above copyright notice and this permission notice shall be included in 15 | all copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 23 | THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CoreBreakout 2 | 3 | [![status](https://joss.theoj.org/papers/add2021f95268fd4cd2850b105f3d570/status.svg)](https://joss.theoj.org/papers/add2021f95268fd4cd2850b105f3d570) 4 | 5 | Requirements, installation, and contribution guidelines can be found below. Our full usage and API documentation can be found at: [corebreakout.readthedocs.io](https://corebreakout.readthedocs.io/en/latest/) 6 | 7 | ### Overview 8 | 9 | `corebreakout` is a Python package built around [matterport/Mask\_RCNN](https://github.com/matterport/Mask_RCNN) for the segmentation and depth-alignment of geological core sample images. It provides utilities and an API to enable the workflow depicted in the figure below, as well as a `CoreColumn` data structure to manage and manipulate the resulting depth-registered image data: 10 | 11 | ![](JOSS_figure_workflow.png) 12 | 13 | We are currently using this package to enable research on [Lithology Prediction of Slabbed Core Photos Using Machine Learning Models](https://figshare.com/articles/Lithology_Prediction_of_Slabbed_Core_Photos_Using_Machine_Learning_Models/8023835/2), and are working on getting a DOI for the project through the [Journal of Open Source Software](https://joss.theoj.org/). 14 | 15 | ## Getting Started 16 | 17 | ### Target Platform 18 | 19 | This package was developed on Linux (Ubuntu, PopOS), and has also been tested on OS X. It may work on other platforms, but we make no guarantees. 20 | 21 | ### Requirements 22 | 23 | In addition to Python`>=3.6`, the packages listed in [requirements.txt](requirements.txt) are required. Notable exceptions to the list are: 24 | 25 | - `1.3<=tensorflow-gpu<=1.14` (or possibly just `tensorflow`) 26 | - `mrcnn` via [submodule: matterport/Mask\_RCNN](https://github.com/matterport/Mask_RCNN/tree/3deaec5d902d16e1daf56b62d5971d428dc920bc) 27 | 28 | The TensorFlow requirement is not explicitly listed in `requirements.txt` due to the ambiguity between `tensorflow` and `tensorflow-gpu` in versions `<=1.14`. The latter is almost certainly required for training new models, although it may be possible to perform inference with saved models on CPU, and use of the `CoreColumn` data structure does not require a GPU. 29 | 30 | Note that TensorFlow GPU capabilities are implemented with [CUDA](https://developer.nvidia.com/cuda-zone), which requires a [supported NVIDIA GPU](https://developer.nvidia.com/cuda-gpus). 31 | 32 | #### Additional (Optional) Requirements 33 | 34 | Optionally, `jupyter` is required to run demo and test notebooks, and `pytest` is required to run unit tests. Both of these should be manually installed if you plan to modify or contribute to the package source code. 35 | 36 | We also provide a script for extraction of top/base depths from core image text using `pytesseract`. After installing the [Tesseract OCR Engine](https://github.com/tesseract-ocr/tesseract) on your machine, you can install the `pytesseract` package with `conda` or `pip`. 
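If you're curious what that script is doing under the hood, the core of the OCR step is roughly the sketch below -- the real cropping and depth-parsing logic lives in `scripts/get_ocr_depths.py`, and the image path and regex here are just placeholders:

```
# Illustrative sketch only: OCR a core box photo and collect depth-like numbers.
import re

import pytesseract
from PIL import Image

text = pytesseract.image_to_string(Image.open("path/to/core_box_image.jpeg"))
candidate_depths = sorted(float(m) for m in re.findall(r"\d+\.\d+", text))
print(candidate_depths)  # candidate top/base depths printed on the box
```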
37 | 38 | ### Download code 39 | 40 | ``` 41 | $ git clone --recurse-submodules https://github.com/rgmyr/corebreakout.git 42 | $ cd corebreakout 43 | ``` 44 | 45 | ### Download data (optional) 46 | 47 | To make use of the provided dataset and model, or to train a new model starting from the pretrained COCO weights, you will need to download the `assets.zip` folder from the [v0.2 Release](https://github.com/rgmyr/corebreakout/releases/tag/v0.2). 48 | 49 | Unzip and place this folder in the root directory of the repository (its contents will be ignored by `git` -- see the `.gitignore`). If you would like to place it elsewhere, you should modify the paths in [corebreakout/defaults.py](https://github.com/rgmyr/corebreakout/blob/master/corebreakout/defaults.py) to point to your preferred location. 50 | 51 | The current version of `assets/data` has JSON annotation files which include an `imageData` field representing the associated images as strings. For now you can delete this field and reduce the size of the data with `scripts/prune_imageData.py`: 52 | 53 | ``` 54 | $ python scripts/prune_imageData.py assets/ 55 | ``` 56 | 57 | ### Installation 58 | 59 | We recommend installing `corebreakout` and its dependencies in an isolated environment, and further recommend the use of `conda`. See [Conda: Managing environments](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). 60 | 61 | --- 62 | 63 | To create a new `conda` environment called `corebreakout-env` and activate it: 64 | 65 | ``` 66 | $ conda create -n corebreakout-env python=3.6 tensorflow-gpu=1.14 67 | $ conda activate corebreakout-env 68 | ``` 69 | 70 | **Note:** If you want to try a CPU-only installation, then replace `tensorflow-gpu` with `tensorflow`. You may also lower the version number if you are on a machine with `CUDA<10.0` (required for TensorFlow`>=1.13`). See [TensorFlow GPU requirements](https://www.tensorflow.org/install/gpu#software_requirements) for more compatibility details. 71 | 72 | --- 73 | 74 | Then install the rest of the required packages into the environment: 75 | 76 | ``` 77 | $ conda install --file requirements.txt 78 | ``` 79 | 80 | --- 81 | 82 | Finally, install `mrcnn` and `corebreakout` using `pip`. Develop mode installation (`-e`) is recommended (but not required) for `corebreakout`, since many users will want to change some of the default parameters to suit their own data without having to reinstall afterward: 83 | 84 | ``` 85 | $ pip install ./Mask_RCNN 86 | $ pip install -e . 87 | ``` 88 | 89 | ## Usage 90 | 91 | Please refer to our [readthedocs page](https://corebreakout.readthedocs.io/en/latest/) for full documentation! 92 | 93 | ## Development and Community Guidelines 94 | 95 | ### Submit an Issue 96 | 97 | - Navigate to the repository's [issue tab](https://github.com/rgmyr/corebreakout/issues) 98 | - Search for existing related issues 99 | - If necessary, create and submit a new issue 100 | 101 | ### Contributing 102 | 103 | - Please see [`CONTRIBUTING.md`](CONTRIBUTING.md) and the [Code of Conduct](CODE_OF_CONDUCT.md) for how to contribute to the project 104 | 105 | ### Testing 106 | 107 | - Most `corebreakout` functionality not requiring trained model weights can be verified with `pytest`: 108 | 109 | ``` 110 | $ cd <path/to/corebreakout> 111 | $ pytest .
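# A single test module can also be targeted directly, for example:
$ pytest tests/test_column.py -v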
112 | ``` 113 | 114 | - Model usage via the `CoreSegmenter` class can be verified by running `tests/notebooks/test_inference.ipynb` (requires saved model weights) 115 | - Plotting of `CoreColumn`s can be verified by running `tests/notebooks/test_plotting.ipynb` 116 | -------------------------------------------------------------------------------- /corebreakout/__init__.py: -------------------------------------------------------------------------------- 1 | from .column import CoreColumn 2 | from .segmenter import CoreSegmenter 3 | -------------------------------------------------------------------------------- /corebreakout/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .polygondataset import PolygonDataset 2 | -------------------------------------------------------------------------------- /corebreakout/datasets/polygondataset.py: -------------------------------------------------------------------------------- 1 | """ 2 | Dataset class for `labelme`-format polygonal segmentation labels 3 | """ 4 | import os 5 | import json 6 | from pathlib import Path 7 | from itertools import product 8 | 9 | import skimage 10 | import numpy as np 11 | 12 | from mrcnn.utils import Dataset 13 | 14 | from corebreakout import defaults 15 | 16 | 17 | class PolygonDataset(Dataset): 18 | """Subclass of `mrcnn.utils.Dataset` for polygonal JSON annotations in `labelme` format. 19 | See `wkentaro/labelme` to get the GUI. Outputs a JSON file with list of polygon 'shapes'. 20 | 21 | Labels must start with a unique class name, but instances can be differentiated afterward 22 | however you like. For example, different `col` instances can be labeled 'col1', 'col2', etc. 23 | And multiple polygons may belong to a single instance of a class. 24 | 25 | The tradeoff is that no class name can be a substring of any other class name. 26 | """ 27 | 28 | def __init__(self, classes=defaults.CLASSES): 29 | super().__init__() 30 | 31 | if not self.check_classes(classes): 32 | raise ValueError(f"{classes} are invalid.") 33 | 34 | for i, cls_name in zip(range(len(classes)), classes): 35 | # `source` doesn't matter for single dataset, just using 'cb' for 'corebreakout' 36 | self.add_class("cb", i + 1, cls_name) # Note: 'BG' = class 0 37 | 38 | def collect_annotated_images(self, data_dir, subset): 39 | """Check for annotation ('.json') and image ('.jpg'/'.jpeg') pairs, and add them. 40 | 41 | Corresponding annotation and image paths should differ only in their file extensions. 42 | """ 43 | data_dir = Path(data_dir) / subset 44 | assert data_dir.is_dir(), f"Directory {data_dir} must exist." 45 | 46 | annotations = sorted(data_dir.glob("*.json")) 47 | assert len( 48 | annotations 49 | ), "There must be at least one annotation file in `data_dir`" 50 | 51 | for ann_path in annotations: 52 | image_matches = list(data_dir.glob(ann_path.stem + "*.jp*g")) 53 | try: 54 | img_path = image_matches[0] 55 | self.add_image( 56 | "cb", image_id=ann_path.stem, path=img_path, ann_path=ann_path 57 | ) 58 | except IndexError: 59 | raise UserWarning(f"Matching .jpg/.jpeg not found for {ann_path}") 60 | 61 | def load_mask(self, image_id): 62 | """Return the `mask` and `class_ids` arrays for a given `image_id`.""" 63 | ann_path = self.image_info[image_id]["ann_path"] 64 | 65 | with open(ann_path, "r") as ann_file: 66 | ann_json = json.load(ann_file) 67 | 68 | return self.ann_to_mask(ann_json) 69 | 70 | def ann_to_mask(self, ann): 71 | """Take JSON annotation dict, return `(mask, class_ids)` arrays.
72 | 73 | Assumes that some classes may have multiple instances ('col1', 'col2', etc.), 74 | and that each labeled instance may be composed of multiple polygons. 75 | """ 76 | unique_labels = list(set([p["label"] for p in ann["shapes"]])) 77 | 78 | class_ids = np.array([self.label_to_class_id(l) for l in unique_labels]) 79 | 80 | h, w = ann["imageHeight"], ann["imageWidth"] 81 | masks = np.zeros((h, w, len(unique_labels)), dtype=bool) 82 | 83 | for polygon in ann["shapes"]: 84 | boundary = np.array(polygon["points"]) 85 | rr, cc = skimage.draw.polygon(boundary[:, 1], boundary[:, 0]) 86 | instance = unique_labels.index(polygon["label"]) 87 | masks[rr, cc, instance] = True 88 | 89 | return masks, class_ids 90 | 91 | def label_to_class_id(self, label): 92 | """ 93 | Return `class_id` corresponding to `label` given that `label` just needs to start with the class name. 94 | """ 95 | matches = [label.startswith(c["name"]) for c in self.class_info] 96 | 97 | assert any( 98 | matches 99 | ), f"Label {label} must match a class from classes: {self.class_info}" 100 | assert ( 101 | sum(matches) == 1 102 | ), f"Label {label} can't match multiple classes in {self.class_info}" 103 | 104 | return self.class_info[matches.index(True)]["id"] 105 | 106 | def image_reference(self, image_id): 107 | """Return the path of the image corresponding to `image_id`, if there is one.""" 108 | info = self.image_info[image_id] 109 | if info["source"] == "cb": 110 | return info["path"] 111 | else: 112 | return super(self.__class__, self).image_reference(image_id) 113 | 114 | def __repr__(self): 115 | return ( 116 | f"\n PolygonDataset\n" 117 | f"Image count : {len(self.image_ids)}\n" 118 | f"Class count : {self.num_classes}\n" 119 | + "\n".join( 120 | [ 121 | "{:3}. {:50}".format(i, info["name"]) 122 | for i, info in enumerate(self.class_info) 123 | ] 124 | ) 125 | ) 126 | 127 | @staticmethod 128 | def check_classes(classes): 129 | """Make sure no class is a substring of any other class.""" 130 | for pair in product(classes, classes): 131 | if (pair[0] != pair[1]) and (pair[0] in pair[1]): 132 | return False 133 | return True 134 | -------------------------------------------------------------------------------- /corebreakout/defaults.py: -------------------------------------------------------------------------------- 1 | """ 2 | Default paths, dataset and Mask_RCNN model config, `CoreColumn` viz settings. 3 | """ 4 | import os 5 | from pathlib import Path 6 | 7 | from mrcnn.config import Config 8 | 9 | from corebreakout import __file__ as PKG_FILE 10 | 11 | 12 | ####+++++++++++++++#### 13 | #### Default Paths #### 14 | ####+++++++++++++++#### 15 | 16 | ASSETS_DIR = Path(PKG_FILE).parent.parent / "assets" 17 | 18 | # Where to find train/test subdirectories 19 | DATASET_DIR = ASSETS_DIR / "data" 20 | 21 | # Where to find saved model weights 22 | MODEL_DIR = ASSETS_DIR / "models" 23 | # Default "core" model weights 24 | CB_MODEL_PATH = MODEL_DIR / 'mask_rcnn_cb_default.h5' 25 | # Pretrained COCO model weights 26 | COCO_MODEL_PATH = MODEL_DIR / 'mask_rcnn_coco.h5' 27 | 28 | # Where to save Mask RCNN training checkpoints, etc.
29 | TRAIN_DIR = MODEL_DIR 30 | 31 | 32 | ####++++++++++++++++++++++++#### 33 | #### Default Dataset Params #### 34 | ####++++++++++++++++++++++++#### 35 | 36 | CLASSES = ["col", "tray"] 37 | 38 | # See `docs/layout_parameters.rst` for more information 39 | LAYOUT_PARAMS = { 40 | "order": "t2b", # depth order by which to sort set of columns 41 | "orientation": "l2r", # depth orientation of each individual column 42 | "col_height": 1.0, # assumed height of each column, or tray, etc. 43 | "col_class": "col", # name of class for core sample columns 44 | "endpts": "tray", # name of class, 'auto', 'auto_all', or 2-tuple 45 | } 46 | 47 | ####++++++++++++++++++++++#### 48 | #### Default Model Config #### 49 | ####++++++++++++++++++++++#### 50 | 51 | 52 | class DefaultConfig(Config): 53 | """M-RCNN model configuration. 54 | 55 | Override some default Mask_RCNN `Config` values. 56 | 57 | For all available parameters and explanations, see: 58 | https://github.com/matterport/Mask_RCNN/blob/master/mrcnn/config.py 59 | """ 60 | 61 | NAME = "cb_default" 62 | 63 | # Number of classes, including background 64 | NUM_CLASSES = 1 + len(CLASSES) 65 | 66 | BACKBONE = "resnet101" 67 | 68 | # Length of square anchor side in pixels 69 | RPN_ANCHOR_SCALES = (64, 128, 192, 256, 320) 70 | 71 | # < 1 : wide anchor, > 1 : tall anchor 72 | # These defaults assume horizontal (wide) columns 73 | # Note: starting from COCO model requires exactly 3 anchor ratios 74 | RPN_ANCHOR_RATIOS = [0.2, 0.5, 1] 75 | 76 | # Non-max suppression threshold. Increasing generates more proposals. 77 | RPN_NMS_THRESHOLD = 0.9 # default = 0.7 78 | 79 | # STD_DEVs? Probably not, shouldn't make a big difference. 80 | 81 | # Maximum number of detections and minimum confidence 82 | DETECTION_MAX_INSTANCES = 6 83 | DETECTION_MIN_CONFIDENCE = 0.98 84 | 85 | # Set to default number of train/test images, respectively 86 | # (Can increase former to validate less often though) 87 | STEPS_PER_EPOCH = 25 88 | VALIDATION_STEPS = 5 89 | 90 | # Modify loss weights for more precise optimization 91 | # Few classes present, so we can lower `class` losses 92 | LOSS_WEIGHTS = { 93 | "rpn_class_loss": 0.5, 94 | "rpn_bbox_loss": 1.0, 95 | "mrcnn_class_loss": 0.5, 96 | "mrcnn_bbox_loss": 1.0, 97 | "mrcnn_mask_loss": 1.0, 98 | } 99 | 100 | # Conservative batch size + assuming single GPU. 101 | GPU_COUNT = 1 102 | IMAGES_PER_GPU = 1 103 | 104 | 105 | ####++++++++++++++++++++++++++++++++#### 106 | #### Default CoreColumn plot params #### 107 | ####++++++++++++++++++++++++++++++++#### 108 | """Set the default parameters for (fig, ax) returned by `CoreColumn.plot()`.
109 | 110 | DEPTH_TICK_ARGS -- passed to `viz.make_depth_ticks()` 111 | MAJOR_TICK_PARAMS -- passed to `ax.tick_params(which='major', ...)` 112 | MINOR_TICK_PARAMS -- passed to `ax.tick_params(which='minor', ...)` 113 | 114 | You can also add additional arguments to both `*_TICK_PARAMS` from: 115 | https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.tick_params.html 116 | """ 117 | DEPTH_TICK_ARGS = { 118 | "major_precision": 0.1, 119 | "major_format_str": "{:.1f}", 120 | "minor_precision": 0.01, 121 | "minor_format_str": "{:.2f}", 122 | } 123 | 124 | MAJOR_TICK_PARAMS = { 125 | "labelleft": True, # False to disable labels 126 | "labelsize": 32, 127 | "labelcolor": "black", 128 | "left": True, # False to disable ticks 129 | "length": 35, 130 | "width": 4, 131 | "color": "black", 132 | } 133 | 134 | MINOR_TICK_PARAMS = { 135 | "labelleft": True, # False to disable labels 136 | "labelsize": 12, 137 | "labelcolor": "black", 138 | "left": True, # False to disable ticks 139 | "length": 5, 140 | "width": 4, 141 | "color": "black", 142 | } 143 | -------------------------------------------------------------------------------- /corebreakout/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Assorted image / mask / label / region manipulation functions. 3 | """ 4 | import numpy as np 5 | from skimage import measure # used by `squeeze_labels` 6 | 7 | def strict_update(d1, d2): 8 | """For two dicts `d1` and `d2`, works like `d1.update(d2)`, except without 9 | adding any new keys to `d1` (only values of existing keys updated). 10 | 11 | Dictionaries are copied, so that this does not have an 'inplace' effect. 12 | """ 13 | assert type(d1) == type(d2) == dict, 'Only for dictionaries.' 14 | d1, d2 = d1.copy(), d2.copy() 15 | 16 | for k in d1.keys(): 17 | try: 18 | d1[k] = d2[k] 19 | except KeyError: 20 | continue 21 | 22 | return d1 23 | 24 | 25 | def vstack_images(imgA, imgB, *args): 26 | """Vstack `imgA` and `imgB`, after RHS zero-padding the narrower if necessary. 27 | 28 | Will recursively add any additional images passed as *args.
29 | """ 30 | dimA, dimB = imgA.ndim, imgB.ndim 31 | assert dimA == dimB, f"Cannot vstack images of different dimensions: {(dimA, dimB)}" 32 | assert dimA in [2, 3], f"Images must be 2D or 3D, not {dimA}D" 33 | 34 | dw = imgA.shape[1] - imgB.shape[1] 35 | 36 | if dw == 0: 37 | return np.concatenate([imgA, imgB]) 38 | elif dimA == 2: 39 | pads = ((0, 0), (0, abs(dw))) 40 | else: 41 | pads = ((0, 0), (0, abs(dw)), (0, 0)) 42 | 43 | if dw < 0: 44 | paddedA = np.pad(imgA, pads, "constant") 45 | result = np.concatenate([paddedA, imgB]) 46 | else: 47 | paddedB = np.pad(imgB, pads, "constant") 48 | result = np.concatenate([imgA, paddedB]) 49 | 50 | if len(args): 51 | return vstack_images(result, *args) 52 | else: 53 | return result 54 | 55 | 56 | ###++++++++++++++++++++++++++++++### 57 | ### Region + Layout Manipulation ### 58 | ###++++++++++++++++++++++++++++++### 59 | 60 | # Could add 'b2t' and 'r2l', although those are very unusual 61 | ORIENTATIONS = ["t2b", "l2r"] 62 | 63 | 64 | def rotate_vertical(region_img, orientation): 65 | """Rotated (cropped) `region_img` array to vertical, given the depth `orientation`.""" 66 | assert ( 67 | orientation in ORIENTATIONS 68 | ), f"orientation {orientation} must be one of {ORIENTATIONS}" 69 | 70 | if orientation is "t2b": 71 | return region_img 72 | elif orientation is "l2r": 73 | return np.rot90(region_img, k=-1) 74 | else: 75 | raise ValueError(f"bad `orientation`: {orientation}") 76 | 77 | 78 | def sort_regions(regions, order): 79 | """Sort skimage `regions` (core columns), given the column `order`.""" 80 | assert order in ORIENTATIONS, f"order {order} must be one of {ORIENTATIONS}" 81 | 82 | idx = 0 if order is "t2b" else 1 83 | regions.sort(key=lambda x: x.bbox[idx]) 84 | 85 | return regions 86 | 87 | 88 | def maximum_extent(regions, crop_axis): 89 | """Find min/max of combined skimage `regions`, along `crop_axis`.""" 90 | low_idx = 0 if crop_axis == 1 else 1 91 | high_idx = 2 if crop_axis == 1 else 3 92 | 93 | low = min(r.bbox[low_idx] for r in regions) 94 | high = max(r.bbox[high_idx] for r in regions) 95 | 96 | return (low, high) 97 | 98 | 99 | def crop_region(img, labels, region, axis=0, endpts=(815, 6775)): 100 | """Adjust region bbox and return cropped region * mask. 
101 | 102 | Parameters 103 | ---------- 104 | img : array 105 | The image to crop 106 | labels : array 107 | Mask of integer labels, same height and width as `img` 108 | region : skimage.RegionProperties instance 109 | Region object corresponding to column to crop around 110 | axis : int, optional 111 | Which axis to change `endpts` along, default=0 (y-coordinates) 112 | endpts : tuple(int) 113 | Least extreme endpoint coordinates allowed along `axis` 114 | 115 | Returns 116 | ------- 117 | region : array 118 | Masked image region, cropped in (adjusted) bounding box 119 | """ 120 | r0, c0, r1, c1 = region.bbox 121 | 122 | if axis == 0: 123 | c0, c1 = min(c0, endpts[0]), max(c1, endpts[1]) 124 | elif axis == 1: 125 | r0, r1 = min(r0, endpts[0]), max(r1, endpts[1]) 126 | 127 | region_img = img * np.expand_dims(labels == region.label, -1) 128 | 129 | return region_img[r0:r1, c0:c1, :] 130 | 131 | 132 | ###++++++++++++++++++++++++### 133 | ### Preds + masks + labels ### 134 | ###++++++++++++++++++++++++### 135 | 136 | 137 | def masks_to_labels(masks): 138 | """Convert boolean (H,W,N) `masks` array to integer (H,W) in range(0,N+1).""" 139 | labels = np.zeros(masks.shape[0:-1], dtype=int) 140 | 141 | for i in range(masks.shape[-1]): 142 | labels += (i + 1) * masks[:, :, i].astype(int) 143 | 144 | return labels 145 | 146 | 147 | def squeeze_labels(labels): 148 | """Set labels to range(0, objects+1)""" 149 | label_ids = np.unique([r.label for r in measure.regionprops(labels)]) 150 | # relabel objects consecutively starting from 1 (0 remains background) 151 | for new_label, label_id in enumerate(label_ids, start=1): 152 | labels[labels == label_id] = new_label 153 | 154 | return labels 155 | -------------------------------------------------------------------------------- /corebreakout/viz.py: -------------------------------------------------------------------------------- 1 | """ 2 | Assorted visualization functions. 3 | """ 4 | import numpy as np 5 | from mrcnn.visualize import display_instances 6 | 7 | 8 | ###++++++++++++++++++++++### 9 | ### Model + bbox + lines ### 10 | ###++++++++++++++++++++++### 11 | 12 | 13 | def show_preds(img, preds, class_names, colors=None, ax=None, figsize=(16, 16)): 14 | """Less verbose wrapper for `mrcnn.visualize.display_instances`. 15 | 16 | Parameters 17 | ---------- 18 | colors : list or array, optional 19 | Colors to use for each object in `preds`, default is random color for each. 20 | ax : matplotlib axis, optional 21 | An axis to plot onto. If None, will create one with size `figsize`. 22 | """ 23 | display_instances( 24 | img, 25 | preds["rois"], 26 | preds["masks"], 27 | preds["class_ids"], 28 | class_names, 29 | preds["scores"], 30 | colors=colors, 31 | ax=ax, 32 | figsize=figsize, 33 | ) 34 | 35 | 36 | def draw_box(image, bbox, color, lw): 37 | """Draw RGB(A) `color` bounding box on image array.""" 38 | y1, x1, y2, x2 = bbox 39 | image[y1 : y1 + lw, x1:x2] = color 40 | image[y2 : y2 + lw, x1:x2] = color 41 | image[y1:y2, x1 : x1 + lw] = color 42 | image[y1:y2, x2 : x2 + lw] = color 43 | return image 44 | 45 | 46 | def draw_lines(img, coords, axis, color=[255, 0, 0], lw=10): 47 | """Draw `color` lines on `img` at `coords` along `axis`. 48 | 49 | axis == 0 --> horizontal lines 50 | axis == 1 --> vertical lines 51 | line width (`lw`) will round down to even numbers.
52 | 53 | Raises 54 | ------ 55 | IndexError 56 | If any (coord +/- (lw // 2)) falls outside of `img` 57 | """ 58 | assert axis in [0, 1], "`axis` must be 0 (horizontal) or 1 (vertical)" 59 | 60 | hw = lw // 2 61 | if axis == 0: 62 | for row in coords: 63 | img[row - hw : row + hw + 1, :, :] = color 64 | else: 65 | for col in coords: 66 | img[:, col - hw : col + hw + 1, :] = color 67 | 68 | return img 69 | 70 | 71 | def draw_box(img, box, color, lw): 72 | """Draw `lw`-pixel width bounding box on the given image array. 73 | color: list of 3 int values for RGB. 74 | """ 75 | y1, x1, y2, x2 = box 76 | img[y1 : y1 + lw, x1:x2] = color 77 | img[y2 : y2 + lw, x1:x2] = color 78 | img[y1:y2, x1 : x1 + lw] = color 79 | img[y1:y2, x2 : x2 + lw] = color 80 | 81 | return img 82 | 83 | 84 | ###++++++++++++++++++++### 85 | ### Column depth ticks ### 86 | ###++++++++++++++++++++### 87 | 88 | # Return True if arr[i] is a local minimum, else False. 89 | local_min = lambda arr, i : np.argmin(arr[i - 1 : i + 2]) == 1 90 | 91 | def make_depth_ticks( 92 | depths, 93 | major_precision=0.1, 94 | major_format_str="{:.1f}", 95 | minor_precision=0.01, 96 | minor_format_str="{:.2f}" 97 | ): 98 | """Generate major & minor (ticks, locs) for depth array axis. 99 | 100 | Parameters 101 | ---------- 102 | depths : array 103 | An array of (ordered) depth values from which to generate ticks/locs. 104 | *_precision : float, optional 105 | Major, minor tick spacing (in depth units), defaults=0.1, 0.01. 106 | *_format_str : str, optional 107 | Format strings to coerce depths -> tick strings, defaults='{:.1f}', '{:.2f}'. 108 | 109 | Returns 110 | ------- 111 | major_ticks, major_locs, minor_ticks, minor_locs 112 | 113 | *_ticks : lists of tick label strings 114 | *_locs : lists of tick locations in array coordinates (fractional indices) 115 | """ 116 | # lambdas to convert values --> strs 117 | major_fmt_fn = lambda x: major_format_str.format(x) 118 | minor_fmt_fn = lambda x: minor_format_str.format(x) 119 | 120 | major_ticks, major_locs = [], [] 121 | minor_ticks, minor_locs = [], [] 122 | 123 | # remainders of depth w.r.t. precision 124 | # add `inf`s at start and end to get first/last ticks 125 | major_rmndr = np.insert(depths % major_precision, (0, depths.size), np.inf) 126 | minor_rmndr = np.insert(depths % minor_precision, (0, depths.size), np.inf) 127 | 128 | for i in np.arange(1, major_rmndr.size): 129 | 130 | if local_min(major_rmndr, i): 131 | major_ticks.append(major_fmt_fn(depths[i - 1])) 132 | major_locs.append(i) 133 | 134 | elif local_min(minor_rmndr, i): 135 | minor_ticks.append(minor_fmt_fn(depths[i - 1])) 136 | minor_locs.append(i) 137 | 138 | # add last tick if it's close to a whole number 139 | last_depth = np.round(depths[-1], decimals=1) 140 | if (last_depth % 1.0) == 0.0: 141 | major_ticks.append(major_fmt_fn(last_depth)) 142 | major_locs.append(depths.size - 1) 143 | 144 | return major_ticks, major_locs, minor_ticks, minor_locs 145 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help".
12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/_build/doctrees/creating_datasets.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/doctrees/creating_datasets.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/doctrees/environment.pickle -------------------------------------------------------------------------------- /docs/_build/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/doctrees/index.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/layout_parameters.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/doctrees/layout_parameters.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/model_building.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/doctrees/model_building.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/scripts_reference.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/doctrees/scripts_reference.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/source/corebreakout.datasets.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/doctrees/source/corebreakout.datasets.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/source/corebreakout.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/doctrees/source/corebreakout.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/source/modules.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/doctrees/source/modules.doctree 
-------------------------------------------------------------------------------- /docs/_build/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: 9c1d4c4fc1d3aa1951057fc57293feb2 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /docs/_build/html/_images/JOSS_figure_workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_images/JOSS_figure_workflow.png -------------------------------------------------------------------------------- /docs/_build/html/_images/endpts_auto.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_images/endpts_auto.png -------------------------------------------------------------------------------- /docs/_build/html/_images/endpts_auto_all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_images/endpts_auto_all.png -------------------------------------------------------------------------------- /docs/_build/html/_images/endpts_explicit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_images/endpts_explicit.png -------------------------------------------------------------------------------- /docs/_build/html/_images/endpts_tray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_images/endpts_tray.png -------------------------------------------------------------------------------- /docs/_build/html/_images/labelme1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_images/labelme1.png -------------------------------------------------------------------------------- /docs/_build/html/_sources/creating_datasets.rst.txt: -------------------------------------------------------------------------------- 1 | .. _creating-datasets: 2 | 3 | Creating ``Datasets`` 4 | ============================= 5 | 6 | Using ``labelme`` 7 | ----------------- 8 | 9 | The recommended way to add a new set of labeled training images is to annotate them using `wkentaro/labelme `__. The ``labelme`` GUI allows the user to draw any number of labeled polygons on an image, and saves the labels and coordinates in a JSON annotation file. 10 | 11 | You can easily install ``labelme`` with ``pip``, and then open our small test dataset: 12 | 13 | .. code:: 14 | 15 | $ pip install labelme 16 | $ labelme tests/data/two_image_dataset 17 | 18 | This should open a window that looks like this: 19 | 20 | |image0| 21 | 22 | New polygons can be drawn by clicking ``Create Polygons`` and then clicking points on the image to add vertices. 
When you get back around to the starting vertex to close the loop of points and create a polygon, ``labelme`` will ask you to assign it a text label. 23 | 24 | Unique text labels are aggregated on the right side, and the individual polygons shown in the image are colored and listed with their assigned labels. They happen to be the same in this example, but there could be more labels in the ``Label List`` than polygons in the ``Polygon Labels`` list for any given image in the directory. 25 | 26 | When you finish adding polygons, click the ``Save`` button to export the annotations as a ``.json`` file, which by default will save to the current directory. 27 | 28 | The ``File List`` shows images in the current directory, with the checkbox indicating whether an annotation has been saved for that file yet. You can use the ``Next Image`` and ``Previous Image`` buttons to traverse through the images (and corresponding annotations) in the directory. 29 | 30 | 31 | Inspect Our Annotations 32 | ----------------------- 33 | 34 | For a more hands-on exploration of annotation structure and the ``Dataset`` API, see `notebooks/inspect_dataset.py `__ 35 | 36 | 37 | How to Name Things 38 | ------------------ 39 | 40 | As you can see above, our text labels for individual objects have the form ``<class><number>``. The ``class`` refers to a general object type, the ``number`` differentiates between individual `instances` of that class within an image. This isn't the only possible naming scheme, but it's the one you should use if you want your annotations to be compatible with our ``PolygonDataset`` implementation. 41 | 42 | In our data, we use two classes, **col** and **tray**, to label two different types of objects: 43 | 44 | - **col** is for `columns` of core material -- the individual objects that we want to segment, crop, and stack on top of each other. 45 | - **tray** is for empty trays, which for our data are the most reliable "measuring stick" to use for defining the "top" and "base" positions of columns within an image 46 | 47 | The first type of object is required -- the whole purpose of this workflow is to segment core columns, so you will need to at least label those. 48 | 49 | The second type of object is optional -- using another object (empty tray, measuring stick, etc.) to define top and base positions of columns is `one` option for the ``endpts`` setting in :ref:`layout-parameters`, but there are others which do not require the existence of any objects other than core columns. 50 | 51 | If you're making your own dataset, your class names can be whatever you want them to be. You just need at least some name for core columns, and to keep general consistency between: 52 | 53 | - The ``PolygonDataset`` (s) you train your model with, which use the annotations you saved. 54 | - The ``kwargs`` you provide when instantiating a ``CoreSegmenter``. Namely: 55 | 56 | - The ``classes`` argument (it should be the same as for ``PolygonDataset``) 57 | - The ``col_class`` (and possibly ``endpts``) fields of the ``layout_params`` 58 | 59 | If you decide to use different class names (or layout parameters), we'd recommend changing the ``CLASSES`` and ``LAYOUT_PARAMS`` in `defaults.py `__ so that you don't have to specify them as often. 60 | 61 | Removing ``imageData`` 62 | ---------------------- 63 | 64 | ``labelme`` saves a field called ``imageData`` which encodes the entire image as a string, consuming quite a bit of unnecessary memory.
Our ``PolygonDataset`` class doesn't make use of this field, and we've provided a script to delete it from all JSON files below a root ``path``: 65 | 66 | .. code:: 67 | 68 | $ python scripts/prune_imageData.py <path> 69 | 70 | 71 | Summary of Guidelines 72 | --------------------- 73 | 74 | To be able to use the built-in ``corebreakout.datasets.PolygonDataset`` class with your training data, you want to adhere to these guidelines: 75 | 76 | - Save ``.json`` annotations in a flat directory with 77 | corresponding ``.jpeg`` files (this is ``labelme``\ ’s default 78 | behavior) 79 | - You may label any number of classes. You will have to supply a list 80 | of these classes to the ``PolygonDataset`` and ``CoreSegmenter`` constructors, or modify 81 | ``defaults.CLASSES``. 82 | 83 | - At a minimum, you will want some class name to represent columns of core. We call ours ``'col'``, but it doesn't matter what you want to call it if you're using your own data. 84 | - You may also create a class (*e.g.*, ``'tray'``) for any objects that consistently demarcate the top and base positions of core columns better than the columns themselves. 85 | - Different instances of the same class should begin with the class 86 | name and be differentiated afterward (*e.g.*, ``col1, col2, col3``) 87 | 88 | - The corollary is that no class name can be a substring of any 89 | other class name (*e.g.*, ``col, col_tray`` would not be allowed) 90 | - Multiple polygons may belong to a single instance (for example, if there's a large gap in the middle of a column) 91 | 92 | - After annotating images, split into sibling ``'train'`` and 93 | ``'test'`` subdirectories 94 | 95 | **Note:** We've found that the point of diminishing returns happens somewhere in the range of 20-30 training images, which probably corresponds to 30-50 column instances for this dataset. YMMV. 96 | 97 | After compiling the annotations, you may wish to modify ``defaults.DATASET_DIR`` to avoid needing to explicitly specify the data location. 98 | 99 | ``corebreakout.datasets.PolygonDataset`` 100 | ---------------------------------------- 101 | 102 | This is a subclass of ``mrcnn.utils.Dataset`` for instance segmentation 103 | annotations in the default JSON format of 104 | `wkentaro/labelme `__. 105 | 106 | Usage 107 | ~~~~~ 108 | 109 | :: 110 | 111 | from corebreakout.datasets import PolygonDataset 112 | from corebreakout import defaults 113 | data_dir = defaults.DATASET_DIR # parent of any separate annotation data directories 114 | subset = 'train' # which subdirectory to read from 115 | 116 | dataset = PolygonDataset(classes=defaults.CLASSES) 117 | 118 | # Collect all of the required ID + path information 119 | dataset.collect_annotated_images(data_dir, subset) 120 | 121 | # Set all of the attrs required for use 122 | dataset.prepare() 123 | 124 | print(dataset) 125 | 126 | Two ``Dataset`` objects (train, test) are required in calls to ``model.train(...)``, which is why we split them into separate directories. 127 | 128 | Subclassing ``mrcnn.utils.Dataset`` 129 | ----------------------------------- 130 | 131 | If you want to use a different annotation format, you can inherit from 132 | the base ``mrcnn.utils.Dataset`` class. 133 | 134 | You will need to write some user-called method to collect file 135 | information: 136 | 137 | - *e.g.*, ``collect_annotated_images(data_dir, subset)``: Register ``image_id``, ``path``, and ``ann_path`` for each (image, annotation) file pair in the ``<data_dir>/<subset>`` directory.
138 | 139 | And then override at least these two methods: 140 | 141 | - ``load_mask(image_id)``: Given an ``image_id``, load (and compute, if necessary) the corresponding mask. For an image with ``N`` objects (not including the background), the return value from this function should be ``(mask, class_ids)``, where ``mask`` is a boolean array of shape ``(H,W,N)`` and ``class_ids`` is a 1D integer array of size ``N`` with one ``class_id`` for each channel in ``mask``. 142 | - ``image_reference(image_id)``: Return the path of an image, a link to it, or some other unique property to help in looking it up or debugging it. 143 | 144 | .. |image0| image:: images/labelme1.png 145 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/index.rst.txt: -------------------------------------------------------------------------------- 1 | .. CoreBreakout documentation master file, created by 2 | sphinx-quickstart on Fri Apr 17 06:21:33 2020. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to CoreBreakout's documentation! 7 | ======================================== 8 | 9 | Project Repository: `rgmyr/corebreakout `_ 10 | 11 | ``mrcnn`` Repository: `matterport/Mask_RCNN `_ 12 | 13 | ``corebreakout`` provides two main functionalities: **(1)** a deep learning workflow for transforming raw images of geological core sample boxes into depth-registered datasets, which is facilitated by the ``CoreSegmenter`` API, and **(2)** a ``CoreColumn`` data structure for storing and manipulating depth-registered image data. 14 | 15 | |workflowfigure| 16 | 17 | This documentation covers usage of ``corebreakout``. To dig into the finer details of the Mask R-CNN model implementation, please refer to ``mrcnn``'s documentation. 18 | 19 | Provided Data 20 | ============= 21 | 22 | We provide labeled images from the British Geological Survey's North Sea collection, as well as a saved Mask R-CNN model trained on this data, and a pretrained COCO model from which to start new training runs. 23 | 24 | To use the data or models, download the ``assets.zip`` folder from the `Releases Page `_. Unzip it in the root directory of the project, or modify the paths in `defaults.py `_ to point to the location. 25 | 26 | JSON annotations in this data currently contain a superfluous field called ``imageData`` which takes up most of the memory for these files. You can delete this field and reduce the file sizes with ``scripts/prune_imageData.py``: 27 | 28 | .. code:: 29 | 30 | $ python scripts/prune_imageData.py assets/ 31 | 32 | If you want to use your own data, then label some images for Mask R-CNN training, following the guidelines in :ref:`creating-datasets`. We recommend starting with 20-30 images. 33 | 34 | Overview 35 | ======== 36 | 37 | Image Processing Workflow 38 | ------------------------- 39 | 40 | (1) If you're looking to use your own data, you will probably need to label some of your images for best results. Follow the guidelines in :ref:`creating-datasets`. 41 | 42 | (2) Train a Mask R-CNN model using your labeled data. Model configuration, training, and selection are explained in :ref:`model-building`. 43 | 44 | (3) Use the trained model to process directories of unlabeled images and save the results as a ``CoreColumn``. We provide `scripts/process_directory.py `__ to facilitate this step.
It requires saved model weights and a CSV file listing the top and bottom depths for each image in the directory. To make creating these CSV files easier, we provide `scripts/get_ocr_depths.py `_. 45 | 46 | The ``CoreSegmenter`` Class 47 | --------------------------- 48 | 49 | `scripts/process_directory.py `__ uses the ``corebreakout.CoreSegmenter`` API to handle converting images to ``CoreColumns``, and you may also use this class directly: 50 | 51 | .. code:: python 52 | 53 | segmenter = corebreakout.CoreSegmenter( 54 | model_dir, 55 | weights_path, 56 | model_config = corebreakout.defaults.DefaultConfig, 57 | class_names = corebreakout.defaults.CLASSES, 58 | layout_params = corebreakout.defaults.LAYOUT_PARAMS 59 | ) 60 | 61 | # `img` can be an array or a path to an image 62 | column = segmenter.segment(img, [top_depth, base_depth], **kwargs) 63 | 64 | # for iterables of images (or paths) and depth range pairs 65 | column = segmenter.segment_all(imgs, depth_ranges, **kwargs) 66 | 67 | ``class_names`` should correspond to those in the dataset on which the model was trained, and ``layout_params`` are explained in detail in the :ref:`layout-parameters` documentation. 68 | 69 | The ``CoreColumn`` Class 70 | ------------------------ 71 | 72 | This object is a container for depth-registered image data. Columns can be added, sliced, plotted, iterated over in chunks, saved, and loaded (in either single-file ``.pkl`` format or multi-file ``.npy`` format). 73 | 74 | For a demonstration of the ``CoreColumn`` API, see: `notebooks/column_demo.ipynb `__ 75 | 76 | .. toctree:: 77 | :maxdepth: 2 78 | :caption: Usage Documentation: 79 | 80 | creating_datasets 81 | model_building 82 | layout_parameters 83 | scripts_reference 84 | 85 | 86 | Indices and tables 87 | ================== 88 | 89 | * :ref:`genindex` 90 | * :ref:`modindex` 91 | * :ref:`search` 92 | 93 | .. |workflowfigure| image:: images/JOSS_figure_workflow.png 94 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/layout_parameters.rst.txt: -------------------------------------------------------------------------------- 1 | .. _layout-parameters: 2 | 3 | Layout Parameters 4 | ================= 5 | 6 | ``corebreakout`` allows for processing core images with different 7 | layouts, and provides several different methods for finding and 8 | cropping the bounding boxes of individual columns within an image. 9 | 10 | Layout parameters are used within the ``CoreSegmenter.segment()`` 11 | method. 12 | 13 | Any and all default parameters can be overridden and updated in the 14 | ``CoreSegmenter`` constructor, or in any single call to the 15 | ``segment()`` method. In either case, pass any new parameters in a dict 16 | as the ``layout_params`` keyword argument (a short example follows the development note below). 17 | 18 | The default parameters reflect the characteristics of the BGS dataset on 19 | which the example model is trained: 20 | 21 | .. code:: python 22 | 23 | # corebreakout/defaults.py 24 | 25 | LAYOUT_PARAMS = { 26 | 'order' : 't2b', 27 | 'orientation' : 'l2r', 28 | 'col_height' : 1.0, 29 | 'col_class' : 'col', 30 | 'endpts' : 'tray' 31 | } 32 | 33 | **DEVELOPMENT NOTE:** The only allowed values for ``order`` and 34 | ``orientation`` are ``'t2b'`` and ``'l2r'``. This covers all 35 | conventional core image layouts that we are aware of, but we would 36 | consider adding ``'b2t'`` and ``'r2l'`` if provided with use-cases. If 37 | you have one, please open an issue (or submit a pull request :-).
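As a short example of this override mechanism, here is a hedged sketch assuming that ``CoreSegmenter`` is importable from the package root (as in the usage above), that a partial dict simply updates the remaining defaults, and using placeholder image paths and depth values:

.. code:: python

    from corebreakout import CoreSegmenter, defaults

    # Override a single parameter for every call made by this instance;
    # unspecified keys fall back to defaults.LAYOUT_PARAMS
    segmenter = CoreSegmenter(
        defaults.MODEL_DIR,
        defaults.CB_MODEL_PATH,
        layout_params={'endpts': 'auto'}
    )

    # ...or override in a single call, e.g. explicit pixel endpoints for one unusual image
    column = segmenter.segment(
        'path/to/image.jpeg',
        [1204.0, 1207.0],
        layout_params={'endpts': (100, 6900)}
    )

The allowed values for ``'endpts'`` and the other keys are described in the sections below.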
38 | 39 | ``order`` and ``orientation`` 40 | ----------------------------- 41 | 42 | The ``'order'`` parameter specifies the depth order by which to sort the 43 | set of columns detected in an image: 44 | 45 | - ``'t2b'`` implies that columns are laid out horizontally, with the uppermost column coming first in order of depth. 46 | - ``'l2r'`` implies that columns are laid out vertically, with the leftmost column coming first in order of depth. 47 | 48 | The ``'orientation'`` parameter specifies the depth orientation of 49 | columns. This should be the converse of ``'order'``: 50 | 51 | - ``'t2b'`` implies that the top of a (vertical) column is toward the top of the image. 52 | - ``'l2r'`` implies that the top of a (horizontal) column is toward the left side of the image. 53 | 54 | Since it is required that ``order`` be one of these options and 55 | ``orientation`` be the other, requiring both *is* redundant. However, 56 | making both of them explicit improves code readability, and will make it 57 | easier to add other options should we choose to do so in future 58 | releases. 59 | 60 | ``col_height`` 61 | -------------- 62 | 63 | The ``'col_height'`` parameter specifies the height in depth units 64 | (usually meters or feet) of individual (and complete) columns. 65 | 66 | This value is used in conjunction with the ``depth_range`` positional 67 | argument to find the number of expected columns in an image when calling 68 | ``CoreSegmenter.segment(img, depth_range, **kwargs)``. 69 | 70 | ``col_class`` 71 | ------------- 72 | 73 | The name of the class representing core sample columns in the Mask R-CNN 74 | model used by the ``CoreSegmenter`` instance. 75 | 76 | ``endpts`` 77 | ---------- 78 | 79 | The ``'endpts'`` parameter determines the method used to ensure that, 80 | before cropping, the ``top`` and ``base`` of partial columns are 81 | extended to locations that are approximately ``'col_height'`` apart. 82 | Different options may work better or worse depending on how clean the 83 | samples are and how consistent the layout is. 84 | 85 | Predicted masks tend to be subsets of the ‘true’ masks, so **short 86 | columns are extended**, but **longer columns are NOT shortened**. You 87 | can see this in the example images below, where the computed minimal 88 | endpoint locations are shown as solid yellow lines, and the resulting 89 | column bounding boxes are shown as green dashed lines.
90 | 91 | Allowed values of ``'endpts'`` include: 92 | 93 | - The name of a class (*e.g.*, ``'tray'``) 94 | - Results in columns being extended to the ``top`` and ``base`` of the strongest detection of this class 95 | - Must be found in the ``class_names`` attribute of the ``CoreSegmenter`` instance 96 | - Typical choices would be empty trays, or the measuring sticks commonly placed next to boxes of core 97 | 98 | |image0| 99 | 100 | - One of the keywords ``'auto'`` or ``'auto_all'`` 101 | 102 | - Results in columns being extended to the min/max coordinates of a 103 | set of detected objects 104 | - ``'auto'`` will use only objects of ``'col_class'`` as the 105 | relevant set (*e.g.*, all ``'col'`` detections – first example 106 | below) 107 | - ``'auto_all'`` will use all objects in the image (*e.g.*, all 108 | ``'col'`` **and** ``'tray'`` detections – second example below) 109 | 110 | |image1| 111 | 112 | |image2| 113 | 114 | - A 2-tuple of explicit integer endpoint coordinates (*e.g.*, 115 | ``(100, 6900)``) 116 | 117 | - Results in columns being extended to at least these min/max 118 | coordinates 119 | 120 | |image3| 121 | 122 | .. |image0| image:: images/endpts_tray.png 123 | .. |image1| image:: images/endpts_auto.png 124 | .. |image2| image:: images/endpts_auto_all.png 125 | .. |image3| image:: images/endpts_explicit.png 126 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/model_building.rst.txt: -------------------------------------------------------------------------------- 1 | .. _model-building: 2 | 3 | Building Mask R-CNN Models 4 | ========================== 5 | 6 | For best results, most users will want to train models on some of 7 | their own data. See :ref:`creating-datasets` for guidelines. 8 | 9 | We may release a more general pretrained model in the future, pending demand and the availability of open source datasets. Feel free to contact us if you would like to contribute your data for that purpose. 10 | 11 | The rough outline of model construction and training looks like this: 12 | 13 | .. code:: python 14 | 15 | import mrcnn.model as modellib 16 | from corebreakout import defaults, datasets 17 | 18 | model_config = defaults.DefaultConfig() 19 | 20 | train_dataset = datasets.PolygonDataset(...) 21 | test_dataset = datasets.PolygonDataset(...) 22 | 23 | model = modellib.MaskRCNN(...) 24 | 25 | model.train(train_dataset, test_dataset, ...) 26 | 27 | For the finer details, see `scripts/train_mrcnn_model.py `__ 28 | 29 | ``mrcnn`` Model Configuration 30 | ----------------------------- 31 | 32 | Models are created with a subclass of ``mrcnn.config.Config``. See 33 | ``corebreakout.defaults.DefaultConfig`` for our latest model 34 | configuration. 35 | 36 | To see all available configuration parameters, see 37 | `mrcnn/config.py `__ 38 | 39 | The obvious parameters that a user might want to change include: 40 | 41 | - ``NAME`` 42 | - ``RPN_ANCHOR_RATIOS`` : defaults are set up for horizontal columns. Something like ``[1.0, 3.0, 7.0]`` would make more sense for vertical columns. 43 | - ``STEPS_PER_EPOCH``, ``VALIDATION_STEPS`` : batches per training epoch and per validation step. These do not necessarily need to match the dataset sizes. 44 | - ``IMAGES_PER_GPU`` : you can try increasing this if you have a large GPU. 45 | - ``GPU_COUNT`` : you can increase this if you have multiple GPUs. 46 | 47 | You can either modify ``DefaultConfig`` directly, or define your own ``Config`` subclass, as sketched below.
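For example, a minimal sketch of such a subclass — the class name and the specific step counts are illustrative assumptions, and the anchor ratios are simply the vertical-column suggestion from the list above:

.. code:: python

    from corebreakout.defaults import DefaultConfig


    class VerticalColumnConfig(DefaultConfig):
        """Hypothetical configuration tweaks for vertically laid out columns."""
        NAME = 'cb_vertical'
        RPN_ANCHOR_RATIOS = [1.0, 3.0, 7.0]  # taller-than-wide anchors
        STEPS_PER_EPOCH = 100                # batches per training epoch
        VALIDATION_STEPS = 25                # batches per validation step
        IMAGES_PER_GPU = 1
        GPU_COUNT = 1


    config = VerticalColumnConfig()
    config.display()  # mrcnn Config objects can print their full settings

An instance of a class like this can be passed as the ``model_config`` when constructing a ``CoreSegmenter``, or used in place of ``defaults.DefaultConfig()`` in the training outline above.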
48 | 49 | Model Training 50 | -------------- 51 | 52 | The simplest way to train a model is by running 53 | `scripts/train_mrcnn_model.py `__. 54 | This script loads pretrained ``COCO`` weights, and executes a three-step 55 | training + tuning run. 56 | 57 | It also serves as a demonstration of ``Dataset`` collection, and of 58 | instantiating and training an ``mrcnn.model.MaskRCNN``. 59 | 60 | While training, ``mrcnn`` logs ``tensorboard`` files to the specified 61 | ``model_dir``. You can view the files by running: 62 | 63 | :: 64 | 65 | $ tensorboard --logdir <model_dir> 66 | 67 | Model Selection 68 | --------------- 69 | 70 | We recommend viewing the ``tensorboard`` files (and particularly the 71 | ``val_loss`` scalar) to select candidate models. 72 | 73 | `notebooks/select_model.ipynb `__ 74 | provides a template for viewing the output of candidate models on the 75 | test dataset. 76 | 77 | **Note**: ``mrcnn`` saves checkpoints each epoch starting at ``0001``, 78 | while ``tensorboard`` logs epochs starting from ``0``. So, if epoch 79 | ``X`` looks good on ``tensorboard``, you will want to reference epoch 80 | ``X+1`` in your list of candidates to load the corresponding weights. 81 | 82 | Using a Model 83 | ------------- 84 | 85 | Once you have trained and selected a new model, you may want to change 86 | the ``*PATH`` variables in ``corebreakout/defaults.py`` to point to the location of the 87 | new model weights (these paths are what get referenced by default in 88 | ``scripts/process_directory.py``, etc.). 89 | 90 | Alternatively, you can always pass whatever ``model_dir`` and ``weights_path`` (and 91 | ``model_config`` instance) you like when constructing a ``CoreSegmenter``. 92 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/scripts_reference.rst.txt: -------------------------------------------------------------------------------- 1 | Scripts Reference 2 | ================= 3 | 4 | ``train_mrcnn_model.py`` 5 | ------------------------ 6 | Train a new ``mrcnn`` model starting from pretrained COCO weights. 7 | 8 | optional arguments: 9 | -h, --help show this help message and exit 10 | --steps STEPS 1, 2, or 3. How many of the steps to train: (heads, 11 | 4+, entire model) 12 | --model_dir MODEL_DIR 13 | Directory in which to create new training 14 | subdirectory for checkpoints and tensorboard logs. 15 | --data_dir DATA_DIR Directory in which to find ``train`` and ``test`` 16 | subdirectories containing labeled images. 17 | 18 | 19 | 20 | ``get_ocr_depths.py`` 21 | --------------------- 22 | Extract OCR top and base depths from images with ``pytesseract``. 23 | 24 | optional arguments: 25 | -h, --help show this help message and exit 26 | --root_dir ROOT_DIR A common parent directory of all target directories. 27 | --subdir SUBDIR A string contained in the name of all target subdirectories. 28 | --save_name SAVE_NAME 29 | Name of depths csv file(s) to be saved in matching subdirs. 30 | --force Flag to force overwrite of any existing ``.csv`` files. 31 | --inspect Flag to inspect images and print OCR output whenever there is an issue. 32 | 33 | As an example, you can test the script by running: 34 | 35 | ..
code:: none 36 | 37 | $ cd scripts 38 | $ python get_ocr_depths.py --root_dir ../tests/data --subdir two_image_dataset --save_name auto_depths_test 39 | 40 | This should save a new file at ``tests/data/two_image_dataset/auto_depths_test.csv``, with contents like: 41 | 42 | .. code:: 43 | 44 | ,top,bottom 45 | S00101409.jpeg,2348.0,2350.0 46 | S00111582.jpeg,7716.0,2220.0 47 | 48 | Note that ``7716.0`` is a misread, and should have been ``2218.0``. At least with our BGS images, some manual corrections are usually required, but this provides a template for the ``--depth_csv`` file required to run ``process_directory.py``. 49 | 50 | 51 | ``process_directory.py`` 52 | ------------------------ 53 | Process a directory of raw images with Mask R-CNN and save the results as a ``CoreColumn``. 54 | 55 | The ``path`` given should contain images as jpeg files, and a ``depth_csv`` file in the format: 56 | 57 | .. code:: 58 | 59 | <filename>, top, bottom 60 | <image1.jpeg>, <top1>, <bottom1> 61 | ... 62 | <imageN.jpeg>, <topN>, <bottomN> 63 | 64 | **NOTE**: model ``Config``, ``class_names``, and segmentation ``layout_params`` can only be 65 | changed manually at the top of the script, and default to those configured in `defaults.py `_ 66 | 67 | positional arguments: 68 | path Path to directory of images (and depth information csv) to process. 69 | 70 | optional arguments: 71 | -h, --help show this help message and exit 72 | --model_dir MODEL_DIR 73 | Directory to load ``mrcnn`` model from. 74 | Default=``defaults.MODEL_DIR`` 75 | --weights_path WEIGHTS_PATH 76 | Path to model weights to load. 77 | Default=``defaults.CB_MODEL_PATH`` 78 | --add_tol ADD_TOL Gap tolerance when adding ``CoreColumn`` objects, 79 | default=5.0. 80 | --add_mode ADD_MODE ``CoreColumn.add_mode``. One of {'fill', 'collapse'}. 81 | --depth_csv DEPTH_CSV 82 | Name of filename + (top, bottom) csv to read from 83 | ``path``, default=``'auto_depths.csv'`` 84 | --save_dir SAVE_DIR Path to save ``CoreColumn`` to, default=None will save to 85 | ``path`` 86 | --save_name SAVE_NAME 87 | Name to use for ``CoreColumn.save``, default=None 88 | results in ``CoreColumn_<top>_<base>`` 89 | --save_mode SAVE_MODE 90 | One of {'pickle', 'numpy'}. Whether to save as a single 91 | ``pkl`` file or multiple ``npy`` files 92 | 93 | Assuming you've downloaded and unzipped the ``assets`` folder in the default location, you can test the script with default parameters by running: 94 | 95 | .. code:: 96 | 97 | $ cd scripts 98 | $ python process_directory.py ../tests/data/two_image_dataset --depth_csv dummy_depths.csv 99 | 100 | This should save the aggregated ``CoreColumn`` to ``tests/data/two_image_dataset/CoreColumn_1.00_5.00.pkl``. 101 | 102 | ``prune_imageData.py`` 103 | ---------------------- 104 | Remove the ``imageData`` field from all JSON files in the tree below ``path``: 105 | 106 | positional arguments: 107 | path Path to parent of all target JSON files. 108 | 109 | optional arguments: 110 | -h, --help show this help message and exit 111 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/source/corebreakout.datasets.rst.txt: -------------------------------------------------------------------------------- 1 | corebreakout.datasets package 2 | ============================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | corebreakout.datasets.polygondataset module 8 | ------------------------------------------- 9 | 10 | .. automodule:: corebreakout.datasets.polygondataset 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | 16 | Module contents 17 | --------------- 18 | 19 | ..
automodule:: corebreakout.datasets 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/source/corebreakout.rst.txt: -------------------------------------------------------------------------------- 1 | corebreakout package 2 | ==================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | corebreakout.datasets 10 | 11 | Submodules 12 | ---------- 13 | 14 | corebreakout.column module 15 | -------------------------- 16 | 17 | .. automodule:: corebreakout.column 18 | :members: 19 | :undoc-members: 20 | :show-inheritance: 21 | 22 | corebreakout.defaults module 23 | ---------------------------- 24 | 25 | .. automodule:: corebreakout.defaults 26 | :members: 27 | :undoc-members: 28 | :show-inheritance: 29 | 30 | corebreakout.segmenter module 31 | ----------------------------- 32 | 33 | .. automodule:: corebreakout.segmenter 34 | :members: 35 | :undoc-members: 36 | :show-inheritance: 37 | 38 | corebreakout.utils module 39 | ------------------------- 40 | 41 | .. automodule:: corebreakout.utils 42 | :members: 43 | :undoc-members: 44 | :show-inheritance: 45 | 46 | corebreakout.viz module 47 | ----------------------- 48 | 49 | .. automodule:: corebreakout.viz 50 | :members: 51 | :undoc-members: 52 | :show-inheritance: 53 | 54 | 55 | Module contents 56 | --------------- 57 | 58 | .. automodule:: corebreakout 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/source/modules.rst.txt: -------------------------------------------------------------------------------- 1 | corebreakout 2 | ============ 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | corebreakout 8 | -------------------------------------------------------------------------------- /docs/_build/html/_static/classic.css: -------------------------------------------------------------------------------- 1 | /* 2 | * classic.css_t 3 | * ~~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- classic theme. 6 | * 7 | * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | @import url("basic.css"); 13 | 14 | /* -- page layout ----------------------------------------------------------- */ 15 | 16 | html { 17 | /* CSS hack for macOS's scrollbar (see #1125) */ 18 | background-color: #FFFFFF; 19 | } 20 | 21 | body { 22 | font-family: sans-serif; 23 | font-size: 100%; 24 | background-color: #11303d; 25 | color: #000; 26 | margin: 0; 27 | padding: 0; 28 | } 29 | 30 | div.document { 31 | background-color: #1c4e63; 32 | } 33 | 34 | div.documentwrapper { 35 | float: left; 36 | width: 100%; 37 | } 38 | 39 | div.bodywrapper { 40 | margin: 0 0 0 230px; 41 | } 42 | 43 | div.body { 44 | background-color: #ffffff; 45 | color: #000000; 46 | padding: 0 20px 30px 20px; 47 | } 48 | 49 | div.footer { 50 | color: #ffffff; 51 | width: 100%; 52 | padding: 9px 0 9px 0; 53 | text-align: center; 54 | font-size: 75%; 55 | } 56 | 57 | div.footer a { 58 | color: #ffffff; 59 | text-decoration: underline; 60 | } 61 | 62 | div.related { 63 | background-color: #133f52; 64 | line-height: 30px; 65 | color: #ffffff; 66 | } 67 | 68 | div.related a { 69 | color: #ffffff; 70 | } 71 | 72 | div.sphinxsidebar { 73 | } 74 | 75 | div.sphinxsidebar h3 { 76 | font-family: 'Trebuchet MS', sans-serif; 77 | color: #ffffff; 78 | font-size: 1.4em; 79 | font-weight: normal; 80 | margin: 0; 81 | padding: 0; 82 | } 83 | 84 | div.sphinxsidebar h3 a { 85 | color: #ffffff; 86 | } 87 | 88 | div.sphinxsidebar h4 { 89 | font-family: 'Trebuchet MS', sans-serif; 90 | color: #ffffff; 91 | font-size: 1.3em; 92 | font-weight: normal; 93 | margin: 5px 0 0 0; 94 | padding: 0; 95 | } 96 | 97 | div.sphinxsidebar p { 98 | color: #ffffff; 99 | } 100 | 101 | div.sphinxsidebar p.topless { 102 | margin: 5px 10px 10px 10px; 103 | } 104 | 105 | div.sphinxsidebar ul { 106 | margin: 10px; 107 | padding: 0; 108 | color: #ffffff; 109 | } 110 | 111 | div.sphinxsidebar a { 112 | color: #98dbcc; 113 | } 114 | 115 | div.sphinxsidebar input { 116 | border: 1px solid #98dbcc; 117 | font-family: sans-serif; 118 | font-size: 1em; 119 | } 120 | 121 | 122 | 123 | /* -- hyperlink styles ------------------------------------------------------ */ 124 | 125 | a { 126 | color: #355f7c; 127 | text-decoration: none; 128 | } 129 | 130 | a:visited { 131 | color: #355f7c; 132 | text-decoration: none; 133 | } 134 | 135 | a:hover { 136 | text-decoration: underline; 137 | } 138 | 139 | 140 | 141 | /* -- body styles ----------------------------------------------------------- */ 142 | 143 | div.body h1, 144 | div.body h2, 145 | div.body h3, 146 | div.body h4, 147 | div.body h5, 148 | div.body h6 { 149 | font-family: 'Trebuchet MS', sans-serif; 150 | background-color: #f2f2f2; 151 | font-weight: normal; 152 | color: #20435c; 153 | border-bottom: 1px solid #ccc; 154 | margin: 20px -20px 10px -20px; 155 | padding: 3px 0 3px 10px; 156 | } 157 | 158 | div.body h1 { margin-top: 0; font-size: 200%; } 159 | div.body h2 { font-size: 160%; } 160 | div.body h3 { font-size: 140%; } 161 | div.body h4 { font-size: 120%; } 162 | div.body h5 { font-size: 110%; } 163 | div.body h6 { font-size: 100%; } 164 | 165 | a.headerlink { 166 | color: #c60f0f; 167 | font-size: 0.8em; 168 | padding: 0 4px 0 4px; 169 | text-decoration: none; 170 | } 171 | 172 | a.headerlink:hover { 173 | background-color: #c60f0f; 174 | color: white; 175 | } 176 | 177 | div.body p, div.body dd, div.body li, div.body blockquote { 178 | text-align: justify; 179 | line-height: 130%; 180 | } 181 | 182 | div.admonition p.admonition-title + p { 183 | display: inline; 184 | } 185 | 186 | 
div.admonition p { 187 | margin-bottom: 5px; 188 | } 189 | 190 | div.admonition pre { 191 | margin-bottom: 5px; 192 | } 193 | 194 | div.admonition ul, div.admonition ol { 195 | margin-bottom: 5px; 196 | } 197 | 198 | div.note { 199 | background-color: #eee; 200 | border: 1px solid #ccc; 201 | } 202 | 203 | div.seealso { 204 | background-color: #ffc; 205 | border: 1px solid #ff6; 206 | } 207 | 208 | div.topic { 209 | background-color: #eee; 210 | } 211 | 212 | div.warning { 213 | background-color: #ffe4e4; 214 | border: 1px solid #f66; 215 | } 216 | 217 | p.admonition-title { 218 | display: inline; 219 | } 220 | 221 | p.admonition-title:after { 222 | content: ":"; 223 | } 224 | 225 | pre { 226 | padding: 5px; 227 | background-color: #eeffcc; 228 | color: #333333; 229 | line-height: 120%; 230 | border: 1px solid #ac9; 231 | border-left: none; 232 | border-right: none; 233 | } 234 | 235 | code { 236 | background-color: #ecf0f3; 237 | padding: 0 1px 0 1px; 238 | font-size: 0.95em; 239 | } 240 | 241 | th, dl.field-list > dt { 242 | background-color: #ede; 243 | } 244 | 245 | .warning code { 246 | background: #efc2c2; 247 | } 248 | 249 | .note code { 250 | background: #d6d6d6; 251 | } 252 | 253 | .viewcode-back { 254 | font-family: sans-serif; 255 | } 256 | 257 | div.viewcode-block:target { 258 | background-color: #f4debf; 259 | border-top: 1px solid #ac9; 260 | border-bottom: 1px solid #ac9; 261 | } 262 | 263 | div.code-block-caption { 264 | color: #efefef; 265 | background-color: #1c4e63; 266 | } -------------------------------------------------------------------------------- /docs/_build/html/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions 
.rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} 2 | -------------------------------------------------------------------------------- /docs/_build/html/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Sphinx JavaScript utilities for all documentation. 6 | * 7 | * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | /** 18 | * make the code below compatible with browsers without 19 | * an installed firebug like debugger 20 | if (!window.console || !console.firebug) { 21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir", 22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", 23 | "profile", "profileEnd"]; 24 | window.console = {}; 25 | for (var i = 0; i < names.length; ++i) 26 | window.console[names[i]] = function() {}; 27 | } 28 | */ 29 | 30 | /** 31 | * small helper function to urldecode strings 32 | */ 33 | jQuery.urldecode = function(x) { 34 | return decodeURIComponent(x).replace(/\+/g, ' '); 35 | }; 36 | 37 | /** 38 | * small helper function to urlencode strings 39 | */ 40 | jQuery.urlencode = encodeURIComponent; 41 | 42 | /** 43 | * This function returns the parsed url parameters of the 44 | * current request. Multiple values per key are supported, 45 | * it will always return arrays of strings for the value parts. 
46 | */ 47 | jQuery.getQueryParameters = function(s) { 48 | if (typeof s === 'undefined') 49 | s = document.location.search; 50 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 51 | var result = {}; 52 | for (var i = 0; i < parts.length; i++) { 53 | var tmp = parts[i].split('=', 2); 54 | var key = jQuery.urldecode(tmp[0]); 55 | var value = jQuery.urldecode(tmp[1]); 56 | if (key in result) 57 | result[key].push(value); 58 | else 59 | result[key] = [value]; 60 | } 61 | return result; 62 | }; 63 | 64 | /** 65 | * highlight a given string on a jquery object by wrapping it in 66 | * span elements with the given class name. 67 | */ 68 | jQuery.fn.highlightText = function(text, className) { 69 | function highlight(node, addItems) { 70 | if (node.nodeType === 3) { 71 | var val = node.nodeValue; 72 | var pos = val.toLowerCase().indexOf(text); 73 | if (pos >= 0 && 74 | !jQuery(node.parentNode).hasClass(className) && 75 | !jQuery(node.parentNode).hasClass("nohighlight")) { 76 | var span; 77 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); 78 | if (isInSVG) { 79 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 80 | } else { 81 | span = document.createElement("span"); 82 | span.className = className; 83 | } 84 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 85 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 86 | document.createTextNode(val.substr(pos + text.length)), 87 | node.nextSibling)); 88 | node.nodeValue = val.substr(0, pos); 89 | if (isInSVG) { 90 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); 91 | var bbox = node.parentElement.getBBox(); 92 | rect.x.baseVal.value = bbox.x; 93 | rect.y.baseVal.value = bbox.y; 94 | rect.width.baseVal.value = bbox.width; 95 | rect.height.baseVal.value = bbox.height; 96 | rect.setAttribute('class', className); 97 | addItems.push({ 98 | "parent": node.parentNode, 99 | "target": rect}); 100 | } 101 | } 102 | } 103 | else if (!jQuery(node).is("button, select, textarea")) { 104 | jQuery.each(node.childNodes, function() { 105 | highlight(this, addItems); 106 | }); 107 | } 108 | } 109 | var addItems = []; 110 | var result = this.each(function() { 111 | highlight(this, addItems); 112 | }); 113 | for (var i = 0; i < addItems.length; ++i) { 114 | jQuery(addItems[i].parent).before(addItems[i].target); 115 | } 116 | return result; 117 | }; 118 | 119 | /* 120 | * backward compatibility for jQuery.browser 121 | * This will be supported until firefox bug is fixed. 122 | */ 123 | if (!jQuery.browser) { 124 | jQuery.uaMatch = function(ua) { 125 | ua = ua.toLowerCase(); 126 | 127 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || 128 | /(webkit)[ \/]([\w.]+)/.exec(ua) || 129 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || 130 | /(msie) ([\w.]+)/.exec(ua) || 131 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || 132 | []; 133 | 134 | return { 135 | browser: match[ 1 ] || "", 136 | version: match[ 2 ] || "0" 137 | }; 138 | }; 139 | jQuery.browser = {}; 140 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; 141 | } 142 | 143 | /** 144 | * Small JavaScript module for the documentation. 
145 | */ 146 | var Documentation = { 147 | 148 | init : function() { 149 | this.fixFirefoxAnchorBug(); 150 | this.highlightSearchWords(); 151 | this.initIndexTable(); 152 | if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { 153 | this.initOnKeyListeners(); 154 | } 155 | }, 156 | 157 | /** 158 | * i18n support 159 | */ 160 | TRANSLATIONS : {}, 161 | PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, 162 | LOCALE : 'unknown', 163 | 164 | // gettext and ngettext don't access this so that the functions 165 | // can safely bound to a different name (_ = Documentation.gettext) 166 | gettext : function(string) { 167 | var translated = Documentation.TRANSLATIONS[string]; 168 | if (typeof translated === 'undefined') 169 | return string; 170 | return (typeof translated === 'string') ? translated : translated[0]; 171 | }, 172 | 173 | ngettext : function(singular, plural, n) { 174 | var translated = Documentation.TRANSLATIONS[singular]; 175 | if (typeof translated === 'undefined') 176 | return (n == 1) ? singular : plural; 177 | return translated[Documentation.PLURALEXPR(n)]; 178 | }, 179 | 180 | addTranslations : function(catalog) { 181 | for (var key in catalog.messages) 182 | this.TRANSLATIONS[key] = catalog.messages[key]; 183 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); 184 | this.LOCALE = catalog.locale; 185 | }, 186 | 187 | /** 188 | * add context elements like header anchor links 189 | */ 190 | addContextElements : function() { 191 | $('div[id] > :header:first').each(function() { 192 | $('\u00B6'). 193 | attr('href', '#' + this.id). 194 | attr('title', _('Permalink to this headline')). 195 | appendTo(this); 196 | }); 197 | $('dt[id]').each(function() { 198 | $('\u00B6'). 199 | attr('href', '#' + this.id). 200 | attr('title', _('Permalink to this definition')). 201 | appendTo(this); 202 | }); 203 | }, 204 | 205 | /** 206 | * workaround a firefox stupidity 207 | * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 208 | */ 209 | fixFirefoxAnchorBug : function() { 210 | if (document.location.hash && $.browser.mozilla) 211 | window.setTimeout(function() { 212 | document.location.href += ''; 213 | }, 10); 214 | }, 215 | 216 | /** 217 | * highlight the search words provided in the url in the text 218 | */ 219 | highlightSearchWords : function() { 220 | var params = $.getQueryParameters(); 221 | var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; 222 | if (terms.length) { 223 | var body = $('div.body'); 224 | if (!body.length) { 225 | body = $('body'); 226 | } 227 | window.setTimeout(function() { 228 | $.each(terms, function() { 229 | body.highlightText(this.toLowerCase(), 'highlighted'); 230 | }); 231 | }, 10); 232 | $('') 234 | .appendTo($('#searchbox')); 235 | } 236 | }, 237 | 238 | /** 239 | * init the domain index toggle buttons 240 | */ 241 | initIndexTable : function() { 242 | var togglers = $('img.toggler').click(function() { 243 | var src = $(this).attr('src'); 244 | var idnum = $(this).attr('id').substr(7); 245 | $('tr.cg-' + idnum).toggle(); 246 | if (src.substr(-9) === 'minus.png') 247 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); 248 | else 249 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); 250 | }).css('display', ''); 251 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { 252 | togglers.click(); 253 | } 254 | }, 255 | 256 | /** 257 | * helper function to hide the search marks again 258 | */ 259 | hideSearchWords : function() { 260 | $('#searchbox .highlight-link').fadeOut(300); 261 | $('span.highlighted').removeClass('highlighted'); 262 | }, 263 | 264 | /** 265 | * make the url absolute 266 | */ 267 | makeURL : function(relativeURL) { 268 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; 269 | }, 270 | 271 | /** 272 | * get the current relative url 273 | */ 274 | getCurrentURL : function() { 275 | var path = document.location.pathname; 276 | var parts = path.split(/\//); 277 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { 278 | if (this === '..') 279 | parts.pop(); 280 | }); 281 | var url = parts.join('/'); 282 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1); 283 | }, 284 | 285 | initOnKeyListeners: function() { 286 | $(document).keydown(function(event) { 287 | var activeElementType = document.activeElement.tagName; 288 | // don't navigate when in search box or textarea 289 | if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT' 290 | && !event.altKey && !event.ctrlKey && !event.metaKey && !event.shiftKey) { 291 | switch (event.keyCode) { 292 | case 37: // left 293 | var prevHref = $('link[rel="prev"]').prop('href'); 294 | if (prevHref) { 295 | window.location.href = prevHref; 296 | return false; 297 | } 298 | case 39: // right 299 | var nextHref = $('link[rel="next"]').prop('href'); 300 | if (nextHref) { 301 | window.location.href = nextHref; 302 | return false; 303 | } 304 | } 305 | } 306 | }); 307 | } 308 | }; 309 | 310 | // quick alias for translations 311 | _ = Documentation.gettext; 312 | 313 | $(document).ready(function() { 314 | Documentation.init(); 315 | }); 316 | -------------------------------------------------------------------------------- /docs/_build/html/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '0.3', 4 | LANGUAGE: 'None', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | HAS_SOURCE: true, 9 | SOURCELINK_SUFFIX: '.txt', 10 | NAVIGATION_WITH_KEYS: false 11 | }; -------------------------------------------------------------------------------- /docs/_build/html/_static/file.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/file.png -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Inconsolata-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Inconsolata-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Inconsolata.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Inconsolata.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato-Bold.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato-Regular.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-bold.eot -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-bold.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-bold.woff -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-italic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-italic.eot -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-italic.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-italic.woff -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-regular.eot -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-regular.ttf 
-------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-regular.woff -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/Lato/lato-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot -------------------------------------------------------------------------------- 
/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_build/html/_static/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgmyr/corebreakout/73152155d4a7e69e9fb3cf11785306227cbeaace/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/_build/html/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | /* sphinx_rtd_theme version 0.4.3 | MIT license */ 2 | /* Built 20190212 16:02 */ 3 | require=function r(s,a,l){function c(e,n){if(!a[e]){if(!s[e]){var i="function"==typeof require&&require;if(!n&&i)return i(e,!0);if(u)return u(e,!0);var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}var o=a[e]={exports:{}};s[e][0].call(o.exports,function(n){return c(s[e][1][n]||n)},o,o.exports,r,s,a,l)}return a[e].exports}for(var u="function"==typeof require&&require,n=0;n"),i("table.docutils.footnote").wrap("
"),i("table.docutils.citation").wrap("
"),i(".wy-menu-vertical ul").not(".simple").siblings("a").each(function(){var e=i(this);expand=i(''),expand.on("click",function(n){return t.toggleCurrent(e),n.stopPropagation(),!1}),e.prepend(expand)})},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),i=e.find('[href="'+n+'"]');if(0===i.length){var t=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(i=e.find('[href="#'+t.attr("id")+'"]')).length&&(i=e.find('[href="#"]'))}0this.docHeight||(this.navBar.scrollTop(i),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",function(){this.linkScroll=!1})},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current"),e.siblings().find("li.current").removeClass("current"),e.find("> ul li.current").removeClass("current"),e.toggleClass("current")}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:e.exports.ThemeNav,StickyNav:e.exports.ThemeNav}),function(){for(var r=0,n=["ms","moz","webkit","o"],e=0;e