├── .circleci
└── config.yml
├── .github
└── workflows
│ └── pythonpackage.yml
├── .gitignore
├── AUTHORS
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Colab_TrainNetwork_VideoAnalysis_TF2.ipynb
├── LICENSE
├── README.md
├── Reaching-Mackenzie-2018-08-30
├── config.yaml
├── labeled-data
│ └── reachingvideo1
│ │ ├── CollectedData_Mackenzie.csv
│ │ ├── CollectedData_Mackenzie.h5
│ │ ├── img005.png
│ │ ├── img020.png
│ │ ├── img023.png
│ │ ├── img028.png
│ │ ├── img031.png
│ │ ├── img033.png
│ │ ├── img036.png
│ │ ├── img037.png
│ │ ├── img038.png
│ │ ├── img040.png
│ │ ├── img042.png
│ │ ├── img043.png
│ │ ├── img046.png
│ │ ├── img048.png
│ │ ├── img052.png
│ │ ├── img060.png
│ │ ├── img068.png
│ │ ├── img071.png
│ │ ├── img075.png
│ │ ├── img077.png
│ │ ├── img080.png
│ │ ├── img087.png
│ │ ├── img090.png
│ │ ├── img100.png
│ │ ├── img103.png
│ │ ├── img108.png
│ │ ├── img116.png
│ │ ├── img118.png
│ │ ├── img119.png
│ │ ├── img126.png
│ │ ├── img141.png
│ │ ├── img142.png
│ │ ├── img145.png
│ │ ├── img151.png
│ │ ├── img152.png
│ │ ├── img157.png
│ │ ├── img167.png
│ │ ├── img168.png
│ │ ├── img177.png
│ │ ├── img179.png
│ │ ├── img180.png
│ │ ├── img194.png
│ │ ├── img201.png
│ │ ├── img211.png
│ │ ├── img213.png
│ │ ├── img214.png
│ │ ├── img225.png
│ │ ├── img227.png
│ │ ├── img228.png
│ │ ├── img230.png
│ │ ├── img231.png
│ │ ├── img234.png
│ │ ├── img237.png
│ │ ├── img240.png
│ │ └── img245.png
└── videos
│ ├── MovieS2_Perturbation_noLaser_compressed.avi
│ └── reachingvideo1.avi
├── conda-environments
├── DLCcore-CPU.yaml
└── DLCcore-GPU.yaml
├── deeplabcutcore
├── __init__.py
├── create_project
│ ├── __init__.py
│ ├── add.py
│ ├── demo_data.py
│ ├── modelzoo.py
│ ├── new.py
│ └── new_3d.py
├── generate_training_dataset
│ ├── __init__.py
│ ├── frame_extraction.py
│ └── trainingsetmanipulation.py
├── pose_cfg.yaml
├── pose_estimation_3d
│ ├── __init__.py
│ ├── camera_calibration.py
│ ├── plotting3D.py
│ └── triangulation.py
├── pose_estimation_tensorflow
│ ├── LICENSE
│ ├── README.md
│ ├── __init__.py
│ ├── config.py
│ ├── dataset
│ │ ├── __init__.py
│ │ ├── factory.py
│ │ ├── pose_dataset.py
│ │ ├── pose_dataset_deterministic.py
│ │ ├── pose_dataset_imgaug.py
│ │ ├── pose_dataset_tensorpack.py
│ │ └── pose_defaultdataset.py
│ ├── default_config.py
│ ├── evaluate.py
│ ├── export.py
│ ├── models
│ │ ├── __init__.py
│ │ └── pretrained
│ │ │ ├── __init__.py
│ │ │ ├── download.sh
│ │ │ └── pretrained_model_urls.yaml
│ ├── nnet
│ │ ├── __init__.py
│ │ ├── conv_blocks.py
│ │ ├── losses.py
│ │ ├── mobilenet.py
│ │ ├── mobilenet_v2.py
│ │ ├── net_factory.py
│ │ ├── pose_net.py
│ │ ├── pose_net_mobilenet.py
│ │ └── predict.py
│ ├── predict_videos.py
│ ├── test.py
│ ├── train.py
│ ├── training.py
│ └── util
│ │ ├── __init__.py
│ │ ├── logging.py
│ │ └── visualize.py
├── post_processing
│ ├── __init__.py
│ ├── analyze_skeleton.py
│ └── filtering.py
├── refine_training_dataset
│ ├── __init__.py
│ └── outlier_frames.py
├── utils
│ ├── __init__.py
│ ├── auxfun_models.py
│ ├── auxfun_multianimal.py
│ ├── auxfun_videos.py
│ ├── auxiliaryfunctions.py
│ ├── auxiliaryfunctions_3d.py
│ ├── conversioncode.py
│ ├── frameselectiontools.py
│ ├── make_labeled_video.py
│ ├── plotting.py
│ ├── video_processor.py
│ └── visualization.py
└── version.py
├── reinstall.sh
├── requirements.txt
├── setup.py
├── testscript.py
└── testscript_cli.py
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 |
3 | orbs:
4 | python: circleci/python@0.2.1
5 |
6 | jobs:
7 | build-and-test:
8 | working_directory: ~/circleci-demo-python-django
9 | docker:
10 | - image: circleci/python:3.7.9 # primary container for the build job
11 | auth:
12 | username: mydockerhub-user
13 | password: $DOCKERHUB_PASSWORD # context / project UI env-var reference
14 | steps:
15 | - checkout
16 | - python/load-cache
17 | - python/install-deps
18 | - python/save-cache
19 | - run:
20 | command: python testscript_cli.py
21 | name: TestDLC
22 |
23 | workflows:
24 | main:
25 | jobs:
26 | - build-and-test
27 |
--------------------------------------------------------------------------------
/.github/workflows/pythonpackage.yml:
--------------------------------------------------------------------------------
1 | name: Python package
2 |
3 | on: [push]
4 |
5 | jobs:
6 | build:
7 |
8 | runs-on: ubuntu-latest
9 | strategy:
10 | # You can use PyPy versions in python-version. For example, pypy2 and pypy3
11 | matrix:
12 | python-version: [3.6, 3.7, 3.8] #currently tensorflow not supported for 3.9; should be dec 2020 (https://github.com/tensorflow/tensorflow/issues/44485); #dec 15, 2020 MWM
13 |
14 | steps:
15 | - uses: actions/checkout@v2
16 | - name: Set up Python ${{ matrix.python-version }}
17 | uses: actions/setup-python@v2
18 | with:
19 | python-version: ${{ matrix.python-version }}
20 | # You can test your matrix by printing the current Python version
21 | - name: Display Python version
22 | run: python -c "import sys; print(sys.version)"
23 | #test installation of DLC-core dependencies:
24 | - name: Install dependencies
25 | run: pip install -r requirements.txt
26 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.ckpt #TF
6 |
7 |
8 | # C extensions
9 | *.so
10 |
11 | # tests
12 | /tests
13 |
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | pip-wheel-metadata/
29 | share/python-wheels/
30 | *.egg-info/
31 | .installed.cfg
32 | *.egg
33 | MANIFEST
34 |
35 | # PyInstaller
36 | # Usually these files are written by a python script from a template
37 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
38 | *.manifest
39 | *.spec
40 |
41 | # Installer logs
42 | pip-log.txt
43 | pip-delete-this-directory.txt
44 |
45 | # Unit test / coverage reports
46 | htmlcov/
47 | .tox/
48 | .nox/
49 | .coverage
50 | .coverage.*
51 | .cache
52 | nosetests.xml
53 | coverage.xml
54 | *.cover
55 | *.py,cover
56 | .hypothesis/
57 | .pytest_cache/
58 |
59 | # Translations
60 | *.mo
61 | *.pot
62 |
63 | # Django stuff:
64 | *.log
65 | local_settings.py
66 | db.sqlite3
67 | db.sqlite3-journal
68 |
69 | # Flask stuff:
70 | instance/
71 | .webassets-cache
72 |
73 | # Scrapy stuff:
74 | .scrapy
75 |
76 | # Sphinx documentation
77 | docs/_build/
78 |
79 | # PyBuilder
80 | target/
81 |
82 | # Jupyter Notebook
83 | .ipynb_checkpoints
84 |
85 | # IPython
86 | profile_default/
87 | ipython_config.py
88 |
89 | # pyenv
90 | .python-version
91 |
92 | # pipenv
93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
96 | # install all needed dependencies.
97 | #Pipfile.lock
98 |
99 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
100 | __pypackages__/
101 |
102 | # Celery stuff
103 | celerybeat-schedule
104 | celerybeat.pid
105 |
106 | # SageMath parsed files
107 | *.sage.py
108 |
109 | # Environments
110 | .env
111 | .venv
112 | env/
113 | venv/
114 | ENV/
115 | env.bak/
116 | venv.bak/
117 |
118 | # Spyder project settings
119 | .spyderproject
120 | .spyproject
121 |
122 | # Rope project settings
123 | .ropeproject
124 |
125 | # mkdocs documentation
126 | /site
127 |
128 | # mypy
129 | .mypy_cache/
130 | .dmypy.json
131 | dmypy.json
132 |
133 | # Pyre type checker
134 | .pyre/
135 |
--------------------------------------------------------------------------------
/AUTHORS:
--------------------------------------------------------------------------------
1 | DeepLabCut (www.deeplabcut.org) was initially developed by
2 | Alexander & Mackenzie Mathis in collaboration with Matthias Bethge.
3 |
4 | DeepLabCut is an open-source tool and has benefited from suggestions and edits by many
5 | individuals including Tanmay Nath, Richard Warren, Ronny Eichler, Jonas Rauber, Hao Wu,
6 | Federico Claudi, Gary Kane, Taiga Abe, and Jonny Saunders, as well as the many others listed
7 | on the contributors page for this open source project:
8 | https://github.com/AlexEMG/DeepLabCut/graphs/contributors
9 |
10 | ############################################################################################################
11 |
12 | DeepLabCut 1.0 Toolbox
13 | A Mathis, alexander.mathis@bethgelab.org | https://github.com/AlexEMG/DeepLabCut
14 | M Mathis, mackenzie@post.harvard.edu | https://github.com/MMathisLab
15 |
16 | Specific external contributors:
17 | E Insafutdinov and co-authors of DeeperCut (see README) for feature detectors: https://github.com/eldar
18 | - Thus, code in this subdirectory https://github.com/AlexEMG/DeepLabCut/tree/master/deeplabcut/pose_estimation_tensorflow
19 | was adapted from: https://github.com/eldar/pose-tensorflow
20 |
21 | Products:
22 | DeepLabCut: markerless pose estimation of user-defined body parts with deep learning. Nature Neuroscience, 2018.
23 | https://doi.org/10.1038/s41593-018-0209-y
24 | A. Mathis, P. Mamidanna, K.M. Cury, T. Abe, V.N. Murthy, M.W. Mathis* & M. Bethge*
25 |
26 | Contributions:
27 | Conceptualization: A.M., M.W.M. and M.B.
28 | Software: A.M. and M.W.M.
29 | Formal analysis: A.M.
30 | Experiments: A.M. and V.N.M. (trail-tracking), M.W.M. (mouse reaching), K.M.C. (Drosophila).
31 | Image Labeling: P.M., K.M.C., T.A., M.W.M., A.M.
32 | Writing: A.M. and M.W.M. with input from all authors.
33 | These authors jointly directed this work: M. Mathis, M. Bethge
34 |
35 | ############################################################################################################
36 |
37 | DeepLabCut 2.0 Toolbox
38 | A Mathis, alexander.mathis@bethgelab.org | https://github.com/AlexEMG/DeepLabCut
39 | T Nath, nath@rowland.harvard.edu | https://github.com/meet10may
40 | M Mathis, mackenzie@post.harvard.edu | https://github.com/MMathisLab
41 |
42 | Products:
43 | Using DeepLabCut for 3D markerless pose estimation across species and behaviors. Nature Protocols, 2019.
44 | https://www.nature.com/articles/s41596-019-0176-0
45 | T. Nath*, A. Mathis*, AC. Chen, A. Patel, M. Bethge, M. Mathis
46 |
47 | Contributions:
48 | Conceptualization: AM, TN, MWM.
49 | Software: AM, TN and MWM.
50 | Dataset (cheetah): AP.
51 | Image Labeling: ACC.
52 | Formal analysis: ACC, AM and AP analyzed the cheetah data.
53 | Writing: MWM, AM and TN with inputs from all authors.
54 |
55 | ############################################################################################################
56 |
57 | DeepLabCut 2.1 additions
58 | A Mathis, alexander.mathis@bethgelab.org | https://github.com/AlexEMG/DeepLabCut
59 | T Nath, nath@rowland.harvard.edu | https://github.com/meet10may
60 | M Mathis, mackenzie@post.harvard.edu | https://github.com/MMathisLab
61 |
62 | Preprint:
63 | Pretraining boosts out-of-domain robustness for pose estimation
64 | A. Mathis, M. Yüksekgönül, B. Rogers, M. Bethge, M. Mathis
65 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at alexander.mathis@bethgelab.org. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72 |
73 | [homepage]: https://www.contributor-covenant.org
74 |
75 | For answers to common questions about this code of conduct, see
76 | https://www.contributor-covenant.org/faq
77 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute to DeepLabCut
2 |
3 | DeepLabCut is an actively developed package and we welcome community development and involvement. We are especially seeking people from underrepresented backgrounds in OSS to contribute their expertise and experience. Please get in touch if you want to discuss specific contributions you are interested in developing, and we can help shape a road-map.
4 |
5 | We are happy to receive code extensions, bug fixes, documentation updates, etc.
6 |
7 | If you want to contribute to the code, please make a [pull request](https://github.com/AlexEMG/DeepLabCut/pull/new/) that includes a summary of your changes, covering:
8 | - how you modified the code and what new functionality it has
9 | - the **OS it has been tested on**
10 | - the **output of the [testscript.py](/examples/testscript.py)**, and if you are editing the 3D code, the **output of [testscript_3d.py](/examples/testscript_3d.py)**.
11 | - DOCSTRING update for your change
12 | - a working example of how it works for users.
13 | - if it's a function that can also be used in downstream steps (i.e., could be plotted), we ask that you (1) highlight this, and (2) ideally provide that functionality as well. If you have any questions, please reach out: admin@deeplabcut.org
14 |
15 | DeepLabCut is an open-source tool and has benefited from suggestions and edits by many individuals:
16 | - the [authors](https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS)
17 | - [contributors](https://github.com/AlexEMG/DeepLabCut/graphs/contributors)
18 |
19 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 |
167 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DeepLabCut-core
2 | ## JUNE 2021: THIS CODE IS NOW DEPRECATED! DeepLabCut now supports 2.2 for standard, multi-animal, and DeepLabCut-Live! See the main repo for details!
3 |
4 |
5 | **UPDATE JAN 2021: We will be using this space as the tensorflow 2 test-bed, but then all of DeepLabCut will be within the main package. The headless version will be `pip install deeplabcut`, while the full GUI-supported version is `pip install deeplabcut[gui]`. This means deeplabcutcore will be deprecated once TF2 is merged into the main repo.**
6 |
7 | **Currently up to date with [DeepLabCut v2.1.8.1](https://github.com/AlexEMG/DeepLabCut/tree/v2.1.8.1). AND uses tensorflow 2.X**
8 |
9 | Core functionalities of DeepLabCut, excluding all GUI functions.
10 |
11 | Please be aware that you can create projects, etc. with the full deeplabcut package. Here, you will need to create the training set and then train, evaluate, etc., without inter-mixing with the deeplabcut package (which currently supports tensorflow 1.x). We recommend looking at this [Google Colab notebook](/Colab_TrainNetwork_VideoAnalysis_TF2.ipynb) to help you, and [this blog post](http://www.mousemotorlab.org/deeplabcutblog/2020/11/23/rolling-up-to-tensorflow-2) about our transition to tensorflow 2.
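
For orientation, here is a minimal sketch of the headless workflow (the paths are placeholders; the Colab notebook linked above is the full, tested reference):

```python
import deeplabcutcore as dlc

# Placeholder paths to a project created with the full deeplabcut package.
config_path = "/home/user/Reaching-Mackenzie-2018-08-30/config.yaml"
videos = ["/home/user/Reaching-Mackenzie-2018-08-30/videos/reachingvideo1.avi"]

dlc.create_training_dataset(config_path)       # build the training set from the labeled data
dlc.train_network(config_path)                 # train with tensorflow 2.x
dlc.evaluate_network(config_path)              # evaluate on the train/test split
dlc.analyze_videos(config_path, videos)        # run inference on new videos
dlc.create_labeled_video(config_path, videos)  # visualize the predictions
```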
12 |
13 | 
14 | 
15 | [](https://github.com/DeepLabCut/deeplabcutcore/raw/master/LICENSE)
16 | [](https://pypi.org/project/deeplabcutcore)
17 | [](https://pypistats.org/packages/deeplabcutcore)
18 |
19 |
20 | Install from GitHub:
21 | ``` pip install git+https://github.com/DeepLabCut/DeepLabCut-core ```
22 |
23 | PyPi:
24 | ```pip install deeplabcutcore```
25 |
26 | Documentation is located at [DeepLabCut's main GitHub page](https://github.com/AlexEMG/DeepLabCut/blob/master/docs/UseOverviewGuide.md).
27 |
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/config.yaml:
--------------------------------------------------------------------------------
1 | # Project definitions (do not edit)
2 | Task: Reaching
3 | scorer: Mackenzie
4 | date: Aug30
5 |
6 | # Project path (change when moving around)
7 | project_path: WILL BE AUTOMATICALLY UPDATED BY DEMO CODE
8 |
9 | # Annotation data set configuration (and individual video cropping parameters)
10 | video_sets:
11 | WILL BE AUTOMATICALLY UPDATED BY DEMO CODE:
12 | crop: 0, 832, 0, 747
13 | bodyparts:
14 | - Hand
15 | - Finger1
16 | - Tongue
17 | - Joystick1
18 | - Joystick2
19 | start: 0
20 | stop: 1
21 | numframes2pick: 40
22 |
23 | # Plotting configuration
24 | skeleton: [['Hand', 'Finger1'], ['Joystick1', 'Joystick2']]
25 | skeleton_color: blue
26 | pcutoff: 0.4
27 | dotsize: 12
28 | alphavalue: 0.7
29 | colormap: jet
30 |
31 |
32 | # Training, Evaluation and Analysis configuration
33 | TrainingFraction:
34 | - 0.95
35 | iteration: 0
36 | default_net_type: resnet_50
37 | snapshotindex: -1
38 | batch_size: 4
39 |
40 | # Cropping Parameters (for analysis and outlier frame detection)
41 | cropping: false
42 | #if cropping is true for analysis, then set the values here:
43 | x1: 0
44 | x2: 640
45 | y1: 277
46 | y2: 624
47 |
48 | # Refinement configuration (parameters from annotation dataset configuration also relevant in this stage)
49 | corner2move2:
50 | - 50
51 | - 50
52 | move2corner: true
53 |
--------------------------------------------------------------------------------
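The `WILL BE AUTOMATICALLY UPDATED BY DEMO CODE` placeholders in the config above are filled in when the demo project is loaded on a local machine. A minimal sketch of that step using the exported helpers (the placeholder path and the exact call signature are assumptions):

```python
import os
import deeplabcutcore as dlc
from deeplabcutcore.utils import auxiliaryfunctions

# Path to the demo project inside a local checkout of this repository (placeholder).
config_path = os.path.join("Reaching-Mackenzie-2018-08-30", "config.yaml")

# load_demo_data rewrites project_path and the video_sets entry for this machine.
dlc.load_demo_data(config_path)

# Inspect the updated configuration.
cfg = auxiliaryfunctions.read_config(config_path)
print(cfg["project_path"], cfg["bodyparts"])
```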
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/CollectedData_Mackenzie.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/CollectedData_Mackenzie.h5
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img005.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img005.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img020.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img023.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img023.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img028.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img028.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img031.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img031.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img033.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img033.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img036.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img036.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img037.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img037.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img038.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img038.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img040.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img040.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img042.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img042.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img043.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img043.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img046.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img046.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img048.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img048.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img052.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img052.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img060.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img060.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img068.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img068.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img071.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img071.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img075.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img075.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img077.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img077.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img080.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img080.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img087.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img087.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img090.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img090.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img100.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img100.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img103.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img103.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img108.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img108.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img116.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img116.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img118.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img118.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img119.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img119.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img126.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img126.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img141.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img141.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img142.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img142.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img145.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img145.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img151.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img151.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img152.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img152.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img157.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img157.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img167.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img167.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img168.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img168.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img177.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img177.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img179.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img179.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img180.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img180.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img194.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img194.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img201.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img201.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img211.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img211.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img213.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img213.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img214.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img214.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img225.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img225.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img227.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img227.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img228.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img228.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img230.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img230.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img231.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img231.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img234.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img234.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img237.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img237.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img240.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img240.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img245.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/labeled-data/reachingvideo1/img245.png
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/videos/MovieS2_Perturbation_noLaser_compressed.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/videos/MovieS2_Perturbation_noLaser_compressed.avi
--------------------------------------------------------------------------------
/Reaching-Mackenzie-2018-08-30/videos/reachingvideo1.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/Reaching-Mackenzie-2018-08-30/videos/reachingvideo1.avi
--------------------------------------------------------------------------------
/conda-environments/DLCcore-CPU.yaml:
--------------------------------------------------------------------------------
1 | # DLCcore-CPU.yaml
2 |
3 | #DeepLabCut2.0 Toolbox (deeplabcut.org)
4 | #© A. & M. Mathis Labs
5 | #https://github.com/AlexEMG/DeepLabCut
6 | #Please see AUTHORS for contributors.
7 |
8 | #https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
9 | #Licensed under GNU Lesser General Public License v3.0
10 |
11 | # install: conda env create -f DLCcore-CPU.yaml
12 | # update: conda env update -f DLCcore-CPU.yaml
13 |
14 | name: DLCcore-CPU
15 | dependencies:
16 | - python=3.7
17 | - pip
18 | - jupyter
19 | - nb_conda
20 | - tensorflow
21 | - wxpython<4.1.0
22 | - Shapely
23 | - pip:
24 | - deeplabcutcore
25 | - tf_slim
26 |
--------------------------------------------------------------------------------
/conda-environments/DLCcore-GPU.yaml:
--------------------------------------------------------------------------------
1 | # DLCcore-GPU.yaml
2 |
3 | #DeepLabCut2.0 Toolbox (deeplabcut.org)
4 | #© A. & M. Mathis Labs
5 | #https://github.com/AlexEMG/DeepLabCut
6 | #Please see AUTHORS for contributors.
7 |
8 | #https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
9 | #Licensed under GNU Lesser General Public License v3.0
10 | #
11 |
12 | # install: conda env create -f DLCcore-GPU.yaml
13 | # update: conda env update -f DLCcore-GPU.yaml
14 |
15 | name: DLCcore-GPU
16 | dependencies:
17 | - python=3.7
18 | - pip
19 | - tensorflow-gpu
20 | - cudnn=7
21 | - wxpython<4.1.0
22 | - jupyter
23 | - nb_conda
24 | - Shapely
25 | - pip:
26 | - deeplabcutcore
27 | - tf_slim
28 |
--------------------------------------------------------------------------------
/deeplabcutcore/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | deeplabcutcore2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/deeplabcut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/deeplabcut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | import os
12 | import platform
13 |
14 | # Suppress tensorflow warning messages
15 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
16 | DEBUG = True and "DEBUG" in os.environ and os.environ["DEBUG"]
17 | from deeplabcutcore import DEBUG
18 |
19 |
20 | from deeplabcutcore.create_project import (
21 | create_new_project,
22 | create_new_project_3d,
23 | add_new_videos,
24 | load_demo_data,
25 | )
26 | from deeplabcutcore.create_project import (
27 | create_pretrained_project,
28 | create_pretrained_human_project,
29 | )
30 | from deeplabcutcore.generate_training_dataset import (
31 | extract_frames,
32 | select_cropping_area,
33 | )
34 | from deeplabcutcore.generate_training_dataset import (
35 | check_labels,
36 | create_training_dataset,
37 | mergeandsplit,
38 | create_training_model_comparison,
39 | )
40 | from deeplabcutcore.utils import (
41 | create_labeled_video,
42 | plot_trajectories,
43 | auxiliaryfunctions,
44 | convertcsv2h5,
45 | convertannotationdata_fromwindows2unixstyle,
46 | analyze_videos_converth5_to_csv,
47 | auxfun_videos,
48 | )
49 | from deeplabcutcore.utils.auxfun_videos import ShortenVideo, DownSampleVideo, CropVideo
50 |
51 | # Train, evaluate & predict functions / require TF
52 | from deeplabcutcore.pose_estimation_tensorflow import (
53 | train_network,
54 | return_train_network_path,
55 | )
56 | from deeplabcutcore.pose_estimation_tensorflow import (
57 | evaluate_network,
58 | return_evaluate_network_data,
59 | )
60 | from deeplabcutcore.pose_estimation_tensorflow import (
61 | analyze_videos,
62 | analyze_time_lapse_frames,
63 | )
64 | from deeplabcutcore.pose_estimation_tensorflow import export_model
65 |
66 | from deeplabcutcore.pose_estimation_3d import (
67 | calibrate_cameras,
68 | check_undistortion,
69 | triangulate,
70 | create_labeled_video_3d,
71 | )
72 |
73 | from deeplabcutcore.refine_training_dataset import (
74 | extract_outlier_frames,
75 | merge_datasets,
76 | )
77 | from deeplabcutcore.post_processing import filterpredictions, analyzeskeleton
78 |
79 | from deeplabcutcore.version import __version__, VERSION
80 |
--------------------------------------------------------------------------------
/deeplabcutcore/create_project/__init__.py:
--------------------------------------------------------------------------------
1 | from deeplabcutcore.create_project.new import create_new_project
2 | from deeplabcutcore.create_project.new_3d import create_new_project_3d
3 | from deeplabcutcore.create_project.add import add_new_videos
4 | from deeplabcutcore.create_project.demo_data import load_demo_data
5 | from deeplabcutcore.create_project.modelzoo import (
6 | create_pretrained_human_project,
7 | create_pretrained_project,
8 | )
9 |
--------------------------------------------------------------------------------
/deeplabcutcore/create_project/add.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 |
12 | def add_new_videos(config, videos, copy_videos=False, coords=None):
13 | """
14 | Add new videos to the config file at any stage of the project.
15 |
16 | Parameters
17 | ----------
18 | config : string
19 | String containing the full path of the config file in the project.
20 |
21 | videos : list
22 | A list of string containing the full paths of the videos to include in the project.
23 |
24 | copy_videos : bool, optional
25 | If this is set to True, the videos are copied to the project/videos directory. If False, symbolic
26 | links to the videos are created in that directory instead. The default is ``False``; if provided it must be either ``True`` or ``False``.
27 | coords: list, optional
28 | A list of lists of cropping coordinates, one list per video. The default is set to None.
29 | Examples
30 | --------
31 | Video will be added, with cropping dimensions according to the frame dimensions of mouse5.avi.
32 | >>> deeplabcutcore.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'])
33 |
34 | Video will be added, with cropping dimensions [0,100,0,200]
35 | >>> deeplabcutcore.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi'],copy_videos=False,coords=[[0,100,0,200]])
36 |
37 | Two videos will be added, with cropping dimensions [0,100,0,200] and [0,100,0,250], respectively.
38 | >>> deeplabcutcore.add_new_videos('/home/project/reaching-task-Tanmay-2018-08-23/config.yaml',['/data/videos/mouse5.avi','/data/videos/mouse6.avi'],copy_videos=False,coords=[[0,100,0,200],[0,100,0,250]])
39 |
40 | """
41 | import os
42 | import shutil
43 | from pathlib import Path
44 |
45 | from deeplabcutcore import DEBUG
46 | from deeplabcutcore.utils import auxiliaryfunctions
47 | import cv2
48 |
49 | # Read the config file
50 | cfg = auxiliaryfunctions.read_config(config)
51 |
52 | video_path = Path(config).parents[0] / "videos"
53 | data_path = Path(config).parents[0] / "labeled-data"
54 | videos = [Path(vp) for vp in videos]
55 |
56 | dirs = [data_path / Path(i.stem) for i in videos]
57 |
58 | for p in dirs:
59 | """
60 | Creates directory under data & perhaps copies videos (to /video)
61 | """
62 | p.mkdir(parents=True, exist_ok=True)
63 |
64 | destinations = [video_path.joinpath(vp.name) for vp in videos]
65 | if copy_videos == True:
66 | for src, dst in zip(videos, destinations):
67 | if dst.exists():
68 | pass
69 | else:
70 | print("Copying the videos")
71 | shutil.copy(os.fspath(src), os.fspath(dst))
72 | else:
73 | for src, dst in zip(videos, destinations):
74 | if dst.exists():
75 | pass
76 | else:
77 | print("Creating the symbolic link of the video")
78 | src = str(src)
79 | dst = str(dst)
80 | os.symlink(src, dst)
81 |
82 | if copy_videos == True:
83 | videos = (
84 | destinations
85 | ) # in this case the *new* location should be added to the config file
86 | # adds the video list to the config.yaml file
87 | for idx, video in enumerate(videos):
88 | try:
89 | # On Windows, os.path.realpath does not work and does not resolve to the real video.
90 | video_path = str(Path.resolve(Path(video)))
91 | # video_path = os.path.realpath(video)
92 | except:
93 | video_path = os.readlink(video)
94 |
95 | vcap = cv2.VideoCapture(video_path)
96 | if vcap.isOpened():
97 | # get vcap property
98 | width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
99 | height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
100 | if coords == None:
101 | cfg["video_sets"].update(
102 | {video_path: {"crop": ", ".join(map(str, [0, width, 0, height]))}}
103 | )
104 | else:
105 | c = coords[idx]
106 | cfg["video_sets"].update({video_path: {"crop": ", ".join(map(str, c))}})
107 | else:
108 | print("Cannot open the video file!")
109 |
110 | auxiliaryfunctions.write_config(config, cfg)
111 | print(
112 | "New video was added to the project! Use the function 'extract_frames' to select frames for labeling."
113 | )
114 |
--------------------------------------------------------------------------------
/deeplabcutcore/create_project/demo_data.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | import os
12 | from pathlib import Path
13 | import deeplabcutcore
14 | from deeplabcutcore.utils import auxiliaryfunctions
15 |
16 |
17 | def load_demo_data(config, createtrainingset=True):
18 | """
19 | Loads the demo data. Make sure that you run this from the directory where you downloaded or cloned the deeplabcutcore repository.
20 |
21 | Parameters
22 | ----------
23 | config : string
24 | Full path of the config.yaml file of the provided demo dataset as a string.
25 |
26 | Example
27 | --------
28 | >>> deeplabcutcore.load_demo_data('config.yaml')
29 | --------
30 | """
31 | config = Path(config).resolve()
32 | config = str(config)
33 |
34 | transform_data(config)
35 | if createtrainingset:
36 | print("Loaded, now creating training data...")
37 | deeplabcutcore.create_training_dataset(config, num_shuffles=1)
38 |
39 |
40 | def transform_data(config):
41 | """
42 | This function adds the full path to the labeling dataset.
43 | It also adds the correct path to the video file in the config file.
44 | """
45 | import pandas as pd
46 |
47 | cfg = auxiliaryfunctions.read_config(config)
48 | project_path = str(Path(config).parents[0])
49 |
50 | cfg["project_path"] = project_path
51 | if "Reaching" in project_path:
52 | video_file = os.path.join(project_path, "videos", "reachingvideo1.avi")
53 | elif "openfield" in project_path:
54 | video_file = os.path.join(project_path, "videos", "m4s1.mp4")
55 | else:
56 | print("This is not an official demo dataset.")
57 |
58 | if "WILL BE AUTOMATICALLY UPDATED BY DEMO CODE" in cfg["video_sets"].keys():
59 | cfg["video_sets"][str(video_file)] = cfg["video_sets"].pop(
60 | "WILL BE AUTOMATICALLY UPDATED BY DEMO CODE"
61 | )
62 |
63 | auxiliaryfunctions.write_config(config, cfg)
64 |
--------------------------------------------------------------------------------
/deeplabcutcore/create_project/new.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | import os
12 | from pathlib import Path
13 | import cv2
14 | from deeplabcutcore import DEBUG
15 | import shutil
16 |
17 |
18 | def create_new_project(
19 | project,
20 | experimenter,
21 | videos,
22 | working_directory=None,
23 | copy_videos=False,
24 | videotype=".avi",
25 | ):
26 | """Creates a new project directory, sub-directories and a basic configuration file. The configuration file is loaded with the default values. Change its parameters to your projects need.
27 |
28 | Parameters
29 | ----------
30 | project : string
31 | String containing the name of the project.
32 |
33 | experimenter : string
34 | String containing the name of the experimenter.
35 |
36 | videos : list
37 | A list of string containing the full paths of the videos to include in the project.
38 | Attention: this can also be a directory, in which case all videos of the given videotype will be imported.
39 |
40 | working_directory : string, optional
41 | The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.
42 |
43 | copy_videos : bool, optional
44 | If this is set to True, the videos are copied to the ``videos`` directory. If it is False, symbolic links to the videos are created in the project/videos directory. The default is ``False``; if provided it must be either
45 | ``True`` or ``False``.
46 |
47 | Example
48 | --------
49 | Linux/MacOs
50 | >>> deeplabcutcore.create_new_project('reaching-task','Linus',['/data/videos/mouse1.avi','/data/videos/mouse2.avi','/data/videos/mouse3.avi'],'/analysis/project/')
51 | >>> deeplabcutcore.create_new_project('reaching-task','Linus',['/data/videos'],videotype='.mp4')
52 |
53 | Windows:
54 | >>> deeplabcutcore.create_new_project('reaching-task','Bill',[r'C:\yourusername\rig-95\Videos\reachingvideo1.avi'], copy_videos=True)
55 | Users must format Windows paths with either r'C:\ (a raw string) or 'C:\\ (i.e. a double backslash).
56 |
57 | """
58 | from datetime import datetime as dt
59 | from deeplabcutcore.utils import auxiliaryfunctions
60 |
61 | date = dt.today()
62 | month = date.strftime("%B")
63 | day = date.day
64 | d = str(month[0:3] + str(day))
65 | date = dt.today().strftime("%Y-%m-%d")
66 | if working_directory == None:
67 | working_directory = "."
68 | wd = Path(working_directory).resolve()
69 | project_name = "{pn}-{exp}-{date}".format(pn=project, exp=experimenter, date=date)
70 | project_path = wd / project_name
71 |
72 | # Create project and sub-directories
73 | if not DEBUG and project_path.exists():
74 | print('Project "{}" already exists!'.format(project_path))
75 | return
76 | video_path = project_path / "videos"
77 | data_path = project_path / "labeled-data"
78 | shuffles_path = project_path / "training-datasets"
79 | results_path = project_path / "dlc-models"
80 | for p in [video_path, data_path, shuffles_path, results_path]:
81 | p.mkdir(parents=True, exist_ok=DEBUG)
82 | print('Created "{}"'.format(p))
83 |
84 | # Add all videos in the folder. Multiple folders can be passed in a list, similar to the video files. Folders and video files can also be passed!
85 | vids = []
86 | for i in videos:
87 | # Check if it is a folder
88 | if os.path.isdir(i):
89 | vids_in_dir = [
90 | os.path.join(i, vp) for vp in os.listdir(i) if videotype in vp
91 | ]
92 | vids = vids + vids_in_dir
93 | if len(vids_in_dir) == 0:
94 | print("No videos found in", i)
95 | print(
96 | "Perhaps change the videotype, which is currently set to:",
97 | videotype,
98 | )
99 | else:
100 | videos = vids
101 | print(
102 | len(vids_in_dir),
103 | " videos from the directory",
104 | i,
105 | "were added to the project.",
106 | )
107 | else:
108 | if os.path.isfile(i):
109 | vids = vids + [i]
110 | videos = vids
111 |
112 | videos = [Path(vp) for vp in videos]
113 | dirs = [data_path / Path(i.stem) for i in videos]
114 | for p in dirs:
115 | """
116 | Creates directory under data
117 | """
118 | p.mkdir(parents=True, exist_ok=True)
119 |
120 | destinations = [video_path.joinpath(vp.name) for vp in videos]
121 | if copy_videos == True:
122 | print("Copying the videos")
123 | for src, dst in zip(videos, destinations):
124 | shutil.copy(
125 | os.fspath(src), os.fspath(dst)
126 | ) # https://www.python.org/dev/peps/pep-0519/
127 | else:
128 | # creates the symlinks of the video and puts it in the videos directory.
129 | print("Attempting to create a symbolic link of the video ...")
130 | for src, dst in zip(videos, destinations):
131 | if dst.exists() and not DEBUG:
132 | raise FileExistsError("Video {} exists already!".format(dst))
133 | try:
134 | src = str(src)
135 | dst = str(dst)
136 | os.symlink(src, dst)
137 | except OSError:
138 | import subprocess
139 |
140 | subprocess.check_call("mklink %s %s" % (dst, src), shell=True)
141 | print("Created the symlink of {} to {}".format(src, dst))
142 | videos = destinations
143 |
144 | if copy_videos == True:
145 | videos = (
146 | destinations
147 | ) # in this case the *new* location should be added to the config file
148 |
149 | # adds the video list to the config.yaml file
150 | video_sets = {}
151 | for video in videos:
152 | print(video)
153 | try:
154 | # On Windows, os.path.realpath does not work and does not resolve to the real video. [old: rel_video_path = os.path.realpath(video)]
155 | rel_video_path = str(Path.resolve(Path(video)))
156 | except:
157 | rel_video_path = os.readlink(str(video))
158 |
159 | vcap = cv2.VideoCapture(rel_video_path)
160 | if vcap.isOpened():
161 | width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
162 | height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
163 | video_sets[rel_video_path] = {
164 | "crop": ", ".join(map(str, [0, width, 0, height]))
165 | }
166 | else:
167 | print("Cannot open the video file! Skipping to the next one...")
168 | os.remove(video) # Removing the video or link from the project
169 |
170 | if not len(video_sets):
171 | # Silently sweep the files that were already written.
172 | shutil.rmtree(project_path, ignore_errors=True)
173 | print("WARNING: No valid videos were found. The project was not created ...")
174 | print("Verify the video files and re-create the project.")
175 | return "nothingcreated"
176 |
177 | # Set values to config file:
178 | cfg_file, ruamelFile = auxiliaryfunctions.create_config_template()
179 | cfg_file["Task"] = project
180 | cfg_file["scorer"] = experimenter
181 | cfg_file["video_sets"] = video_sets
182 | cfg_file["project_path"] = str(project_path)
183 | cfg_file["date"] = d
184 | cfg_file["bodyparts"] = ["bodypart1", "bodypart2", "bodypart3", "objectA"]
185 | cfg_file["cropping"] = False
186 | cfg_file["start"] = 0
187 | cfg_file["stop"] = 1
188 | cfg_file["numframes2pick"] = 20
189 | cfg_file["TrainingFraction"] = [0.95]
190 | cfg_file["iteration"] = 0
191 | # cfg_file['resnet']=50
192 | cfg_file["default_net_type"] = "resnet_50"
193 | cfg_file["default_augmenter"] = "default"
194 | cfg_file["snapshotindex"] = -1
195 | cfg_file["x1"] = 0
196 | cfg_file["x2"] = 640
197 | cfg_file["y1"] = 277
198 | cfg_file["y2"] = 624
199 | cfg_file[
200 | "batch_size"
201 | ] = (
202 | 8
203 | ) # batch size during inference (video - analysis); see https://www.biorxiv.org/content/early/2018/10/30/457242
204 | cfg_file["corner2move2"] = (50, 50)
205 | cfg_file["move2corner"] = True
206 | cfg_file["skeleton"] = [["bodypart1", "bodypart2"], ["objectA", "bodypart3"]]
207 | cfg_file["skeleton_color"] = "black"
208 | cfg_file["pcutoff"] = 0.6
209 | cfg_file["dotsize"] = 12 # for plots size of dots
210 | cfg_file["alphavalue"] = 0.7 # for plots transparency of markers
211 | cfg_file["colormap"] = "jet" # for plots type of colormap
212 |
213 | projconfigfile = os.path.join(str(project_path), "config.yaml")
214 | # Write dictionary to yaml config file
215 | auxiliaryfunctions.write_config(projconfigfile, cfg_file)
216 |
217 | print('Generated "{}"'.format(project_path / "config.yaml"))
218 | print(
219 | "\nA new project with name %s is created at %s and a configurable file (config.yaml) is stored there. Change the parameters in this file to adapt to your project's needs.\n Once you have changed the configuration file, use the function 'extract_frames' to select frames for labeling.\n. [OPTIONAL] Use the function 'add_new_videos' to add new videos to your project (at any stage)."
220 | % (project_name, str(wd))
221 | )
222 | return projconfigfile
223 |
--------------------------------------------------------------------------------
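Note: for each valid video, create_new_project writes an entry into the config's video_sets with a "crop" string of the form "x1, x2, y1, y2" spanning the full frame. A small sketch (the project path is a hypothetical placeholder) of how to read that back with the same helper used above:

# Sketch: inspect the video_sets written by create_new_project (hypothetical path).
from deeplabcutcore.utils import auxiliaryfunctions

cfg = auxiliaryfunctions.read_config(
    "/analysis/project/reaching-task-Linus-2020-01-01/config.yaml"
)
for video, params in cfg["video_sets"].items():
    # "crop" is stored as the string "x1, x2, y1, y2", e.g. "0, 640, 0, 480"
    x1, x2, y1, y2 = map(int, params["crop"].split(","))
    print(video, "frame size:", x2 - x1, "x", y2 - y1)
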
/deeplabcutcore/create_project/new_3d.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | import os
12 | from pathlib import Path
13 | from deeplabcutcore import DEBUG
14 | import matplotlib.pyplot as plt
15 |
16 |
17 | def create_new_project_3d(project, experimenter, num_cameras=2, working_directory=None):
18 | """Creates a new project directory, sub-directories and a basic configuration file for 3d project.
19 | The configuration file is loaded with the default values. Adjust the parameters to your project's needs.
20 |
21 | Parameters
22 | ----------
23 | project : string
24 | String containing the name of the project.
25 |
26 | experimenter : string
27 | String containing the name of the experimenter.
28 |
29 | num_cameras : int
30 | An integer value specifying the number of cameras.
31 |
32 | working_directory : string, optional
33 | The directory where the project will be created. The default is the ``current working directory``; if provided, it must be a string.
34 |
35 |
36 | Example
37 | --------
38 | Linux/MacOs
39 | >>> deeplabcutcore.create_new_project_3d('reaching-task','Linus',2)
40 |
41 | Windows:
42 | >>> deeplabcutcore.create_new_project_3d('reaching-task','Bill',2)
43 | Users must format Windows paths with either r'C:\ (a raw string) or 'C:\\ (i.e. a double backslash).
44 |
45 | """
46 | from datetime import datetime as dt
47 | from deeplabcutcore.utils import auxiliaryfunctions
48 |
49 | date = dt.today()
50 | month = date.strftime("%B")
51 | day = date.day
52 | d = str(month[0:3] + str(day))
53 | date = dt.today().strftime("%Y-%m-%d")
54 |
55 | if working_directory == None:
56 | working_directory = "."
57 |
58 | wd = Path(working_directory).resolve()
59 | project_name = "{pn}-{exp}-{date}-{triangulate}".format(
60 | pn=project, exp=experimenter, date=date, triangulate="3d"
61 | )
62 | project_path = wd / project_name
63 | # Create project and sub-directories
64 | if not DEBUG and project_path.exists():
65 | print('Project "{}" already exists!'.format(project_path))
66 | return
67 |
68 | camera_matrix_path = project_path / "camera_matrix"
69 | calibration_images_path = project_path / "calibration_images"
70 | undistortion_path = project_path / "undistortion"
71 | path_corners = project_path / "corners"
72 |
73 | for p in [
74 | camera_matrix_path,
75 | calibration_images_path,
76 | undistortion_path,
77 | path_corners,
78 | ]:
79 | p.mkdir(parents=True, exist_ok=DEBUG)
80 | print('Created "{}"'.format(p))
81 |
82 | # Create config file
83 | cfg_file_3d, ruamelFile_3d = auxiliaryfunctions.create_config_template_3d()
84 | cfg_file_3d["Task"] = project
85 | cfg_file_3d["scorer"] = experimenter
86 | cfg_file_3d["date"] = d
87 | cfg_file_3d["project_path"] = str(project_path)
88 | # cfg_file_3d['config_files']= [str('Enter the path of the config file ')+str(i)+ ' to include' for i in range(1,3)]
89 | # cfg_file_3d['config_files']= ['Enter the path of the config file 1']
90 | cfg_file_3d["colormap"] = "jet"
91 | cfg_file_3d["dotsize"] = 15
92 | cfg_file_3d["alphaValue"] = 0.8
93 | cfg_file_3d["markerType"] = "*"
94 | cfg_file_3d["markerColor"] = "r"
95 | cfg_file_3d["pcutoff"] = 0.4
96 | cfg_file_3d["num_cameras"] = num_cameras
97 | cfg_file_3d["camera_names"] = [
98 | str("camera-" + str(i)) for i in range(1, num_cameras + 1)
99 | ]
100 | cfg_file_3d["scorername_3d"] = "DLC_3D"
101 |
102 | cfg_file_3d["skeleton"] = [
103 | ["bodypart1", "bodypart2"],
104 | ["bodypart2", "bodypart3"],
105 | ["bodypart3", "bodypart4"],
106 | ["bodypart4", "bodypart5"],
107 | ]
108 | cfg_file_3d["skeleton_color"] = "black"
109 |
110 | for i in range(num_cameras):
111 | path = str(
112 | "/home/mackenzie/DEEPLABCUT/DeepLabCut/2DprojectCam"
113 | + str(i + 1)
114 | + "-Mackenzie-2019-06-05/config.yaml"
115 | )
116 | cfg_file_3d.insert(
117 | len(cfg_file_3d), str("config_file_camera-" + str(i + 1)), path
118 | )
119 |
120 | for i in range(num_cameras):
121 | cfg_file_3d.insert(len(cfg_file_3d), str("shuffle_camera-" + str(i + 1)), 1)
122 | cfg_file_3d.insert(
123 | len(cfg_file_3d), str("trainingsetindex_camera-" + str(i + 1)), 0
124 | )
125 |
126 | projconfigfile = os.path.join(str(project_path), "config.yaml")
127 | auxiliaryfunctions.write_config_3d(projconfigfile, cfg_file_3d)
128 |
129 | print('Generated "{}"'.format(project_path / "config.yaml"))
130 | print(
131 | "\nA new project with name %s is created at %s and a configurable file (config.yaml) is stored there. If you have not calibrated the cameras, then use the function 'calibrate_camera' to start calibrating the camera otherwise use the function ``triangulate`` to triangulate the dataframe"
132 | % (project_name, wd)
133 | )
134 | return projconfigfile
135 |
--------------------------------------------------------------------------------
/deeplabcutcore/generate_training_dataset/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | deeplabcutcore2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/deeplabcut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/deeplabcut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | from deeplabcutcore import DEBUG
12 | from deeplabcutcore.generate_training_dataset.frame_extraction import *
13 | from deeplabcutcore.generate_training_dataset.trainingsetmanipulation import *
14 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_cfg.yaml:
--------------------------------------------------------------------------------
1 | dataset: willbeautomaticallyupdatedbycreate_training_datasetcode
2 | metadataset: willbeautomaticallyupdatedbycreate_training_datasetcode
3 | num_joints: willbeautomaticallyupdatedbycreate_training_datasetcode
4 | all_joints: willbeautomaticallyupdatedbycreate_training_datasetcode
5 | all_joints_names: willbeautomaticallyupdatedbycreate_training_datasetcode
6 | init_weights: willbeautomaticallyupdatedbycreate_training_datasetcode
7 | project_path: willbeautomaticallyupdatedbycreate_training_datasetcode
8 |
9 |
10 | # Hyperparameters below worked well for our tasks in
11 | # Mathis et al. Nature Neuroscience
12 | # https://www.nature.com/articles/s41593-018-0209-y
13 |
14 | # all locations within this distance threshold are considered
15 | # positive training samples for detector
16 | pos_dist_thresh: 17
17 |
18 | # all images in the dataset will be rescaled by the following
19 | # scaling factor to be processed by the CNN. You can select the
20 | # optimal scale by cross-validation
21 | global_scale: 0.8
22 |
23 | ##############################################################################
24 | #### Augmentation variables
25 | ##############################################################################
26 | # During training an image will be randomly scaled within the
27 | # range [scale_jitter_lo; scale_jitter_up] to augment training data.
28 | scale_jitter_lo: 0.5
29 | scale_jitter_up: 1.25
30 |
31 | # Randomly flips an image horizontally to augment training data
32 | mirror: False
33 |
34 | #Data loaders, i.e. with additional data augmentation options (as of 2.0.9+):
35 | dataset_type: default
36 | batch_size: 1
37 | # 'default' will be used with no extra dataloaders. Other options: 'tensorpack', 'deterministic'
38 | #types of datasets, see factory: deeplabcut/pose_estimation_tensorflow/dataset/factory.py
39 | #For deterministic, see https://github.com/AlexEMG/DeepLabCut/pull/324
40 | #For tensorpack, see https://github.com/AlexEMG/DeepLabCut/pull/409
41 |
42 | # Auto cropping is new (was not in Nature Neuroscience 2018 paper, but introduced in Nath et al. Nat. Protocols 2019)
43 | #and boosts performance by 2X, particularly on challenging datasets, like the cheetah in Nath et al.
44 | # Parameters for augmentation with regard to cropping
45 | crop: True
46 | cropratio: 0.4 #what is the fraction of training samples with cropping?
47 |
48 | minsize: 100 # minimal crop size around a joint, i.e. plus/minus [-100,100]^2 for an arbitrary joint
49 | leftwidth: 400
50 | rightwidth: 400
51 | topheight: 400
52 | bottomheight: 400
53 |
54 | # limit width [-leftwidth*u-100, 100+u*rightwidth] x [-bottomheight*u-100, 100+u*topheight] where u is always a (different) random number in the unit interval
55 |
56 | # NOTE: as of DLC 2.1 these are defined when creating the training set!
57 | # Type of the CNN to use, currently resnets + mobilenets are supported (see docs)
58 | net_type: resnet_50
59 | #init_weights: ./snapshot-5000
60 |
61 |
62 | # Location refinement parameters (check https://arxiv.org/abs/1511.06645)
63 | location_refinement: true
64 | locref_huber_loss: true
65 | locref_loss_weight: 0.05
66 | locref_stdev: 7.2801
67 |
68 | # Enabling this adds additional loss layer in the middle of the ConvNet,
69 | # which helps accuracy (you should set to true for ResNet-101, or 152!).
70 | intermediate_supervision: false
71 | intermediate_supervision_layer: 12
72 |
73 | # all images with size
74 | # width * height > max_input_size*max_input_size are not used in training.
75 | # Prevents training from crashing with out of memory exception for very
76 | # large images.
77 | max_input_size: 1500
78 | # all images smaller than 64*64 will be excluded.
79 | min_input_size: 64
80 |
81 | # Learning rate schedule for the SGD optimizer.
82 | multi_step:
83 | - [0.005, 10000]
84 | - [0.02, 430000]
85 | - [0.002, 730000]
86 | - [0.001, 1030000]
87 |
88 | # How often display loss
89 | display_iters: 1000
90 | # How often to save training snapshot
91 | save_iters: 50000
92 |
--------------------------------------------------------------------------------
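Note: the cropping parameters above (minsize, leftwidth, rightwidth, topheight, bottomheight) control how a training crop is sampled around a labeled joint; the scheme is implemented by CropImage in pose_estimation_tensorflow/dataset/pose_dataset.py. A small numpy sketch of sampling one crop window, with hypothetical frame and joint coordinates:

# Sketch of the crop-window sampling described above (mirrors CropImage).
import numpy as np

cfg = {"minsize": 100, "leftwidth": 400, "rightwidth": 400,
       "topheight": 400, "bottomheight": 400}
img_h, img_w = 480, 640        # hypothetical frame size
x_label, y_label = 320, 240    # hypothetical joint location

right = cfg["minsize"] + np.random.randint(cfg["rightwidth"])
left = cfg["minsize"] + np.random.randint(cfg["leftwidth"])
up = cfg["minsize"] + np.random.randint(cfg["topheight"])
down = cfg["minsize"] + np.random.randint(cfg["bottomheight"])

x_start, x_stop = max(0, x_label - left), min(img_w - 1, x_label + right)
y_start, y_stop = max(0, y_label - down), min(img_h - 1, y_label + up)
print("crop window: x", (x_start, x_stop), "y", (y_start, y_stop))
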
/deeplabcutcore/pose_estimation_3d/__init__.py:
--------------------------------------------------------------------------------
1 | from deeplabcutcore.pose_estimation_3d.camera_calibration import *
2 | from deeplabcutcore.pose_estimation_3d.plotting3D import *
3 | from deeplabcutcore.pose_estimation_3d.triangulation import *
4 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/LICENSE:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 |
167 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/README.md:
--------------------------------------------------------------------------------
1 | # DeepLabCut2.0 Toolbox (deeplabcut.org)
2 | © A. & M. Mathis Labs
3 |
4 | https://github.com/AlexEMG/DeepLabCut
5 |
6 | Please see AUTHORS for contributors.
7 |
8 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
9 |
10 | The code in this folder is based on Eldar's DeeperCut code: https://github.com/eldar/pose-tensorflow
11 |
12 | We adapted it for integration into the pip package of DeepLabCut and, among other things, added additional networks (MobileNets), faster inference code, and additional augmentation code.
13 |
14 | Check out the following references for details:
15 |
16 | @article{Mathisetal2018,
17 | title={DeepLabCut: markerless pose estimation of user-defined body parts with deep learning},
18 | author = {Alexander Mathis and Pranav Mamidanna and Kevin M. Cury and Taiga Abe and Venkatesh N. Murthy and Mackenzie W. Mathis and Matthias Bethge},
19 | journal={Nature Neuroscience},
20 | year={2018},
21 | url={https://www.nature.com/articles/s41593-018-0209-y}
22 | }
23 |
24 | @article{mathis2019pretraining,
25 | title={Pretraining boosts out-of-domain robustness for pose estimation},
26 | author={Alexander Mathis and Mert Yüksekgönül and Byron Rogers and Matthias Bethge and Mackenzie W. Mathis},
27 | year={2019},
28 | eprint={1909.11229},
29 | archivePrefix={arXiv},
30 | primaryClass={cs.CV}
31 | }
32 |
33 | @article{insafutdinov2016deepercut,
34 | author = {Eldar Insafutdinov and Leonid Pishchulin and Bjoern Andres and Mykhaylo Andriluka and Bernt Schiele},
35 | url = {http://arxiv.org/abs/1605.03170},
36 | title = {DeeperCut: A Deeper, Stronger, and Faster Multi-Person Pose Estimation Model},
37 | year = {2016}
38 | }
39 |
40 | @inproceedings{pishchulin16cvpr,
41 | title = {DeepCut: Joint Subset Partition and Labeling for Multi Person Pose Estimation},
42 | booktitle = {CVPR'16},
43 | url = {https://arxiv.org/abs/1511.06645},
44 | author = {Leonid Pishchulin and Eldar Insafutdinov and Siyu Tang and Bjoern Andres and Mykhaylo Andriluka and Peter Gehler and Bernt Schiele}
45 | }
46 |
47 | # License:
48 |
49 | This project (DeepLabCut and DeeperCut) is licensed under the GNU Lesser General Public License v3.0.
50 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | deeplabcutcore2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/deeplabcut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/deeplabcut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | from deeplabcutcore.pose_estimation_tensorflow.dataset import *
12 | from deeplabcutcore.pose_estimation_tensorflow.models import *
13 | from deeplabcutcore.pose_estimation_tensorflow.nnet import *
14 | from deeplabcutcore.pose_estimation_tensorflow.util import *
15 |
16 | from deeplabcutcore.pose_estimation_tensorflow.config import *
17 | from deeplabcutcore.pose_estimation_tensorflow.default_config import *
18 | from deeplabcutcore.pose_estimation_tensorflow.evaluate import *
19 | from deeplabcutcore.pose_estimation_tensorflow.export import *
20 | from deeplabcutcore.pose_estimation_tensorflow.predict_videos import *
21 | from deeplabcutcore.pose_estimation_tensorflow.test import *
22 | from deeplabcutcore.pose_estimation_tensorflow.train import *
23 | from deeplabcutcore.pose_estimation_tensorflow.training import *
24 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from DeeperCut by Eldar Insafutdinov
3 | https://github.com/eldar/pose-tensorflow
4 | """
5 |
6 | import pprint
7 | import logging
8 |
9 | import yaml
10 | from easydict import EasyDict as edict
11 |
12 | from . import default_config
13 |
14 | cfg = default_config.cfg
15 |
16 |
17 | def _merge_a_into_b(a, b):
18 | """Merge config dictionary a into config dictionary b, clobbering the
19 | options in b whenever they are also specified in a.
20 | """
21 | if type(a) is not edict:
22 | return
23 |
24 | for k, v in a.items():
25 | # a must specify keys that are in b
26 | # if k not in b:
27 | # raise KeyError('{} is not a valid config key'.format(k))
28 |
29 | # recursively merge dicts
30 | if type(v) is edict:
31 | try:
32 | _merge_a_into_b(a[k], b[k])
33 | except:
34 | print("Error under config key: {}".format(k))
35 | raise
36 | else:
37 | b[k] = v
38 |
39 |
40 | def cfg_from_file(filename):
41 | """Load a config from file filename and merge it into the default options.
42 | """
43 | with open(filename, "r") as f:
44 | yaml_cfg = edict(yaml.load(f, Loader=yaml.SafeLoader))
45 |
46 | # Update the snapshot path to the corresponding path!
47 | trainpath = str(filename).split("pose_cfg.yaml")[0]
48 | yaml_cfg["snapshot_prefix"] = trainpath + "snapshot"
49 | # the default is: "./snapshot"
50 | _merge_a_into_b(yaml_cfg, cfg)
51 |
52 | logging.info("Config:\n" + pprint.pformat(cfg))
53 | return cfg
54 |
55 |
56 | def load_config(filename="pose_cfg.yaml"):
57 | return cfg_from_file(filename)
58 |
59 |
60 | if __name__ == "__main__":
61 | print(load_config())
62 |
--------------------------------------------------------------------------------
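Note: cfg_from_file/load_config read a training pose_cfg.yaml, point snapshot_prefix at the same folder, and merge the file's values over the defaults from default_config.py. A minimal usage sketch (the path is a hypothetical placeholder):

# Sketch: load a training configuration; unspecified keys keep their defaults.
from deeplabcutcore.pose_estimation_tensorflow.config import load_config

cfg = load_config("train/pose_cfg.yaml")   # hypothetical path to a pose_cfg.yaml
print(cfg["net_type"], cfg["batch_size"], cfg["snapshot_prefix"])
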
/deeplabcutcore/pose_estimation_tensorflow/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 | from deeplabcutcore.pose_estimation_tensorflow.dataset.factory import *
11 | from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset import *
12 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/dataset/factory.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 |
10 | Adopted from DeeperCut by Eldar Insafutdinov
11 | https://github.com/eldar/pose-tensorflow
12 |
13 | Updated to allow more data set loaders.
14 | """
15 |
16 | from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset import Batch
17 |
18 |
19 | def create(cfg):
20 | dataset_type = cfg.dataset_type
21 | if dataset_type == "default":
22 | print("Starting with standard pose-dataset loader.")
23 | from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_defaultdataset import (
24 | PoseDataset,
25 | )
26 |
27 | data = PoseDataset(cfg)
28 | elif dataset_type == "deterministic":
29 | print("Starting with deterministic pose-dataset loader.")
30 | from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset_deterministic import (
31 | PoseDataset,
32 | )
33 |
34 | data = PoseDataset(cfg)
35 |
36 | elif dataset_type == "tensorpack":
37 | print("Starting with tensorpack pose-dataset loader.")
38 | from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset_tensorpack import (
39 | PoseDataset,
40 | )
41 |
42 | data = PoseDataset(cfg)
43 |
44 | elif dataset_type == "imgaug":
45 | print("Starting with imgaug pose-dataset loader.")
46 | from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset_imgaug import (
47 | PoseDataset,
48 | )
49 |
50 | data = PoseDataset(cfg)
51 |
52 | else:
53 | raise Exception('Unsupported dataset_type: "{}"'.format(dataset_type))
54 |
55 | return data
56 |
--------------------------------------------------------------------------------
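Note: create() dispatches on cfg.dataset_type ('default', 'deterministic', 'tensorpack', or 'imgaug'), which normally comes from the dataset_type field of pose_cfg.yaml. A hedged sketch of the dispatch; a real cfg would also carry the dataset paths the chosen PoseDataset needs:

# Sketch: selecting a data loader via the factory (cfg shown with only the dispatch field).
from easydict import EasyDict as edict
from deeplabcutcore.pose_estimation_tensorflow.dataset import factory

cfg = edict({"dataset_type": "imgaug"})  # or "default", "deterministic", "tensorpack"
# data = factory.create(cfg)             # would instantiate the matching PoseDataset(cfg)
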
/deeplabcutcore/pose_estimation_tensorflow/dataset/pose_dataset.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from DeeperCut by Eldar Insafutdinov
3 | https://github.com/eldar/pose-tensorflow
4 |
5 | """
6 | from enum import Enum
7 | import numpy as np
8 |
9 |
10 | class Batch(Enum):
11 | inputs = 0
12 | part_score_targets = 1
13 | part_score_weights = 2
14 | locref_targets = 3
15 | locref_mask = 4
16 | pairwise_targets = 5
17 | pairwise_mask = 6
18 | data_item = 7
19 |
20 |
21 | class DataItem:
22 | pass
23 |
24 |
25 | def data_to_input(data):
26 | return np.expand_dims(data, axis=0).astype(float)
27 |
28 |
29 | def data_to_input_batch(batch_data):
30 | return np.array(batch_data)
31 |
32 |
33 | # Augmentation functions
34 | def mirror_joints_map(all_joints, num_joints):
35 | res = np.arange(num_joints)
36 | symmetric_joints = [p for p in all_joints if len(p) == 2]
37 | for pair in symmetric_joints:
38 | res[pair[0]] = pair[1]
39 | res[pair[1]] = pair[0]
40 | return res
41 |
42 |
43 | def CropImage(joints, im, Xlabel, Ylabel, cfg):
44 | """ Randomly cropping image around xlabel,ylabel taking into account size of image.
45 | Introduced in DLC 2 (Nature Protocols paper)"""
46 | widthforward = int(cfg["minsize"] + np.random.randint(cfg["rightwidth"]))
47 | widthback = int(cfg["minsize"] + np.random.randint(cfg["leftwidth"]))
48 | hup = int(cfg["minsize"] + np.random.randint(cfg["topheight"]))
49 | hdown = int(cfg["minsize"] + np.random.randint(cfg["bottomheight"]))
50 | Xstart = max(0, int(Xlabel - widthback))
51 | Xstop = min(np.shape(im)[1] - 1, int(Xlabel + widthforward))
52 | Ystart = max(0, int(Ylabel - hdown))
53 | Ystop = min(np.shape(im)[0] - 1, int(Ylabel + hup))
54 | joints[0, :, 1] -= Xstart
55 | joints[0, :, 2] -= Ystart
56 |
57 | inbounds = np.where(
58 | (joints[0, :, 1] > 0)
59 | * (joints[0, :, 1] < np.shape(im)[1])
60 | * (joints[0, :, 2] > 0)
61 | * (joints[0, :, 2] < np.shape(im)[0])
62 | )[0]
63 | return joints[:, inbounds, :], im[Ystart : Ystop + 1, Xstart : Xstop + 1, :]
64 |
--------------------------------------------------------------------------------
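Note: mirror_joints_map builds the index remapping used when images are mirrored horizontally: each symmetric joint pair swaps indices and every other joint maps to itself; data_to_input simply adds a batch dimension for single-image inference. A small self-contained sketch with a hypothetical four-joint skeleton:

# Sketch: joint remapping for mirroring and single-image batching.
import numpy as np
from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset import (
    data_to_input,
    mirror_joints_map,
)

all_joints = [[0, 1], [2], [3]]                     # joints 0/1 are left/right symmetric
print(mirror_joints_map(all_joints, num_joints=4))  # -> [1 0 2 3]

frame = np.zeros((480, 640, 3), dtype=np.uint8)
print(data_to_input(frame).shape)                   # -> (1, 480, 640, 3)
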
/deeplabcutcore/pose_estimation_tensorflow/default_config.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 |
10 | Adapted from DeeperCut by Eldar Insafutdinov
11 | https://github.com/eldar/pose-tensorflow
12 | """
13 |
14 | from easydict import EasyDict as edict
15 |
16 | cfg = edict()
17 |
18 | cfg.stride = 8.0
19 | cfg.weigh_part_predictions = False
20 | cfg.weigh_negatives = False
21 | cfg.fg_fraction = 0.25
22 | cfg.weigh_only_present_joints = False
23 | cfg.mean_pixel = [123.68, 116.779, 103.939]
24 | cfg.shuffle = True
25 | cfg.snapshot_prefix = "./snapshot"
26 | cfg.log_dir = "log"
27 | cfg.global_scale = 1.0
28 | cfg.location_refinement = False
29 | cfg.locref_stdev = 7.2801
30 | cfg.locref_loss_weight = 1.0
31 | cfg.locref_huber_loss = True
32 | cfg.optimizer = "sgd"
33 | cfg.intermediate_supervision = False
34 | cfg.intermediate_supervision_layer = 12
35 | cfg.regularize = False
36 | cfg.weight_decay = 0.0001
37 | cfg.mirror = False
38 | cfg.crop_pad = 0
39 | cfg.scoremap_dir = "test"
40 |
41 | cfg.batch_size = 1
42 |
43 | # types of datasets, see factory: deeplabcut/pose_estimation_tensorflow/dataset/factory.py
44 | cfg.dataset_type = "default"
45 | # you can also set this to deterministic, see https://github.com/AlexEMG/DeepLabCut/pull/324
46 | cfg.deterministic = False
47 |
48 | # Parameters for augmentation with regard to cropping
49 | # Added and described in "Using DeepLabCut for 3D markerless pose estimation across species and behaviors"
50 | # Source: https://www.nature.com/articles/s41596-019-0176-0
51 | cfg.crop = False
52 |
53 | cfg.cropratio = 0.25 # what is the fraction of training samples with cropping?
54 | cfg.minsize = (
55 | 100
56 | ) # minimal crop size around a joint, i.e. plus/minus [-100,100]^2 for an arbitrary joint
57 | cfg.leftwidth = 400
58 | # limit width [-leftwidth*u-100, 100+u*rightwidth] x [-bottomheight*u-100, 100+u*topheight] where u is always a (different) random number in the unit interval
59 | cfg.rightwidth = 400
60 | cfg.topheight = 400
61 | cfg.bottomheight = 400
62 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/deeplabcutcore/pose_estimation_tensorflow/models/__init__.py
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/models/pretrained/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-core/89727652eca751d98592f8d4388c4da3bff86187/deeplabcutcore/pose_estimation_tensorflow/models/pretrained/__init__.py
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/models/pretrained/download.sh:
--------------------------------------------------------------------------------
1 | # legacy.
2 | #!/bin/sh
3 |
4 | curl http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz | tar xvz
5 | curl http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz | tar xvz
6 | curl http://download.tensorflow.org/models/resnet_v1_152_2016_08_28.tar.gz | tar xvz
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/models/pretrained/pretrained_model_urls.yaml:
--------------------------------------------------------------------------------
1 | #Model Zoo from where the Tensor(s) flow
2 | resnet_50: http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz
3 | resnet_101: http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz
4 | resnet_152: http://download.tensorflow.org/models/resnet_v1_152_2016_08_28.tar.gz
5 |
6 | #Source: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
7 | mobilenet_v2_1.0: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_224.tgz
8 | mobilenet_v2_0.75: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_224.tgz
9 | mobilenet_v2_0.5: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_224.tgz
10 | mobilenet_v2_0.35: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_224.tgz
11 |
12 | #Model Zoo from where the Charles flow(s)
13 | full_human: http://deeplabcut.rowland.harvard.edu/models/DLC_human_fullbody_resnet_101.tar.gz
14 | full_dog: http://deeplabcut.rowland.harvard.edu/models/DLC_Dog_resnet_50_iteration-0_shuffle-0.tar.gz
15 | full_cat: http://deeplabcut.rowland.harvard.edu/models/DLC_Cat_resnet_50_iteration-0_shuffle-0.tar.gz
16 | primate_face: http://deeplabcut.rowland.harvard.edu/models/DLC_primate_face_resnet_50_iteration-1_shuffle-1.tar.gz
17 | mouse_pupil_vclose: http://deeplabcut.rowland.harvard.edu/models/DLC_mouse_pupil_vclose_resnet_50_iteration-0_shuffle-1.tar.gz
18 | horse_sideview: http://deeplabcut.rowland.harvard.edu/models/DLC_Horses_resnet_50_iteration-1_shuffle-1.tar.gz
19 | full_macaque: http://deeplabcut.rowland.harvard.edu/models/DLC_macaque_full_resnet50.tar.gz
20 |
--------------------------------------------------------------------------------
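Note: these checkpoints are plain tarballs; download.sh above shows the legacy curl-based fetch. The sketch below does the same for one ResNet checkpoint in Python, purely as an illustration (it is not the download helper deeplabcutcore uses internally; see utils/auxfun_models.py for that):

# Sketch: fetch and unpack one pretrained checkpoint listed above (illustration only).
import tarfile
import urllib.request

url = "http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz"
filename, _ = urllib.request.urlretrieve(url)
with tarfile.open(filename, "r:gz") as tar:
    tar.extractall(path="pretrained")
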
/deeplabcutcore/pose_estimation_tensorflow/nnet/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 |
10 | pose_estimation_tensorflow is based on Eldar's repository:
11 | https://github.com/eldar/pose-tensorflow
12 |
13 | """
14 | from deeplabcutcore.pose_estimation_tensorflow.nnet.losses import *
15 | from deeplabcutcore.pose_estimation_tensorflow.nnet.net_factory import *
16 | from deeplabcutcore.pose_estimation_tensorflow.nnet.pose_net import *
17 | from deeplabcutcore.pose_estimation_tensorflow.nnet.predict import *
18 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/nnet/losses.py:
--------------------------------------------------------------------------------
1 | """
2 | adapted from DeeperCut by Eldar Insafutdinov
3 | https://github.com/eldar/pose-tensorflow
4 | """
5 | import tensorflow as tf
6 |
7 | vers = (tf.__version__).split(".")
8 | if int(vers[0]) == 2 or int(vers[0]) == 1 and int(vers[1]) > 12:
9 | tf = tf.compat.v1
10 | else:
11 | tf = tf
12 |
13 | from tensorflow.python.ops import math_ops
14 | from tensorflow.python.framework import ops
15 |
16 |
17 | def huber_loss(labels, predictions, weight=1.0, k=1.0, scope=None):
18 | """Define a huber loss https://en.wikipedia.org/wiki/Huber_loss
19 | tensor: tensor to regularize.
20 | k: value of k in the huber loss
21 | scope: Optional scope for op_scope.
22 |
23 | Huber loss:
24 | f(x) = if |x| <= k:
25 | 0.5 * x^2
26 | else:
27 | k * |x| - 0.5 * k^2
28 |
29 | Returns:
30 | the weighted Huber loss op.
31 |
32 | http://concise-bio.readthedocs.io/en/latest/_modules/concise/tf_helper.html
33 | """
34 | with ops.name_scope(scope, "absolute_difference", [predictions, labels]) as scope:
35 | predictions.get_shape().assert_is_compatible_with(labels.get_shape())
36 | if weight is None:
37 | raise ValueError("`weight` cannot be None")
38 | predictions = math_ops.to_float(predictions)
39 | labels = math_ops.to_float(labels)
40 | diff = math_ops.subtract(predictions, labels)
41 | abs_diff = tf.abs(diff)
42 | losses = tf.where(
43 | abs_diff < k, 0.5 * tf.square(diff), k * abs_diff - 0.5 * k ** 2
44 | )
45 | return tf.losses.compute_weighted_loss(losses, weight)
46 |
--------------------------------------------------------------------------------
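Note: the tf.where call above implements the piecewise Huber function from the docstring, 0.5*x^2 for |x| <= k and k*|x| - 0.5*k^2 otherwise, before the weighted reduction. A tiny numpy-only sketch evaluating the same function:

# Sketch: the piecewise Huber function from the docstring, without TensorFlow.
import numpy as np

def huber(x, k=1.0):
    x = np.asarray(x, dtype=float)
    return np.where(np.abs(x) <= k, 0.5 * x ** 2, k * np.abs(x) - 0.5 * k ** 2)

print(huber([0.5, 1.0, 3.0]))  # 0.5*0.5**2 = 0.125, 0.5, 1*3 - 0.5 = 2.5
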
/deeplabcutcore/pose_estimation_tensorflow/nnet/mobilenet_v2.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 |
17 | """Implementation of Mobilenet V2.
18 |
19 | Architecture: https://arxiv.org/abs/1801.04381
20 |
21 | The base model gives 72.2% accuracy on ImageNet, with 300MMadds,
22 | 3.4 M parameters.
23 | """
24 |
25 | from __future__ import absolute_import
26 | from __future__ import division
27 | from __future__ import print_function
28 |
29 | import copy
30 | import functools
31 |
32 | import tensorflow as tf
33 |
34 | vers = (tf.__version__).split(".")
35 | if int(vers[0]) == 2 or int(vers[0]) == 1 and int(vers[1]) > 12:
36 | tf = tf.compat.v1
37 | else:
38 | tf = tf
39 |
40 | if int(vers[0]) == 2:
41 | import tf_slim as slim
42 | else:
43 | import tensorflow.contrib.slim as slim
44 |
45 | # from nets.mobilenet import conv_blocks as ops
46 | # from nets.mobilenet import mobilenet as lib
47 | from deeplabcutcore.pose_estimation_tensorflow.nnet import conv_blocks as ops
48 | from deeplabcutcore.pose_estimation_tensorflow.nnet import mobilenet as lib
49 |
50 | op = lib.op
51 |
52 | expand_input = ops.expand_input_by_factor
53 |
54 |
55 | # pyformat: disable
56 | # Architecture: https://arxiv.org/abs/1801.04381
57 | V2_DEF = dict(
58 | defaults={
59 | # Note: these parameters of batch norm affect the architecture
60 | # that's why they are here and not in training_scope.
61 | (slim.batch_norm,): {"center": True, "scale": True},
62 | (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
63 | "normalizer_fn": slim.batch_norm,
64 | "activation_fn": tf.nn.relu6,
65 | },
66 | (ops.expanded_conv,): {
67 | "expansion_size": expand_input(6),
68 | "split_expansion": 1,
69 | "normalizer_fn": slim.batch_norm,
70 | "residual": True,
71 | },
72 | (slim.conv2d, slim.separable_conv2d): {"padding": "SAME"},
73 | },
74 | spec=[
75 | op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
76 | op(
77 | ops.expanded_conv,
78 | expansion_size=expand_input(1, divisible_by=1),
79 | num_outputs=16,
80 | ),
81 | op(ops.expanded_conv, stride=2, num_outputs=24),
82 | op(ops.expanded_conv, stride=1, num_outputs=24),
83 | op(ops.expanded_conv, stride=2, num_outputs=32),
84 | op(ops.expanded_conv, stride=1, num_outputs=32),
85 | op(ops.expanded_conv, stride=1, num_outputs=32),
86 | op(ops.expanded_conv, stride=2, num_outputs=64),
87 | op(ops.expanded_conv, stride=1, num_outputs=64),
88 | op(ops.expanded_conv, stride=1, num_outputs=64),
89 | op(ops.expanded_conv, stride=1, num_outputs=64),
90 | op(ops.expanded_conv, stride=1, num_outputs=96),
91 | op(ops.expanded_conv, stride=1, num_outputs=96),
92 | op(ops.expanded_conv, stride=1, num_outputs=96),
93 | op(
94 | ops.expanded_conv, stride=1, num_outputs=160
95 | ), # NOTE: we changed this stride to achieve downsampling of 16 rather than 32.
96 | op(ops.expanded_conv, stride=1, num_outputs=160),
97 | op(ops.expanded_conv, stride=1, num_outputs=160),
98 | op(ops.expanded_conv, stride=1, num_outputs=320),
99 | op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280),
100 | ],
101 | )
102 | # pyformat: enable
103 |
104 |
105 | @slim.add_arg_scope
106 | def mobilenet(
107 | input_tensor,
108 | num_classes=1001,
109 | depth_multiplier=1.0,
110 | scope="MobilenetV2",
111 | conv_defs=None,
112 | finegrain_classification_mode=False,
113 | min_depth=None,
114 | divisible_by=None,
115 | activation_fn=None,
116 | **kwargs
117 | ):
118 | """Creates mobilenet V2 network.
119 |
120 | Inference mode is created by default. To create training use training_scope
121 | below.
122 |
123 |   with slim.arg_scope(mobilenet_v2.training_scope()):
124 | logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
125 |
126 | Args:
127 | input_tensor: The input tensor
128 | num_classes: number of classes
129 | depth_multiplier: The multiplier applied to scale number of
130 | channels in each layer.
131 | scope: Scope of the operator
132 | conv_defs: Allows to override default conv def.
133 | finegrain_classification_mode: When set to True, the model
134 | will keep the last layer large even for small multipliers. Following
135 | https://arxiv.org/abs/1801.04381
136 | suggests that it improves performance for ImageNet-type of problems.
137 | *Note* ignored if final_endpoint makes the builder exit earlier.
138 | min_depth: If provided, will ensure that all layers will have that
139 | many channels after application of depth multiplier.
140 | divisible_by: If provided will ensure that all layers # channels
141 | will be divisible by this number.
142 | activation_fn: Activation function to use, defaults to tf.nn.relu6 if not
143 | specified.
144 | **kwargs: passed directly to mobilenet.mobilenet:
145 |       prediction_fn- which prediction function to use.
146 |       reuse- whether to reuse variables (if reuse is set to True, scope
147 |       must be given).
148 | Returns:
149 | logits/endpoints pair
150 |
151 | Raises:
152 | ValueError: On invalid arguments
153 | """
154 | if conv_defs is None:
155 | conv_defs = V2_DEF
156 | if "multiplier" in kwargs:
157 | raise ValueError(
158 |             "mobilenetv2 doesn't support the generic "
159 |             '"multiplier" parameter; use "depth_multiplier" instead.'
160 | )
161 | if finegrain_classification_mode:
162 | conv_defs = copy.deepcopy(conv_defs)
163 | if depth_multiplier < 1:
164 | conv_defs["spec"][-1].params["num_outputs"] /= depth_multiplier
165 | if activation_fn:
166 | conv_defs = copy.deepcopy(conv_defs)
167 | defaults = conv_defs["defaults"]
168 | conv_defaults = defaults[
169 | (slim.conv2d, slim.fully_connected, slim.separable_conv2d)
170 | ]
171 | conv_defaults["activation_fn"] = activation_fn
172 |
173 | depth_args = {}
174 | # NB: do not set depth_args unless they are provided to avoid overriding
175 | # whatever default depth_multiplier might have thanks to arg_scope.
176 | if min_depth is not None:
177 | depth_args["min_depth"] = min_depth
178 | if divisible_by is not None:
179 | depth_args["divisible_by"] = divisible_by
180 |
181 | with slim.arg_scope((lib.depth_multiplier,), **depth_args):
182 | return lib.mobilenet(
183 | input_tensor,
184 | num_classes=num_classes,
185 | conv_defs=conv_defs,
186 | scope=scope,
187 | multiplier=depth_multiplier,
188 | **kwargs
189 | )
190 |
191 |
192 | mobilenet.default_image_size = 224
193 |
194 |
195 | def wrapped_partial(func, *args, **kwargs):
196 | partial_func = functools.partial(func, *args, **kwargs)
197 | functools.update_wrapper(partial_func, func)
198 | return partial_func
199 |
200 |
201 | # Wrappers for MobileNet v2 with depth multipliers. Note that
202 | # 'finegrain_classification_mode' is set to True, which means the embedding
203 | # layer will not be shrunk when given a depth multiplier < 1.0.
204 | mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.4)
205 | mobilenet_v2_050 = wrapped_partial(
206 | mobilenet, depth_multiplier=0.50, finegrain_classification_mode=True
207 | )
208 | mobilenet_v2_035 = wrapped_partial(
209 | mobilenet, depth_multiplier=0.35, finegrain_classification_mode=True
210 | )
211 |
212 |
213 | @slim.add_arg_scope
214 | def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
215 |     """Creates the base of the mobilenet (no pooling and no logits)."""
216 | return mobilenet(
217 | input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs
218 | )
219 |
220 |
221 | def training_scope(**kwargs):
222 | """Defines MobilenetV2 training scope.
223 |
224 | Usage:
225 |      with slim.arg_scope(mobilenet_v2.training_scope()):
226 |        logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
227 |
228 |
229 |
230 | Args:
231 | **kwargs: Passed to mobilenet.training_scope. The following parameters
232 | are supported:
233 | weight_decay- The weight decay to use for regularizing the model.
234 | stddev- Standard deviation for initialization, if negative uses xavier.
235 | dropout_keep_prob- dropout keep probability
236 | bn_decay- decay for the batch norm moving averages.
237 |
238 | Returns:
239 | An `arg_scope` to use for the mobilenet v2 model.
240 | """
241 | return lib.training_scope(**kwargs)
242 |
243 |
244 | __all__ = ["training_scope", "mobilenet_base", "mobilenet", "V2_DEF"]
245 |
--------------------------------------------------------------------------------
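A minimal usage sketch for the module above, assuming TF1-compat graph mode (as set up elsewhere in this package) and that tf_slim is installed for TF 2.x; names and shapes are illustrative, not part of the original file.

import tensorflow as tf
from deeplabcutcore.pose_estimation_tensorflow.nnet import mobilenet_v2

tf.compat.v1.disable_eager_execution()
images = tf.compat.v1.placeholder(tf.float32, shape=[1, 224, 224, 3], name="images")

# Backbone only (no pooling, no logits), the variant used by pose_net_mobilenet.py below.
# With the stride change noted in V2_DEF, the feature map has an output stride of 16.
features, endpoints = mobilenet_v2.mobilenet_base(images)

# Training mode would instead be wrapped in the scope from training_scope(), e.g.
# with slim.arg_scope(mobilenet_v2.training_scope()): ...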
/deeplabcutcore/pose_estimation_tensorflow/nnet/net_factory.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from DeeperCut by Eldar Insafutdinov
3 | https://github.com/eldar/pose-tensorflow
4 | """
5 |
6 |
7 | def pose_net(cfg):
8 | net_type = cfg.net_type
9 | if "mobilenet" in net_type:
10 | print("Initializing MobileNet")
11 | from deeplabcutcore.pose_estimation_tensorflow.nnet.pose_net_mobilenet import (
12 | PoseNet,
13 | )
14 |
15 | cls = PoseNet
16 | elif "resnet" in net_type:
17 | print("Initializing ResNet")
18 | from deeplabcutcore.pose_estimation_tensorflow.nnet.pose_net import PoseNet
19 |
20 | cls = PoseNet
21 | else:
22 | raise Exception('Unsupported class of network: "{}"'.format(net_type))
23 |
24 | return cls(cfg)
25 |
--------------------------------------------------------------------------------
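A hedged sketch of the dispatch above: in the real pipeline, cfg is the dict-like object returned by load_config() for a model's pose_cfg.yaml (the path below is a placeholder).

from deeplabcutcore.pose_estimation_tensorflow.config import load_config
from deeplabcutcore.pose_estimation_tensorflow.nnet.net_factory import pose_net

cfg = load_config("<path to a model's pose_cfg.yaml>")  # placeholder path
net = pose_net(cfg)
# -> PoseNet from pose_net_mobilenet.py if "mobilenet" is in cfg.net_type,
#    PoseNet from pose_net.py if "resnet" is in cfg.net_type,
#    otherwise an Exception is raised.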
/deeplabcutcore/pose_estimation_tensorflow/nnet/pose_net.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from DeeperCut by Eldar Insafutdinov
3 | https://github.com/eldar/pose-tensorflow
4 |
5 | """
6 |
7 | import re
8 | import tensorflow as tf
9 |
10 | vers = (tf.__version__).split(".")
11 | if int(vers[0]) == 2 or int(vers[0]) == 1 and int(vers[1]) > 12:
12 | tf = tf.compat.v1
13 | else:
14 | tf = tf
15 | if int(vers[0]) == 2:
16 | import tf_slim as slim
17 | from tf_slim.nets import resnet_v1
18 | else:
19 |     import tensorflow.contrib.slim as slim
20 |     from tensorflow.contrib.slim.nets import resnet_v1  # needed for net_funcs below
21 |     print("WARNING! this code only supports tensorflow 2.x")
22 |
23 | from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset import Batch
24 | from deeplabcutcore.pose_estimation_tensorflow.nnet import losses
25 |
26 | net_funcs = {
27 | "resnet_50": resnet_v1.resnet_v1_50,
28 | "resnet_101": resnet_v1.resnet_v1_101,
29 | "resnet_152": resnet_v1.resnet_v1_152,
30 | }
31 |
32 |
33 | def prediction_layer(cfg, input, name, num_outputs):
34 | with slim.arg_scope(
35 | [slim.conv2d, slim.conv2d_transpose],
36 | padding="SAME",
37 | activation_fn=None,
38 | normalizer_fn=None,
39 | weights_regularizer=tf.keras.regularizers.l2(0.5 * (cfg.weight_decay)),
40 | ):
41 | with tf.compat.v1.variable_scope(name):
42 | pred = slim.conv2d_transpose(
43 | input,
44 | num_outputs,
45 | kernel_size=[3, 3],
46 | stride=cfg.deconvolutionstride,
47 | scope="block4",
48 | )
49 | return pred
50 |
51 |
52 | class PoseNet:
53 | def __init__(self, cfg):
54 | self.cfg = cfg
55 | if "output_stride" not in self.cfg.keys():
56 | self.cfg.output_stride = 16
57 | if "deconvolutionstride" not in self.cfg.keys():
58 | self.cfg.deconvolutionstride = 2
59 |
60 | def extract_features(self, inputs):
61 | net_fun = net_funcs[self.cfg.net_type]
62 | mean = tf.constant(
63 | self.cfg.mean_pixel, dtype=tf.float32, shape=[1, 1, 1, 3], name="img_mean"
64 | )
65 | im_centered = inputs - mean
66 |
67 | # The next part of the code depends upon which tensorflow version you have.
68 | vers = tf.__version__
69 | vers = vers.split(
70 | "."
71 | ) # Updated based on https://github.com/AlexEMG/DeepLabCut/issues/44
72 | if int(vers[0]) == 1 and int(vers[1]) < 4: # check if lower than version 1.4.
73 | with slim.arg_scope(resnet_v1.resnet_arg_scope(False)):
74 | net, end_points = net_fun(
75 | im_centered, global_pool=False, output_stride=self.cfg.output_stride
76 | )
77 | else:
78 | with slim.arg_scope(resnet_v1.resnet_arg_scope()):
79 | net, end_points = net_fun(
80 | im_centered,
81 | global_pool=False,
82 | output_stride=self.cfg.output_stride,
83 | is_training=False,
84 | )
85 |
86 | return net, end_points
87 |
88 | def prediction_layers(self, features, end_points, reuse=None):
89 | cfg = self.cfg
90 | num_layers = re.findall("resnet_([0-9]*)", cfg.net_type)[0]
91 | layer_name = (
92 | "resnet_v1_{}".format(num_layers) + "/block{}/unit_{}/bottleneck_v1"
93 | )
94 |
95 | out = {}
96 | with tf.variable_scope("pose", reuse=reuse):
97 | out["part_pred"] = prediction_layer(
98 | cfg, features, "part_pred", cfg.num_joints
99 | )
100 | if cfg.location_refinement:
101 | out["locref"] = prediction_layer(
102 | cfg, features, "locref_pred", cfg.num_joints * 2
103 | )
104 | if cfg.intermediate_supervision:
105 | if (
106 | cfg.net_type == "resnet_50"
107 | and cfg.intermediate_supervision_layer > 6
108 | ):
109 | print(
110 | "Changing layer to 6! (higher ones don't exist in block 3 of ResNet 50)."
111 | )
112 | cfg.intermediate_supervision_layer = 6
113 | interm_name = layer_name.format(3, cfg.intermediate_supervision_layer)
114 | block_interm_out = end_points[interm_name]
115 | out["part_pred_interm"] = prediction_layer(
116 | cfg, block_interm_out, "intermediate_supervision", cfg.num_joints
117 | )
118 |
119 | return out
120 |
121 | def get_net(self, inputs):
122 | net, end_points = self.extract_features(inputs)
123 | return self.prediction_layers(net, end_points)
124 |
125 | def test(self, inputs):
126 | heads = self.get_net(inputs)
127 | prob = tf.sigmoid(heads["part_pred"])
128 | return {"part_prob": prob, "locref": heads["locref"]}
129 |
130 | def inference(self, inputs):
131 | """ Direct TF inference on GPU. Added with: https://arxiv.org/abs/1909.11229"""
132 | heads = self.get_net(inputs)
133 | # if cfg.location_refinement:
134 | locref = heads["locref"]
135 | probs = tf.sigmoid(heads["part_pred"])
136 |
137 | if self.cfg.batch_size == 1:
138 | # assuming batchsize 1 here!
139 | probs = tf.squeeze(probs, axis=0)
140 | locref = tf.squeeze(locref, axis=0)
141 | l_shape = tf.shape(input=probs)
142 |
143 | locref = tf.reshape(locref, (l_shape[0] * l_shape[1], -1, 2))
144 | probs = tf.reshape(probs, (l_shape[0] * l_shape[1], -1))
145 | maxloc = tf.argmax(input=probs, axis=0)
146 |
147 | loc = tf.unravel_index(
148 | maxloc, (tf.cast(l_shape[0], tf.int64), tf.cast(l_shape[1], tf.int64))
149 | )
150 | maxloc = tf.reshape(maxloc, (1, -1))
151 |
152 | joints = tf.reshape(
153 | tf.range(0, tf.cast(l_shape[2], dtype=tf.int64)), (1, -1)
154 | )
155 | indices = tf.transpose(a=tf.concat([maxloc, joints], axis=0))
156 |
157 | offset = tf.gather_nd(locref, indices)
158 | offset = tf.gather(offset, [1, 0], axis=1)
159 | likelihood = tf.reshape(tf.gather_nd(probs, indices), (-1, 1))
160 |
161 | pose = (
162 | self.cfg.stride * tf.cast(tf.transpose(a=loc), dtype=tf.float32)
163 | + self.cfg.stride * 0.5
164 | + offset * self.cfg.locref_stdev
165 | )
166 | pose = tf.concat([pose, likelihood], axis=1)
167 |
168 | return {"pose": pose}
169 | else:
170 | # probs = tf.squeeze(probs, axis=0)
171 | l_shape = tf.shape(
172 | input=probs
173 | ) # batchsize times x times y times body parts
174 | # locref = locref*cfg.locref_stdev
175 | locref = tf.reshape(
176 | locref, (l_shape[0], l_shape[1], l_shape[2], l_shape[3], 2)
177 | )
178 | # turn into x times y time bs * bpts
179 | locref = tf.transpose(a=locref, perm=[1, 2, 0, 3, 4])
180 | probs = tf.transpose(a=probs, perm=[1, 2, 0, 3])
181 |
182 | # print(locref.get_shape().as_list())
183 | # print(probs.get_shape().as_list())
184 | l_shape = tf.shape(input=probs) # x times y times batch times body parts
185 |
186 | locref = tf.reshape(locref, (l_shape[0] * l_shape[1], -1, 2))
187 | probs = tf.reshape(probs, (l_shape[0] * l_shape[1], -1))
188 | maxloc = tf.argmax(input=probs, axis=0)
189 | loc = tf.unravel_index(
190 | maxloc, (tf.cast(l_shape[0], tf.int64), tf.cast(l_shape[1], tf.int64))
191 | ) # tuple of max indices
192 |
193 | maxloc = tf.reshape(maxloc, (1, -1))
194 | joints = tf.reshape(
195 | tf.range(0, tf.cast(l_shape[2] * l_shape[3], dtype=tf.int64)), (1, -1)
196 | )
197 | indices = tf.transpose(a=tf.concat([maxloc, joints], axis=0))
198 |
199 | # extract corresponding locref x and y as well as probability
200 | offset = tf.gather_nd(locref, indices)
201 | offset = tf.gather(offset, [1, 0], axis=1)
202 | likelihood = tf.reshape(tf.gather_nd(probs, indices), (-1, 1))
203 |
204 | pose = (
205 | self.cfg.stride * tf.cast(tf.transpose(a=loc), dtype=tf.float32)
206 | + self.cfg.stride * 0.5
207 | + offset * self.cfg.locref_stdev
208 | )
209 | pose = tf.concat([pose, likelihood], axis=1)
210 | return {"pose": pose}
211 |
212 | def train(self, batch):
213 | cfg = self.cfg
214 |
215 | heads = self.get_net(batch[Batch.inputs])
216 |
217 | weigh_part_predictions = cfg.weigh_part_predictions
218 | part_score_weights = (
219 | batch[Batch.part_score_weights] if weigh_part_predictions else 1.0
220 | )
221 |
222 | def add_part_loss(pred_layer):
223 | return tf.compat.v1.losses.sigmoid_cross_entropy(
224 | batch[Batch.part_score_targets], heads[pred_layer], part_score_weights
225 | )
226 |
227 | loss = {}
228 | loss["part_loss"] = add_part_loss("part_pred")
229 | total_loss = loss["part_loss"]
230 | if cfg.intermediate_supervision:
231 | loss["part_loss_interm"] = add_part_loss("part_pred_interm")
232 | total_loss = total_loss + loss["part_loss_interm"]
233 |
234 | if cfg.location_refinement:
235 | locref_pred = heads["locref"]
236 | locref_targets = batch[Batch.locref_targets]
237 | locref_weights = batch[Batch.locref_mask]
238 |
239 | loss_func = (
240 | losses.huber_loss
241 | if cfg.locref_huber_loss
242 | else tf.compat.v1.losses.mean_squared_error
243 | )
244 | loss["locref_loss"] = cfg.locref_loss_weight * loss_func(
245 | locref_targets, locref_pred, locref_weights
246 | )
247 | total_loss = total_loss + loss["locref_loss"]
248 |
249 | # loss['total_loss'] = slim.losses.get_total_loss(add_regularization_losses=params.regularize)
250 | loss["total_loss"] = total_loss
251 | return loss
252 |
--------------------------------------------------------------------------------
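The batch-size-1 branch of PoseNet.inference() above decodes each joint from the score-map argmax plus the location-refinement offset. A small numpy sketch of that arithmetic with made-up values (stride and locref_stdev mirror the cfg fields; the channel re-ordering done by tf.gather in the real code is skipped here):

import numpy as np

stride, locref_stdev = 8.0, 7.2801        # illustrative values for cfg.stride / cfg.locref_stdev
probs = np.random.rand(28, 36, 4)         # score map: height x width x joints
locref = np.random.randn(28, 36, 4, 2)    # per-pixel refinement offsets for each joint

flat = probs.reshape(-1, probs.shape[-1])             # (h*w, joints)
maxloc = flat.argmax(axis=0)                           # best pixel index per joint
yy, xx = np.unravel_index(maxloc, probs.shape[:2])     # grid coordinates of the maxima

joints = np.arange(probs.shape[-1])
offsets = locref[yy, xx, joints]                       # (joints, 2) offsets at the maxima
likelihood = flat[maxloc, joints]                      # score at the maxima

# Same arithmetic as inference(): scale grid coordinates back to image pixels,
# add the refinement offset (scaled by locref_stdev), then append the score.
pose = np.column_stack([
    np.stack([yy, xx], axis=1) * stride + stride * 0.5 + offsets * locref_stdev,
    likelihood,
])                                                     # (joints, 3)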
/deeplabcutcore/pose_estimation_tensorflow/nnet/pose_net_mobilenet.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/DeepLabCut/DeepLabCut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 |
10 | Added with:
11 | Pretraining boosts out-of-domain robustness for pose estimation
12 | by Alexander Mathis, Mert Yüksekgönül, Byron Rogers, Matthias Bethge, Mackenzie W. Mathis
13 | https://arxiv.org/abs/1909.11229
14 |
15 | Based on Slim implementation of mobilenets:
16 | https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
17 | """
18 |
19 | import functools
20 | import tensorflow as tf
21 |
22 | vers = (tf.__version__).split(".")
23 | if int(vers[0]) == 2 or int(vers[0]) == 1 and int(vers[1]) > 12:
24 | tf = tf.compat.v1
25 | else:
26 | tf = tf
27 | if int(vers[0]) == 2:
28 | import tf_slim as slim
29 | else:
30 | import tensorflow.contrib.slim as slim
31 |
32 | from deeplabcutcore.pose_estimation_tensorflow.nnet import (
33 | mobilenet_v2,
34 | mobilenet,
35 | conv_blocks,
36 | )
37 | from ..dataset.pose_dataset import Batch
38 | from . import losses
39 |
40 |
41 | def wrapper(func, *args, **kwargs):
42 | partial_func = functools.partial(func, *args, **kwargs)
43 | functools.update_wrapper(partial_func, func)
44 | return partial_func
45 |
46 |
47 | networks = {
48 | "mobilenet_v2_1.0": (mobilenet_v2.mobilenet_base, mobilenet_v2.training_scope),
49 | "mobilenet_v2_0.75": (
50 | wrapper(
51 | mobilenet_v2.mobilenet_base,
52 | depth_multiplier=0.75,
53 | finegrain_classification_mode=True,
54 | ),
55 | mobilenet_v2.training_scope,
56 | ),
57 | "mobilenet_v2_0.5": (
58 | wrapper(
59 | mobilenet_v2.mobilenet_base,
60 | depth_multiplier=0.5,
61 | finegrain_classification_mode=True,
62 | ),
63 | mobilenet_v2.training_scope,
64 | ),
65 | "mobilenet_v2_0.35": (
66 | wrapper(
67 | mobilenet_v2.mobilenet_base,
68 | depth_multiplier=0.35,
69 | finegrain_classification_mode=True,
70 | ),
71 | mobilenet_v2.training_scope,
72 | ),
73 | }
74 |
75 |
76 | def prediction_layer(cfg, input, name, num_outputs):
77 | with slim.arg_scope(
78 | [slim.conv2d, slim.conv2d_transpose],
79 | padding="SAME",
80 | activation_fn=None,
81 | normalizer_fn=None,
82 | weights_regularizer=tf.keras.regularizers.l2(0.5 * (cfg.weight_decay)),
83 | ):
84 | with tf.variable_scope(name):
85 | pred = slim.conv2d_transpose(
86 | input, num_outputs, kernel_size=[3, 3], stride=2, scope="block4"
87 | )
88 | return pred
89 |
90 |
91 | class PoseNet:
92 | def __init__(self, cfg):
93 | self.cfg = cfg
94 |
95 | def extract_features(self, inputs):
96 | net_fun, net_arg_scope = networks[self.cfg.net_type]
97 | mean = tf.constant(
98 | self.cfg.mean_pixel, dtype=tf.float32, shape=[1, 1, 1, 3], name="img_mean"
99 | )
100 | im_centered = inputs - mean
101 | with slim.arg_scope(net_arg_scope()):
102 | net, end_points = net_fun(im_centered)
103 |
104 | return net, end_points
105 |
106 | def prediction_layers(self, features, end_points, reuse=None):
107 | cfg = self.cfg
108 |
109 | out = {}
110 | with tf.variable_scope("pose", reuse=reuse):
111 | out["part_pred"] = prediction_layer(
112 | cfg, features, "part_pred", cfg.num_joints
113 | )
114 | if cfg.location_refinement:
115 | out["locref"] = prediction_layer(
116 | cfg, features, "locref_pred", cfg.num_joints * 2
117 | )
118 | if cfg.intermediate_supervision:
119 | # print(end_points.keys()) >> to see what else is available.
120 | out["part_pred_interm"] = prediction_layer(
121 | cfg,
122 | end_points["layer_" + str(cfg["intermediate_supervision_layer"])],
123 | "intermediate_supervision",
124 | cfg.num_joints,
125 | )
126 |
127 | return out
128 |
129 | def get_net(self, inputs):
130 | net, end_points = self.extract_features(inputs)
131 | return self.prediction_layers(net, end_points)
132 |
133 | def test(self, inputs):
134 | heads = self.get_net(inputs)
135 | prob = tf.sigmoid(heads["part_pred"])
136 | return {"part_prob": prob, "locref": heads["locref"]}
137 |
138 | def inference(self, inputs):
139 | """ Direct TF inference on GPU. Added with: https://arxiv.org/abs/1909.11229"""
140 | cfg = self.cfg
141 | heads = self.get_net(inputs)
142 | locref = heads["locref"]
143 | probs = tf.sigmoid(heads["part_pred"])
144 |
145 | if cfg.batch_size == 1:
146 | probs = tf.squeeze(probs, axis=0)
147 | locref = tf.squeeze(locref, axis=0)
148 | l_shape = tf.shape(input=probs)
149 |
150 | locref = tf.reshape(locref, (l_shape[0] * l_shape[1], -1, 2))
151 | probs = tf.reshape(probs, (l_shape[0] * l_shape[1], -1))
152 | maxloc = tf.argmax(input=probs, axis=0)
153 |
154 | loc = tf.unravel_index(
155 | maxloc, (tf.cast(l_shape[0], tf.int64), tf.cast(l_shape[1], tf.int64))
156 | )
157 | maxloc = tf.reshape(maxloc, (1, -1))
158 |
159 | joints = tf.reshape(
160 | tf.range(0, tf.cast(l_shape[2], dtype=tf.int64)), (1, -1)
161 | )
162 | indices = tf.transpose(a=tf.concat([maxloc, joints], axis=0))
163 |
164 | offset = tf.gather_nd(locref, indices)
165 | offset = tf.gather(offset, [1, 0], axis=1)
166 | likelihood = tf.reshape(tf.gather_nd(probs, indices), (-1, 1))
167 |
168 | pose = (
169 | self.cfg.stride * tf.cast(tf.transpose(a=loc), dtype=tf.float32)
170 | + self.cfg.stride * 0.5
171 | + offset * cfg.locref_stdev
172 | )
173 | pose = tf.concat([pose, likelihood], axis=1)
174 |
175 | return {"pose": pose}
176 | else:
177 | # probs = tf.squeeze(probs, axis=0)
178 | l_shape = tf.shape(
179 | input=probs
180 | ) # batchsize times x times y times body parts
181 | # locref = locref*cfg.locref_stdev
182 | locref = tf.reshape(
183 | locref, (l_shape[0], l_shape[1], l_shape[2], l_shape[3], 2)
184 | )
185 | # turn into x times y time bs * bpts
186 | locref = tf.transpose(a=locref, perm=[1, 2, 0, 3, 4])
187 | probs = tf.transpose(a=probs, perm=[1, 2, 0, 3])
188 |
189 | # print(locref.get_shape().as_list())
190 | # print(probs.get_shape().as_list())
191 | l_shape = tf.shape(input=probs) # x times y times batch times body parts
192 |
193 | locref = tf.reshape(locref, (l_shape[0] * l_shape[1], -1, 2))
194 | probs = tf.reshape(probs, (l_shape[0] * l_shape[1], -1))
195 | maxloc = tf.argmax(input=probs, axis=0)
196 | loc = tf.unravel_index(
197 | maxloc, (tf.cast(l_shape[0], tf.int64), tf.cast(l_shape[1], tf.int64))
198 | ) # tuple of max indices
199 |
200 | maxloc = tf.reshape(maxloc, (1, -1))
201 | joints = tf.reshape(
202 | tf.range(0, tf.cast(l_shape[2] * l_shape[3], dtype=tf.int64)), (1, -1)
203 | )
204 | indices = tf.transpose(a=tf.concat([maxloc, joints], axis=0))
205 |
206 | # extract corresponding locref x and y as well as probability
207 | offset = tf.gather_nd(locref, indices)
208 | offset = tf.gather(offset, [1, 0], axis=1)
209 | likelihood = tf.reshape(tf.gather_nd(probs, indices), (-1, 1))
210 |
211 | pose = (
212 | self.cfg.stride * tf.cast(tf.transpose(a=loc), dtype=tf.float32)
213 | + self.cfg.stride * 0.5
214 | + offset * cfg.locref_stdev
215 | )
216 | pose = tf.concat([pose, likelihood], axis=1)
217 | return {"pose": pose}
218 |
219 | def train(self, batch):
220 | cfg = self.cfg
221 |
222 | heads = self.get_net(batch[Batch.inputs])
223 |
224 | weigh_part_predictions = cfg.weigh_part_predictions
225 | part_score_weights = (
226 | batch[Batch.part_score_weights] if weigh_part_predictions else 1.0
227 | )
228 |
229 | def add_part_loss(pred_layer):
230 | return tf.compat.v1.losses.sigmoid_cross_entropy(
231 | batch[Batch.part_score_targets], heads[pred_layer], part_score_weights
232 | )
233 |
234 | loss = {}
235 | loss["part_loss"] = add_part_loss("part_pred")
236 | total_loss = loss["part_loss"]
237 | if cfg.intermediate_supervision:
238 | loss["part_loss_interm"] = add_part_loss("part_pred_interm")
239 | total_loss = total_loss + loss["part_loss_interm"]
240 |
241 | if cfg.location_refinement:
242 | locref_pred = heads["locref"]
243 | locref_targets = batch[Batch.locref_targets]
244 | locref_weights = batch[Batch.locref_mask]
245 |
246 | loss_func = (
247 | losses.huber_loss
248 | if cfg.locref_huber_loss
249 | else tf.compat.v1.losses.mean_squared_error
250 | )
251 | loss["locref_loss"] = cfg.locref_loss_weight * loss_func(
252 | locref_targets, locref_pred, locref_weights
253 | )
254 | total_loss = total_loss + loss["locref_loss"]
255 |
256 | # loss['total_loss'] = slim.losses.get_total_loss(add_regularization_losses=params.regularize)
257 | loss["total_loss"] = total_loss
258 | return loss
259 |
--------------------------------------------------------------------------------
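The networks table above maps cfg.net_type strings to a (backbone builder, training scope) pair; the builder is mobilenet_v2.mobilenet_base partially applied with the matching depth multiplier. A brief sketch:

from deeplabcutcore.pose_estimation_tensorflow.nnet import pose_net_mobilenet

net_fun, scope_fn = pose_net_mobilenet.networks["mobilenet_v2_0.35"]
# net_fun behaves like mobilenet_base with depth_multiplier=0.35 and
# finegrain_classification_mode=True; scope_fn is mobilenet_v2.training_scope.
print(net_fun.__name__)  # should print "mobilenet_base" (update_wrapper copies the metadata)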
/deeplabcutcore/pose_estimation_tensorflow/nnet/predict.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from the original predict.py in Eldar Insafutdinov's implementation of [DeeperCut](https://github.com/eldar/pose-tensorflow)
3 |
4 | Source: DeeperCut by Eldar Insafutdinov
5 | https://github.com/eldar/pose-tensorflow
6 |
7 | To do faster inference on videos (with numpy based code; introduced in Oct 2018)
8 | "On the inference speed and video-compression robustness of DeepLabCut"
9 | Alexander Mathis & Richard Warren
10 | doi: https://doi.org/10.1101/457242
11 | See https://www.biorxiv.org/content/early/2018/10/30/457242
12 |
13 | To do even faster inference on videos (with TensorFlow based code; introduced in Oct 2019)
14 | Pretraining boosts out-of-domain robustness for pose estimation
15 | by Alexander Mathis, Mert Yüksekgönül, Byron Rogers, Matthias Bethge, Mackenzie W. Mathis
16 | https://arxiv.org/abs/1909.11229
17 | """
18 |
19 | import numpy as np
20 | import tensorflow as tf
21 |
22 | vers = (tf.__version__).split(".")
23 | if int(vers[0]) == 2 or int(vers[0]) == 1 and int(vers[1]) > 12:
24 | tf = tf.compat.v1
25 | else:
26 | tf = tf
27 | from deeplabcutcore.pose_estimation_tensorflow.nnet.net_factory import pose_net
28 |
29 |
30 | def setup_pose_prediction(cfg):
31 | tf.reset_default_graph()
32 | inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
33 | net_heads = pose_net(cfg).test(inputs)
34 | outputs = [net_heads["part_prob"]]
35 | if cfg.location_refinement:
36 | outputs.append(net_heads["locref"])
37 |
38 | restorer = tf.train.Saver()
39 | sess = tf.Session()
40 | sess.run(tf.global_variables_initializer())
41 | sess.run(tf.local_variables_initializer())
42 |
43 | # Restore variables from disk.
44 | restorer.restore(sess, cfg.init_weights)
45 |
46 | return sess, inputs, outputs
47 |
48 |
49 | def extract_cnn_output(outputs_np, cfg):
50 | """ extract locref + scmap from network """
51 | scmap = outputs_np[0]
52 | scmap = np.squeeze(scmap)
53 | locref = None
54 | if cfg.location_refinement:
55 | locref = np.squeeze(outputs_np[1])
56 | shape = locref.shape
57 | locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
58 | locref *= cfg.locref_stdev
59 | if len(scmap.shape) == 2: # for single body part!
60 | scmap = np.expand_dims(scmap, axis=2)
61 | return scmap, locref
62 |
63 |
64 | def argmax_pose_predict(scmap, offmat, stride):
65 | """Combine scoremat and offsets to the final pose."""
66 | num_joints = scmap.shape[2]
67 | pose = []
68 | for joint_idx in range(num_joints):
69 | maxloc = np.unravel_index(
70 | np.argmax(scmap[:, :, joint_idx]), scmap[:, :, joint_idx].shape
71 | )
72 | offset = np.array(offmat[maxloc][joint_idx])[::-1]
73 | pos_f8 = np.array(maxloc).astype("float") * stride + 0.5 * stride + offset
74 | pose.append(np.hstack((pos_f8[::-1], [scmap[maxloc][joint_idx]])))
75 | return np.array(pose)
76 |
77 |
78 | def multi_pose_predict(scmap, locref, stride, num_outputs):
79 | Y, X = get_top_values(scmap[None], num_outputs)
80 | Y, X = Y[:, 0], X[:, 0]
81 | num_joints = scmap.shape[2]
82 | DZ = np.zeros((num_outputs, num_joints, 3))
83 | for m in range(num_outputs):
84 | for k in range(num_joints):
85 | x = X[m, k]
86 | y = Y[m, k]
87 | DZ[m, k, :2] = locref[y, x, k, :]
88 | DZ[m, k, 2] = scmap[y, x, k]
89 |
90 | X = X.astype("float32") * stride + 0.5 * stride + DZ[:, :, 0]
91 | Y = Y.astype("float32") * stride + 0.5 * stride + DZ[:, :, 1]
92 | P = DZ[:, :, 2]
93 |
94 | pose = np.empty((num_joints, num_outputs * 3), dtype="float32")
95 | pose[:, 0::3] = X.T
96 | pose[:, 1::3] = Y.T
97 | pose[:, 2::3] = P.T
98 |
99 | return pose
100 |
101 |
102 | def getpose(image, cfg, sess, inputs, outputs, outall=False):
103 | """ Extract pose """
104 | im = np.expand_dims(image, axis=0).astype(float)
105 | outputs_np = sess.run(outputs, feed_dict={inputs: im})
106 | scmap, locref = extract_cnn_output(outputs_np, cfg)
107 | num_outputs = cfg.get("num_outputs", 1)
108 | if num_outputs > 1:
109 | pose = multi_pose_predict(scmap, locref, cfg.stride, num_outputs)
110 | else:
111 | pose = argmax_pose_predict(scmap, locref, cfg.stride)
112 | if outall:
113 | return scmap, locref, pose
114 | else:
115 | return pose
116 |
117 |
118 | ## Functions below are for batch sizes > 1:
119 | def extract_cnn_outputmulti(outputs_np, cfg):
120 | """ extract locref + scmap from network
121 | Dimensions: image batch x imagedim1 x imagedim2 x bodypart"""
122 | scmap = outputs_np[0]
123 | locref = None
124 | if cfg.location_refinement:
125 | locref = outputs_np[1]
126 | shape = locref.shape
127 | locref = np.reshape(locref, (shape[0], shape[1], shape[2], -1, 2))
128 | locref *= cfg.locref_stdev
129 | if len(scmap.shape) == 2: # for single body part!
130 | scmap = np.expand_dims(scmap, axis=2)
131 | return scmap, locref
132 |
133 |
134 | def get_top_values(scmap, n_top=5):
135 | batchsize, ny, nx, num_joints = scmap.shape
136 | scmap_flat = scmap.reshape(batchsize, nx * ny, num_joints)
137 | if n_top == 1:
138 | scmap_top = np.argmax(scmap_flat, axis=1)[None]
139 | else:
140 | scmap_top = np.argpartition(scmap_flat, -n_top, axis=1)[:, -n_top:]
141 | for ix in range(batchsize):
142 | vals = scmap_flat[ix, scmap_top[ix], np.arange(num_joints)]
143 | arg = np.argsort(-vals, axis=0)
144 | scmap_top[ix] = scmap_top[ix, arg, np.arange(num_joints)]
145 | scmap_top = scmap_top.swapaxes(0, 1)
146 |
147 | Y, X = np.unravel_index(scmap_top, (ny, nx))
148 | return Y, X
149 |
150 |
151 | def getposeNP(image, cfg, sess, inputs, outputs, outall=False):
152 | """ Adapted from DeeperCut, performs numpy-based faster inference on batches.
153 | Introduced in https://www.biorxiv.org/content/10.1101/457242v1 """
154 |
155 | num_outputs = cfg.get("num_outputs", 1)
156 | outputs_np = sess.run(outputs, feed_dict={inputs: image})
157 |
158 | scmap, locref = extract_cnn_outputmulti(outputs_np, cfg) # processes image batch.
159 | batchsize, ny, nx, num_joints = scmap.shape
160 |
161 | Y, X = get_top_values(scmap, n_top=num_outputs)
162 |
163 | # Combine scoremat and offsets to the final pose.
164 | DZ = np.zeros((num_outputs, batchsize, num_joints, 3))
165 | for m in range(num_outputs):
166 | for l in range(batchsize):
167 | for k in range(num_joints):
168 | x = X[m, l, k]
169 | y = Y[m, l, k]
170 | DZ[m, l, k, :2] = locref[l, y, x, k, :]
171 | DZ[m, l, k, 2] = scmap[l, y, x, k]
172 |
173 | X = X.astype("float32") * cfg.stride + 0.5 * cfg.stride + DZ[:, :, :, 0]
174 | Y = Y.astype("float32") * cfg.stride + 0.5 * cfg.stride + DZ[:, :, :, 1]
175 | P = DZ[:, :, :, 2]
176 |
177 | Xs = X.swapaxes(0, 2).swapaxes(0, 1)
178 | Ys = Y.swapaxes(0, 2).swapaxes(0, 1)
179 | Ps = P.swapaxes(0, 2).swapaxes(0, 1)
180 |
181 | pose = np.empty(
182 | (cfg["batch_size"], num_outputs * cfg["num_joints"] * 3), dtype=X.dtype
183 | )
184 | pose[:, 0::3] = Xs.reshape(batchsize, -1)
185 | pose[:, 1::3] = Ys.reshape(batchsize, -1)
186 | pose[:, 2::3] = Ps.reshape(batchsize, -1)
187 |
188 | if outall:
189 | return scmap, locref, pose
190 | else:
191 | return pose
192 |
193 |
194 | ### Code for TF inference on GPU
195 | def setup_GPUpose_prediction(cfg):
196 | tf.reset_default_graph()
197 | inputs = tf.placeholder(tf.float32, shape=[cfg.batch_size, None, None, 3])
198 | net_heads = pose_net(cfg).inference(inputs)
199 | outputs = [net_heads["pose"]]
200 |
201 | restorer = tf.train.Saver()
202 | sess = tf.Session()
203 |
204 | sess.run(tf.global_variables_initializer())
205 | sess.run(tf.local_variables_initializer())
206 |
207 | # Restore variables from disk.
208 | restorer.restore(sess, cfg.init_weights)
209 |
210 | return sess, inputs, outputs
211 |
212 |
213 | def extract_GPUprediction(outputs, cfg):
214 | return outputs[0]
215 |
--------------------------------------------------------------------------------
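A hedged sketch of the single-image path above (setup_pose_prediction followed by getpose). The config path is a placeholder; cfg.init_weights must point at a trained snapshot for the restore to succeed.

import numpy as np
from deeplabcutcore.pose_estimation_tensorflow.config import load_config
from deeplabcutcore.pose_estimation_tensorflow.nnet import predict

cfg = load_config("<path to the model's test/pose_cfg.yaml>")  # placeholder path
cfg["batch_size"] = 1                               # the single-image helpers assume batch size 1
sess, inputs, outputs = predict.setup_pose_prediction(cfg)

frame = np.zeros((480, 640, 3), dtype="uint8")      # stand-in for a video frame
pose = predict.getpose(frame, cfg, sess, inputs, outputs)
# pose has one row per bodypart: x, y, likelihood (see argmax_pose_predict above).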
/deeplabcutcore/pose_estimation_tensorflow/test.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from DeeperCut by Eldar Insafutdinov
3 | https://github.com/eldar/pose-tensorflow
4 | """
5 |
6 | import argparse
7 | import logging
8 | import os
9 |
10 | import numpy as np
11 | import scipy.io
12 | import scipy.ndimage
13 |
14 | from deeplabcutcore.pose_estimation_tensorflow.config import load_config
15 | from deeplabcutcore.pose_estimation_tensorflow.dataset.factory import (
16 | create as create_dataset,
17 | )
18 | from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset import Batch
19 | from deeplabcutcore.pose_estimation_tensorflow.nnet.predict import (
20 | setup_pose_prediction,
21 | extract_cnn_output,
22 | argmax_pose_predict,
23 | )
24 | from deeplabcutcore.pose_estimation_tensorflow.util import visualize
25 |
26 |
27 | def test_net(visualise, cache_scoremaps):
28 | logging.basicConfig(level=logging.INFO)
29 |
30 | cfg = load_config()
31 | dataset = create_dataset(cfg)
32 | dataset.set_shuffle(False)
33 | dataset.set_test_mode(True)
34 |
35 | sess, inputs, outputs = setup_pose_prediction(cfg)
36 |
37 | if cache_scoremaps:
38 | out_dir = cfg.scoremap_dir
39 | if not os.path.exists(out_dir):
40 | os.makedirs(out_dir)
41 |
42 | num_images = dataset.num_images
43 |     predictions = np.zeros((num_images,), dtype=object)  # np.object is deprecated in newer NumPy
44 |
45 | for k in range(num_images):
46 | print("processing image {}/{}".format(k, num_images - 1))
47 |
48 | batch = dataset.next_batch()
49 |
50 | outputs_np = sess.run(outputs, feed_dict={inputs: batch[Batch.inputs]})
51 |
52 | scmap, locref = extract_cnn_output(outputs_np, cfg)
53 |
54 | pose = argmax_pose_predict(scmap, locref, cfg.stride)
55 |
56 | pose_refscale = np.copy(pose)
57 | pose_refscale[:, 0:2] /= cfg.global_scale
58 | predictions[k] = pose_refscale
59 |
60 | if visualise:
61 | img = np.squeeze(batch[Batch.inputs]).astype("uint8")
62 | visualize.show_heatmaps(cfg, img, scmap, pose)
63 | visualize.waitforbuttonpress()
64 |
65 | if cache_scoremaps:
66 | base = os.path.basename(batch[Batch.data_item].im_path)
67 | raw_name = os.path.splitext(base)[0]
68 | out_fn = os.path.join(out_dir, raw_name + ".mat")
69 | scipy.io.savemat(out_fn, mdict={"scoremaps": scmap.astype("float32")})
70 |
71 | out_fn = os.path.join(out_dir, raw_name + "_locreg" + ".mat")
72 | if cfg.location_refinement:
73 | scipy.io.savemat(
74 | out_fn, mdict={"locreg_pred": locref.astype("float32")}
75 | )
76 |
77 | scipy.io.savemat("predictions.mat", mdict={"joints": predictions})
78 |
79 | sess.close()
80 |
81 |
82 | if __name__ == "__main__":
83 | parser = argparse.ArgumentParser()
84 | parser.add_argument("--novis", default=False, action="store_true")
85 | parser.add_argument("--cache", default=False, action="store_true")
86 | args, unparsed = parser.parse_known_args()
87 |
88 | test_net(not args.novis, args.cache)
89 |
--------------------------------------------------------------------------------
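test.py is a thin CLI around test_net(); load_config() is called without arguments, so the pose configuration is expected to be resolvable from the working directory. A hedged invocation example:

# run from a folder containing the model's pose configuration
#   python -m deeplabcutcore.pose_estimation_tensorflow.test                  # with heatmap visualization
#   python -m deeplabcutcore.pose_estimation_tensorflow.test --novis --cache  # no plots, cache scoremaps to .mat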
/deeplabcutcore/pose_estimation_tensorflow/train.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 |
10 | Adapted from DeeperCut by Eldar Insafutdinov
11 | https://github.com/eldar/pose-tensorflow
12 |
13 | """
14 | import logging, os
15 | import threading
16 | import argparse
17 | from pathlib import Path
18 | import tensorflow as tf
19 |
20 | vers = (tf.__version__).split(".")
21 | if int(vers[0]) == 2 or int(vers[0]) == 1 and int(vers[1]) > 12:
22 | TF = tf.compat.v1
23 | else:
24 | TF = tf.compat.v1
25 | import tf_slim as slim
26 |
27 | tf.compat.v1.disable_eager_execution()
28 |
29 | from deeplabcutcore.pose_estimation_tensorflow.config import load_config
30 | from deeplabcutcore.pose_estimation_tensorflow.dataset.pose_dataset import Batch
31 | from deeplabcutcore.pose_estimation_tensorflow.dataset.factory import (
32 | create as create_dataset,
33 | )
34 | from deeplabcutcore.pose_estimation_tensorflow.nnet.net_factory import pose_net
35 | from deeplabcutcore.pose_estimation_tensorflow.util.logging import setup_logging
36 |
37 |
38 | class LearningRate(object):
39 | def __init__(self, cfg):
40 | self.steps = cfg.multi_step
41 | self.current_step = 0
42 |
43 | def get_lr(self, iteration):
44 | lr = self.steps[self.current_step][0]
45 | if iteration == self.steps[self.current_step][1]:
46 | self.current_step += 1
47 |
48 | return lr
49 |
50 |
51 | def get_batch_spec(cfg):
52 | num_joints = cfg.num_joints
53 | batch_size = cfg.batch_size
54 | return {
55 | Batch.inputs: [batch_size, None, None, 3],
56 | Batch.part_score_targets: [batch_size, None, None, num_joints],
57 | Batch.part_score_weights: [batch_size, None, None, num_joints],
58 | Batch.locref_targets: [batch_size, None, None, num_joints * 2],
59 | Batch.locref_mask: [batch_size, None, None, num_joints * 2],
60 | }
61 |
62 |
63 | def setup_preloading(batch_spec):
64 | placeholders = {
65 | name: TF.placeholder(tf.float32, shape=spec)
66 | for (name, spec) in batch_spec.items()
67 | }
68 | names = placeholders.keys()
69 | placeholders_list = list(placeholders.values())
70 |
71 | QUEUE_SIZE = 20
72 | vers = (tf.__version__).split(".")
73 | if int(vers[0]) == 1 and int(vers[1]) > 12:
74 | q = tf.queue.FIFOQueue(QUEUE_SIZE, [tf.float32] * len(batch_spec))
75 | else:
76 | q = tf.queue.FIFOQueue(QUEUE_SIZE, [tf.float32] * len(batch_spec))
77 | enqueue_op = q.enqueue(placeholders_list)
78 | batch_list = q.dequeue()
79 |
80 | batch = {}
81 | for idx, name in enumerate(names):
82 | batch[name] = batch_list[idx]
83 | batch[name].set_shape(batch_spec[name])
84 | return batch, enqueue_op, placeholders
85 |
86 |
87 | def load_and_enqueue(sess, enqueue_op, coord, dataset, placeholders):
88 | while not coord.should_stop():
89 | batch_np = dataset.next_batch()
90 | food = {pl: batch_np[name] for (name, pl) in placeholders.items()}
91 | sess.run(enqueue_op, feed_dict=food)
92 |
93 |
94 | def start_preloading(sess, enqueue_op, dataset, placeholders):
95 | coord = TF.train.Coordinator()
96 |
97 | t = threading.Thread(
98 | target=load_and_enqueue, args=(sess, enqueue_op, coord, dataset, placeholders)
99 | )
100 | t.start()
101 |
102 | return coord, t
103 |
104 |
105 | def get_optimizer(loss_op, cfg):
106 | learning_rate = TF.placeholder(tf.float32, shape=[])
107 |
108 | if cfg.optimizer == "sgd":
109 | optimizer = TF.train.MomentumOptimizer(
110 | learning_rate=learning_rate, momentum=0.9
111 | )
112 | elif cfg.optimizer == "adam":
113 | optimizer = TF.train.AdamOptimizer(learning_rate)
114 | else:
115 | raise ValueError("unknown optimizer {}".format(cfg.optimizer))
116 | train_op = slim.learning.create_train_op(loss_op, optimizer)
117 |
118 | return learning_rate, train_op
119 |
120 |
121 | def train(
122 | config_yaml,
123 | displayiters,
124 | saveiters,
125 | maxiters,
126 | max_to_keep=5,
127 | keepdeconvweights=True,
128 | allow_growth=False,
129 | ):
130 | start_path = os.getcwd()
131 | os.chdir(
132 | str(Path(config_yaml).parents[0])
133 | ) # switch to folder of config_yaml (for logging)
134 | setup_logging()
135 |
136 | cfg = load_config(config_yaml)
137 | if (
138 | cfg.dataset_type == "default"
139 | or cfg.dataset_type == "tensorpack"
140 | or cfg.dataset_type == "deterministic"
141 | ):
142 | print(
143 | "Switching batchsize to 1, as default/tensorpack/deterministic loaders do not support batches >1. Use imgaug loader."
144 | )
145 | cfg["batch_size"] = 1 # in case this was edited for analysis.-
146 |
147 | dataset = create_dataset(cfg)
148 | batch_spec = get_batch_spec(cfg)
149 | batch, enqueue_op, placeholders = setup_preloading(batch_spec)
150 | losses = pose_net(cfg).train(batch)
151 | total_loss = losses["total_loss"]
152 |
153 | for k, t in losses.items():
154 | TF.summary.scalar(k, t)
155 | merged_summaries = TF.summary.merge_all()
156 |
157 | if "snapshot" in Path(cfg.init_weights).stem and keepdeconvweights:
158 | print("Loading already trained DLC with backbone:", cfg.net_type)
159 | variables_to_restore = slim.get_variables_to_restore()
160 | else:
161 | print("Loading ImageNet-pretrained", cfg.net_type)
162 | # loading backbone from ResNet, MobileNet etc.
163 | if "resnet" in cfg.net_type:
164 | variables_to_restore = slim.get_variables_to_restore(include=["resnet_v1"])
165 | elif "mobilenet" in cfg.net_type:
166 | variables_to_restore = slim.get_variables_to_restore(
167 | include=["MobilenetV2"]
168 | )
169 | else:
170 | print("Wait for DLC 2.3.")
171 |
172 | restorer = TF.train.Saver(variables_to_restore)
173 | saver = TF.train.Saver(
174 | max_to_keep=max_to_keep
175 | ) # selects how many snapshots are stored, see https://github.com/AlexEMG/DeepLabCut/issues/8#issuecomment-387404835
176 |
177 | if allow_growth == True:
178 | config = tf.compat.v1.ConfigProto()
179 | config.gpu_options.allow_growth = True
180 | sess = TF.Session(config=config)
181 | else:
182 | sess = TF.Session()
183 |
184 | coord, thread = start_preloading(sess, enqueue_op, dataset, placeholders)
185 | train_writer = TF.summary.FileWriter(cfg.log_dir, sess.graph)
186 | learning_rate, train_op = get_optimizer(total_loss, cfg)
187 |
188 | sess.run(TF.global_variables_initializer())
189 | sess.run(TF.local_variables_initializer())
190 |
191 | # Restore variables from disk.
192 | restorer.restore(sess, cfg.init_weights)
193 | if maxiters == None:
194 | max_iter = int(cfg.multi_step[-1][1])
195 | else:
196 | max_iter = min(int(cfg.multi_step[-1][1]), int(maxiters))
197 | # display_iters = max(1,int(displayiters))
198 | print("Max_iters overwritten as", max_iter)
199 |
200 | if displayiters == None:
201 | display_iters = max(1, int(cfg.display_iters))
202 | else:
203 | display_iters = max(1, int(displayiters))
204 | print("Display_iters overwritten as", display_iters)
205 |
206 | if saveiters == None:
207 | save_iters = max(1, int(cfg.save_iters))
208 |
209 | else:
210 | save_iters = max(1, int(saveiters))
211 | print("Save_iters overwritten as", save_iters)
212 |
213 | cum_loss = 0.0
214 | lr_gen = LearningRate(cfg)
215 |
216 | stats_path = Path(config_yaml).with_name("learning_stats.csv")
217 | lrf = open(str(stats_path), "w")
218 |
219 |     print("Training parameters:")
220 | print(cfg)
221 | print("Starting training....")
222 | for it in range(max_iter + 1):
223 | current_lr = lr_gen.get_lr(it)
224 | [_, loss_val, summary] = sess.run(
225 | [train_op, total_loss, merged_summaries],
226 | feed_dict={learning_rate: current_lr},
227 | )
228 | cum_loss += loss_val
229 | train_writer.add_summary(summary, it)
230 |
231 | if it % display_iters == 0 and it > 0:
232 | average_loss = cum_loss / display_iters
233 | cum_loss = 0.0
234 | logging.info(
235 | "iteration: {} loss: {} lr: {}".format(
236 | it, "{0:.4f}".format(average_loss), current_lr
237 | )
238 | )
239 | lrf.write("{}, {:.5f}, {}\n".format(it, average_loss, current_lr))
240 | lrf.flush()
241 |
242 | # Save snapshot
243 | if (it % save_iters == 0 and it != 0) or it == max_iter:
244 | model_name = cfg.snapshot_prefix
245 | saver.save(sess, model_name, global_step=it)
246 |
247 | lrf.close()
248 | sess.close()
249 | coord.request_stop()
250 | coord.join([thread])
251 | # return to original path.
252 | os.chdir(str(start_path))
253 |
254 |
255 | if __name__ == "__main__":
256 | parser = argparse.ArgumentParser()
257 | parser.add_argument("config", help="Path to yaml configuration file.")
258 | cli_args = parser.parse_args()
259 |
260 | train(Path(cli_args.config).resolve())
261 |
--------------------------------------------------------------------------------
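The LearningRate helper above steps through cfg.multi_step, a list of [learning_rate, until_iteration] pairs read from pose_cfg.yaml. A small illustration with made-up schedule values:

from types import SimpleNamespace
from deeplabcutcore.pose_estimation_tensorflow.train import LearningRate

cfg = SimpleNamespace(multi_step=[[0.005, 10000], [0.02, 430000], [0.002, 730000], [0.001, 1030000]])
lr_gen = LearningRate(cfg)
print(lr_gen.get_lr(0))        # 0.005
print(lr_gen.get_lr(10000))    # 0.005, and the schedule advances to the next step
print(lr_gen.get_lr(10001))    # 0.02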
/deeplabcutcore/pose_estimation_tensorflow/training.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | import os
12 | from pathlib import Path
13 |
14 |
15 | def return_train_network_path(config, shuffle, trainFraction):
16 |     """ Returns the training and test pose config file paths, as well as the folder where the snapshots are stored.
17 |     Parameters
18 |     ----------
19 | config : string
20 | Full path of the config.yaml file as a string.
21 |
22 | shuffle: int
23 | Integer value specifying the shuffle index to select for training.
24 |
25 | trainFraction: float
26 | Float specifying which training set fraction to use.
27 |
28 | Returns the triple: trainposeconfigfile, testposeconfigfile, snapshotfolder
29 |
30 | """
31 | from deeplabcutcore.utils import auxiliaryfunctions
32 |
33 | # Read file path for pose_config file. >> pass it on
34 | cfg = auxiliaryfunctions.read_config(config)
35 |
36 | modelfoldername = auxiliaryfunctions.GetModelFolder(trainFraction, shuffle, cfg)
37 | trainposeconfigfile = Path(
38 | os.path.join(
39 | cfg["project_path"], str(modelfoldername), "train", "pose_cfg.yaml"
40 | )
41 | )
42 | testposeconfigfile = Path(
43 | os.path.join(cfg["project_path"], str(modelfoldername), "test", "pose_cfg.yaml")
44 | )
45 | snapshotfolder = Path(
46 | os.path.join(cfg["project_path"], str(modelfoldername), "train")
47 | )
48 |
49 | return trainposeconfigfile, testposeconfigfile, snapshotfolder
50 |
51 |
52 | def train_network(
53 | config,
54 | shuffle=1,
55 | trainingsetindex=0,
56 | max_snapshots_to_keep=5,
57 | displayiters=None,
58 | saveiters=None,
59 | maxiters=None,
60 | allow_growth=False,
61 | gputouse=None,
62 | autotune=False,
63 | keepdeconvweights=True,
64 | ):
65 | """Trains the network with the labels in the training dataset.
66 |
67 |     Parameters
68 | ----------
69 | config : string
70 | Full path of the config.yaml file as a string.
71 |
72 | shuffle: int, optional
73 | Integer value specifying the shuffle index to select for training. Default is set to 1
74 |
75 | trainingsetindex: int, optional
76 | Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).
77 |
78 | Additional parameters:
79 |
80 |     max_snapshots_to_keep: int or None. Sets how many snapshots (i.e. states of the trained network) are kept. A snapshot is stored
81 |         every save iteration, but only the last max_snapshots_to_keep are kept! If you change this to None, then all are kept.
82 | See: https://github.com/AlexEMG/DeepLabCut/issues/8#issuecomment-387404835
83 |
84 |     displayiters: normally set in pose_config.yaml, but can be overridden here for convenience, without editing
85 |         the pose_config.yaml file of the corresponding project. If None, the value from there is used, otherwise it is overwritten! Default: None
86 |
87 |     saveiters: normally set in pose_config.yaml, but can be overridden here for convenience, without editing
88 |         the pose_config.yaml file of the corresponding project. If None, the value from there is used, otherwise it is overwritten! Default: None
89 |
90 |     maxiters: normally set in pose_config.yaml, but can be overridden here for convenience, without editing
91 |         the pose_config.yaml file of the corresponding project. If None, the value from there is used, otherwise it is overwritten! Default: None
92 |
93 |     allow_growth: bool, default: False.
94 |         Some smaller GPUs run into memory issues. If True, the memory allocator does not pre-allocate the entire specified
95 |         GPU memory region, instead starting small and growing as needed. See issue: https://forum.image.sc/t/how-to-stop-running-out-of-vram/30551/2
96 |
97 |     gputouse: int, optional. Index of the GPU to use (see the device numbers in nvidia-smi). If you do not have a GPU, put None.
98 |     See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries
99 |
100 |     autotune: TensorFlow cuDNN autotune setting; training is typically faster when disabled (as Eldar found out, see https://github.com/tensorflow/tensorflow/issues/13317). Default: False
101 |
102 | keepdeconvweights: bool, default: true
103 | Also restores the weights of the deconvolution layers (and the backbone) when training from a snapshot. Note that if you change the number of bodyparts, you need to
104 | set this to false for re-training.
105 |
106 | Example
107 | --------
108 |     For training the network on the first shuffle of the training dataset:
109 |     >>> deeplabcutcore.train_network('/analysis/project/reaching-task/config.yaml')
110 |     --------
111 |
112 |     For training the network on the second shuffle of the training dataset:
113 | >>> deeplabcutcore.train_network('/analysis/project/reaching-task/config.yaml',shuffle=2,keepdeconvweights=True)
114 | --------
115 |
116 | """
117 | import tensorflow as tf
118 |
119 | vers = (tf.__version__).split(".")
120 | if int(vers[0]) == 1 and int(vers[1]) > 12:
121 | TF = tf.compat.v1
122 | else:
123 | TF = tf.compat.v1
124 |
125 | # reload logger.
126 | import importlib
127 | import logging
128 |
129 | importlib.reload(logging)
130 | logging.shutdown()
131 |
132 | from deeplabcutcore.pose_estimation_tensorflow.train import train
133 | from deeplabcutcore.utils import auxiliaryfunctions
134 |
135 | TF.reset_default_graph()
136 | start_path = os.getcwd()
137 |
138 | # Read file path for pose_config file. >> pass it on
139 | cfg = auxiliaryfunctions.read_config(config)
140 | modelfoldername = auxiliaryfunctions.GetModelFolder(
141 | cfg["TrainingFraction"][trainingsetindex], shuffle, cfg
142 | )
143 | poseconfigfile = Path(
144 | os.path.join(
145 | cfg["project_path"], str(modelfoldername), "train", "pose_cfg.yaml"
146 | )
147 | )
148 | if not poseconfigfile.is_file():
149 | print("The training datafile ", poseconfigfile, " is not present.")
150 | print(
151 | "Probably, the training dataset for this specific shuffle index was not created."
152 | )
153 | print(
154 | "Try with a different shuffle/trainingsetfraction or use function 'create_training_dataset' to create a new trainingdataset with this shuffle index."
155 | )
156 | else:
157 | # Set environment variables
158 | if (
159 | autotune is not False
160 | ): # see: https://github.com/tensorflow/tensorflow/issues/13317
161 | os.environ["TF_CUDNN_USE_AUTOTUNE"] = "0"
162 | if gputouse is not None:
163 | os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)
164 | try:
165 | train(
166 | str(poseconfigfile),
167 | displayiters,
168 | saveiters,
169 | maxiters,
170 | max_to_keep=max_snapshots_to_keep,
171 | keepdeconvweights=keepdeconvweights,
172 | allow_growth=allow_growth,
173 | ) # pass on path and file name for pose_cfg.yaml!
174 | except BaseException as e:
175 | raise e
176 | finally:
177 | os.chdir(str(start_path))
178 | print(
179 | "The network is now trained and ready to evaluate. Use the function 'evaluate_network' to evaluate the network."
180 | )
181 |
--------------------------------------------------------------------------------
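A hedged sketch of return_train_network_path() above; the project config path and train fraction are placeholders for a real DeepLabCut project.

from deeplabcutcore.pose_estimation_tensorflow.training import return_train_network_path

train_cfg, test_cfg, snapshot_dir = return_train_network_path(
    "<path to project>/config.yaml", shuffle=1, trainFraction=0.95
)
print(train_cfg)     # .../train/pose_cfg.yaml for that shuffle and fraction
print(snapshot_dir)  # the train folder where saver.save() writes the snapshot files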
/deeplabcutcore/pose_estimation_tensorflow/util/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from DeeperCut by Eldar Insafutdinov:
3 | https://github.com/eldar/pose-tensorflow
4 | """
5 |
6 | import os
7 | from deeplabcutcore.pose_estimation_tensorflow.util.logging import *
8 |
9 | if os.environ.get("DLClight", default=False) == "True":
10 | pass
11 | else:
12 | from deeplabcutcore.pose_estimation_tensorflow.util.visualize import *
13 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/util/logging.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from DeeperCut by Eldar Insafutdinov
3 | https://github.com/eldar/pose-tensorflow
4 | """
5 | import logging, os
6 |
7 |
8 | def setup_logging():
9 | FORMAT = "%(asctime)-15s %(message)s"
10 | logging.basicConfig(
11 | filename=os.path.join("log.txt"),
12 | filemode="w",
13 | datefmt="%Y-%m-%d %H:%M:%S",
14 | level=logging.INFO,
15 | format=FORMAT,
16 | )
17 |
18 | console = logging.StreamHandler()
19 | console.setLevel(logging.INFO)
20 | logging.getLogger("").addHandler(console)
21 |
--------------------------------------------------------------------------------
/deeplabcutcore/pose_estimation_tensorflow/util/visualize.py:
--------------------------------------------------------------------------------
1 | """
2 | Adapted from DeeperCut by Eldar Insafutdinov
3 | https://github.com/eldar/pose-tensorflow
4 | """
5 |
6 | import math, os
7 | import numpy as np
8 |
9 | # from scipy.misc import imresize
10 | from deeplabcutcore.utils.auxfun_videos import imresize
11 |
12 | import matplotlib as mpl
13 | import platform
14 |
15 |
16 | # assume headless == DLClight
17 | mpl.use("AGG") # anti-grain geometry engine #https://matplotlib.org/faq/usage_faq.html
18 | import matplotlib.pyplot as plt
19 |
20 |
21 | def _npcircle(image, cx, cy, radius, color, transparency=0.0):
22 | """Draw a circle on an image using only numpy methods."""
23 | radius = int(radius)
24 | cx = int(cx)
25 | cy = int(cy)
26 | y, x = np.ogrid[-radius:radius, -radius:radius]
27 | index = x ** 2 + y ** 2 <= radius ** 2
28 | image[cy - radius : cy + radius, cx - radius : cx + radius][index] = (
29 | image[cy - radius : cy + radius, cx - radius : cx + radius][index].astype(
30 | "float32"
31 | )
32 | * transparency
33 | + np.array(color).astype("float32") * (1.0 - transparency)
34 | ).astype("uint8")
35 |
36 |
37 | def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
38 | return minx < cur_x < maxx and miny < cur_y < maxy
39 |
40 |
41 | def visualize_joints(image, pose):
42 | marker_size = 8
43 | minx = 2 * marker_size
44 | miny = 2 * marker_size
45 | maxx = image.shape[1] - 2 * marker_size
46 | maxy = image.shape[0] - 2 * marker_size
47 | num_joints = pose.shape[0]
48 |
49 | visim = image.copy()
50 | colors = [
51 | [255, 0, 0],
52 | [0, 255, 0],
53 | [0, 0, 255],
54 | [0, 245, 255],
55 | [255, 131, 250],
56 | [255, 255, 0],
57 | [255, 0, 0],
58 | [0, 255, 0],
59 | [0, 0, 255],
60 | [0, 245, 255],
61 | [255, 131, 250],
62 | [255, 255, 0],
63 | [0, 0, 0],
64 | [255, 255, 255],
65 | [255, 0, 0],
66 | [0, 255, 0],
67 | [0, 0, 255],
68 | ]
69 | for p_idx in range(num_joints):
70 | cur_x = pose[p_idx, 0]
71 | cur_y = pose[p_idx, 1]
72 | if check_point(cur_x, cur_y, minx, miny, maxx, maxy):
73 | _npcircle(visim, cur_x, cur_y, marker_size, colors[p_idx], 0.0)
74 | return visim
75 |
76 |
77 | def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
78 | interp = "bilinear"
79 | all_joints = cfg.all_joints
80 | all_joints_names = cfg.all_joints_names
81 | subplot_width = 3
82 | subplot_height = math.ceil((len(all_joints) + 1) / subplot_width)
83 | f, axarr = plt.subplots(subplot_height, subplot_width)
84 | for pidx, part in enumerate(all_joints):
85 | plot_j = (pidx + 1) // subplot_width
86 | plot_i = (pidx + 1) % subplot_width
87 | scmap_part = np.sum(scmap[:, :, part], axis=2)
88 | scmap_part = imresize(scmap_part, 8.0, interp="bicubic")
89 | scmap_part = np.lib.pad(scmap_part, ((4, 0), (4, 0)), "minimum")
90 | curr_plot = axarr[plot_j, plot_i]
91 | curr_plot.set_title(all_joints_names[pidx])
92 | curr_plot.axis("off")
93 | curr_plot.imshow(img, interpolation=interp)
94 | curr_plot.imshow(scmap_part, alpha=0.5, cmap=cmap, interpolation=interp)
95 |
96 | curr_plot = axarr[0, 0]
97 | curr_plot.set_title("Pose")
98 | curr_plot.axis("off")
99 | curr_plot.imshow(visualize_joints(img, pose))
100 |
101 | plt.show()
102 |
103 |
104 | def waitforbuttonpress():
105 | plt.waitforbuttonpress(timeout=1)
106 |
--------------------------------------------------------------------------------
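_npcircle() above alpha-blends a filled circle into the image in place: inside the radius, each pixel becomes pixel * transparency + color * (1 - transparency). A tiny self-contained sketch:

import numpy as np
from deeplabcutcore.pose_estimation_tensorflow.util.visualize import _npcircle

canvas = np.zeros((100, 100, 3), dtype="uint8")
_npcircle(canvas, cx=50, cy=50, radius=8, color=(255, 0, 0), transparency=0.0)  # opaque red dot
_npcircle(canvas, cx=70, cy=30, radius=8, color=(0, 255, 0), transparency=0.5)  # half-blended green dot
print(canvas[50, 50], canvas[30, 70])   # roughly [255 0 0] and [0 127 0]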
/deeplabcutcore/post_processing/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | deeplabcutcore2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/deeplabcut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/deeplabcut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | from deeplabcutcore.post_processing.filtering import *
12 | from deeplabcutcore.post_processing.analyze_skeleton import analyzeskeleton
13 |
--------------------------------------------------------------------------------
/deeplabcutcore/post_processing/analyze_skeleton.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | from tqdm import tqdm
4 | import os
5 | from pathlib import Path
6 | import argparse
7 | from scipy.spatial import distance
8 | from math import factorial, atan2, degrees, acos, sqrt, pi
9 |
10 | from deeplabcutcore.utils import auxiliaryfunctions
11 |
12 | # utility functions
13 | def calc_distance_between_points_two_vectors_2d(v1, v2):
14 | """calc_distance_between_points_two_vectors_2d [pairwise distance between vectors points]
15 |
16 | Arguments:
17 | v1 {[np.array]} -- [description]
18 | v2 {[type]} -- [description]
19 |
20 | Raises:
21 | ValueError -- [description]
22 | ValueError -- [description]
23 | ValueError -- [description]
24 |
25 | Returns:
26 | [type] -- [description]
27 |
28 | testing:
29 | >>> v1 = np.zeros((2, 5))
30 | >>> v2 = np.zeros((2, 5))
31 | >>> v2[1, :] = [0, 10, 25, 50, 100]
32 | >>> d = calc_distance_between_points_two_vectors_2d(v1.T, v2.T)
33 | """
34 |
35 | # Check dataformats
36 | if not isinstance(v1, np.ndarray) or not isinstance(v2, np.ndarray):
37 | raise ValueError("Invalid argument data format")
38 | if not v1.shape[1] == 2 or not v2.shape[1] == 2:
39 | raise ValueError("Invalid shape for input arrays")
40 | if not v1.shape[0] == v2.shape[0]:
41 | raise ValueError("Error: input arrays should have the same length")
42 |
43 | # Calculate distance
44 | dist = [distance.euclidean(p1, p2) for p1, p2 in zip(v1, v2)]
45 | return dist
46 |
47 |
48 | def angle_between_points_2d_anticlockwise(p1, p2):
49 | """angle_between_points_2d_clockwise [Determines the angle of a straight line drawn between point one and two.
50 | The number returned, which is a double in degrees, tells us how much we have to rotate
51 | a horizontal line anti-clockwise for it to match the line between the two points.]
52 |
53 | Arguments:
54 | p1 {[np.ndarray, list]} -- np.array or list [ with the X and Y coordinates of the point]
55 | p2 {[np.ndarray, list]} -- np.array or list [ with the X and Y coordinates of the point]
56 |
57 | Returns:
58 | [float] -- [anticlockwise angle between p1, p2 using the inner product and the determinant of the two vectors]
59 |
60 | Testing: - to check: print(zero, ninety, oneeighty, twoseventy)
61 | >>> zero = angle_between_points_2d_anticlockwise([0, 1], [0, 1])
62 | >>> ninety = angle_between_points_2d_anticlockwise([1, 0], [0, 1])
63 | >>> oneeighty = angle_between_points_2d_anticlockwise([0, -1], [0, 1])
64 | >>> twoseventy = angle_between_points_2d_anticlockwise([-1, 0], [0, 1])
65 | >>> ninety2 = angle_between_points_2d_anticlockwise([10, 0], [10, 1])
66 | >>> print(ninety2)
67 | """
68 |
69 | """
70 | Determines the angle of a straight line drawn between point one and two.
71 | The number returned, which is a double in degrees, tells us how much we have to rotate
72 | a horizontal line anti-clockwise for it to match the line between the two points.
73 | """
74 |
75 | xDiff = p2[0] - p1[0]
76 | yDiff = p2[1] - p1[1]
77 | ang = degrees(atan2(yDiff, xDiff))
78 | if ang < 0:
79 | ang += 360
80 | # if not 0 <= ang <+ 360: raise ValueError('Ang was not computed correctly')
81 | return ang
82 |
83 |
84 | def calc_angle_between_vectors_of_points_2d(v1, v2):
85 | """calc_angle_between_vectors_of_points_2d [calculates the clockwise angle between each set of point for two 2d arrays of points]
86 |
87 | Arguments:
88 | v1 {[np.ndarray]} -- [2d array with X,Y position at each timepoint]
89 | v2 {[np.ndarray]} -- [2d array with X,Y position at each timepoint]
90 |
91 | Returns:
92 | [np.ndarray] -- [1d array with the anticlockwise angle between pairwise points in v1, v2]
93 |
94 | Testing:
95 | >>> v1 = np.zeros((2, 4))
96 | >>> v1[1, :] = [1, 1, 1, 1, ]
97 | >>> v2 = np.zeros((2, 4))
98 | >>> v2[0, :] = [0, 1, 0, -1]
99 | >>> v2[1, :] = [1, 0, -1, 0]
100 | >>> a = calc_angle_between_vectors_of_points_2d(v2, v1)
101 | """
102 |
103 | # Check data format
104 | if (
105 | v1 is None
106 | or v2 is None
107 | or not isinstance(v1, np.ndarray)
108 | or not isinstance(v2, np.ndarray)
109 | ):
110 | raise ValueError("Invalid format for input arguments")
111 | if len(v1) != len(v2):
112 | raise ValueError(
113 | "Input arrays should have the same length, instead: ", len(v1), len(v2)
114 | )
115 | if not v1.shape[0] == 2 or not v2.shape[0] == 2:
116 | raise ValueError("Invalid shape for input arrays: ", v1.shape, v2.shape)
117 |
118 | # Calculate
119 | n_points = v1.shape[1]
120 | angs = np.zeros(n_points)
121 | for i in range(v1.shape[1]):
122 | p1, p2 = v1[:, i], v2[:, i]
123 | angs[i] = angle_between_points_2d_anticlockwise(p1, p2)
124 |
125 | return angs
126 |
127 |
128 | # Process single bone
129 | def analyzebone(bp1, bp2):
130 | """[Computes length and orientation of the bone at each frame]
131 |
132 | Arguments:
133 | bp1 {[pd.DataFrame]} -- [x, y, likelihood columns of the first bodypart]
134 | bp2 {[pd.DataFrame]} -- [x, y, likelihood columns of the second bodypart]
135 | """
136 | bp1_pos = np.vstack([bp1.x.values, bp1.y.values]).T
137 | bp2_pos = np.vstack([bp2.x.values, bp2.y.values]).T
138 |
139 | # get bone length and orientation
140 | bone_length = calc_distance_between_points_two_vectors_2d(bp1_pos, bp2_pos)
141 | bone_orientation = calc_angle_between_vectors_of_points_2d(bp1_pos.T, bp2_pos.T)
142 |
143 | # keep the smallest of the two likelihoods
144 | likelihoods = np.vstack([bp1.likelihood.values, bp2.likelihood.values]).T
145 | likelihood = np.min(likelihoods, 1)
146 |
147 | # Create dataframe and return
148 | df = pd.DataFrame.from_dict(
149 | dict(length=bone_length, orientation=bone_orientation, likelihood=likelihood)
150 | )
151 | # df.index.name=name
152 |
153 | return df
154 |
155 |
156 | # MAIN FUNC
157 | def analyzeskeleton(
158 | config,
159 | videos,
160 | videotype="avi",
161 | shuffle=1,
162 | trainingsetindex=0,
163 | save_as_csv=False,
164 | destfolder=None,
165 | ):
166 | """
167 | Extracts length and orientation of each "bone" of the skeleton as defined in the config file.
168 |
169 | Parameters
170 | ----------
171 | config : string
172 | Full path of the config.yaml file as a string.
173 |
174 | videos : list
175 | A list of strings containing the full paths to videos for analysis, or a path to a directory where all videos with the same extension are stored.
176 |
177 | shuffle : int, optional
178 | The shuffle index of training dataset. The extracted frames will be stored in the labeled-dataset for
179 | the corresponding shuffle of training dataset. Default is set to 1
180 |
181 | trainingsetindex: int, optional
182 | Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).
183 |
184 | save_as_csv: bool, optional
185 | Saves the predictions in a .csv file. The default is ``False``; if provided it must be either ``True`` or ``False``
186 |
187 | destfolder: string, optional
188 | Specifies the destination folder for analysis data (default is the path of the video). Note that for subsequent analysis this
189 | folder also needs to be passed.
190 | """
191 | # Load config file, scorer and videos
192 | cfg = auxiliaryfunctions.read_config(config)
193 | DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
194 | cfg, shuffle, trainFraction=cfg["TrainingFraction"][trainingsetindex]
195 | )
196 |
197 | Videos = auxiliaryfunctions.Getlistofvideos(videos, videotype)
198 | for video in Videos:
199 | print("Processing %s" % (video))
200 | if destfolder is None:
201 | destfolder = str(Path(video).parents[0])
202 |
203 | vname = Path(video).stem
204 | notanalyzed, outdataname, sourcedataname, scorer = auxiliaryfunctions.CheckifPostProcessing(
205 | destfolder, vname, DLCscorer, DLCscorerlegacy, suffix="_skeleton"
206 | )
207 | if notanalyzed:
208 | Dataframe = pd.read_hdf(sourcedataname, "df_with_missing")
209 | # Process skeleton
210 | bones = {}
211 | for bp1, bp2 in cfg["skeleton"]:
212 | name = "{}_{}".format(bp1, bp2)
213 | bones[name] = analyzebone(
214 | Dataframe[scorer][bp1], Dataframe[scorer][bp2]
215 | )
216 |
217 | skeleton = pd.concat(bones, axis=1)
218 | # save
219 | skeleton.to_hdf(outdataname, "df_with_missing", format="table", mode="w")
220 | if save_as_csv:
221 | skeleton.to_csv(outdataname.split(".h5")[0] + ".csv")
222 |
223 |
224 | if __name__ == "__main__":
225 | parser = argparse.ArgumentParser()
226 | parser.add_argument("config")
227 | parser.add_argument("videos")
228 | cli_args = parser.parse_args()
229 | analyzeskeleton(cli_args.config, cli_args.videos)
--------------------------------------------------------------------------------
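A hedged usage sketch for the helpers and the main entry point defined above; the config and video paths are hypothetical placeholders for a real project:

import numpy as np
from deeplabcutcore.post_processing.analyze_skeleton import (
    analyzeskeleton,
    calc_distance_between_points_two_vectors_2d,
    calc_angle_between_vectors_of_points_2d,
)

# Per-frame bone length and orientation between two tracked points (3 frames).
p_a = np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])      # bodypart 1, shape (frames, 2)
p_b = np.array([[0.0, 10.0], [10.0, 0.0], [0.0, -10.0]])  # bodypart 2, shape (frames, 2)
print(calc_distance_between_points_two_vectors_2d(p_a, p_b))  # [10.0, 10.0, 10.0]
print(calc_angle_between_vectors_of_points_2d(p_a.T, p_b.T))  # [ 90.   0. 270.]

# Full skeleton analysis of an already analyzed video (placeholder paths).
analyzeskeleton(
    "/path/to/project/config.yaml",
    ["/path/to/project/videos/reachingvideo1.avi"],
    save_as_csv=True,
)
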
/deeplabcutcore/post_processing/filtering.py:
--------------------------------------------------------------------------------
1 | """
2 | deeplabcutcore2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/deeplabcut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/deeplabcut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 | import numpy as np
11 | import os
12 | from pathlib import Path
13 | import pandas as pd
14 |
15 | from deeplabcutcore.utils import auxiliaryfunctions, visualization
16 | from deeplabcutcore.utils import frameselectiontools
17 | from deeplabcutcore.refine_training_dataset.outlier_frames import FitSARIMAXModel
18 |
19 | import argparse
20 | from tqdm import tqdm
21 | import matplotlib.pyplot as plt
22 | from skimage.util import img_as_ubyte
23 | from scipy import signal
24 |
25 |
26 | def filterpredictions(
27 | config,
28 | video,
29 | videotype="avi",
30 | shuffle=1,
31 | trainingsetindex=0,
32 | filtertype="median",
33 | windowlength=5,
34 | p_bound=0.001,
35 | ARdegree=3,
36 | MAdegree=1,
37 | alpha=0.01,
38 | save_as_csv=True,
39 | destfolder=None,
40 | ):
41 | """
42 |
43 | Fits frame-by-frame pose predictions with ARIMA model (filtertype='arima') or median filter (default).
44 |
45 | Parameters
46 | ----------
47 | config : string
48 | Full path of the config.yaml file as a string.
49 |
50 | video : string
51 | Full path of the video to extract the frame from. Make sure that this video is already analyzed.
52 |
53 | shuffle : int, optional
54 | The shuffle index of training dataset. The extracted frames will be stored in the labeled-dataset for
55 | the corresponding shuffle of training dataset. Default is set to 1
56 |
57 | trainingsetindex: int, optional
58 | Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).
59 |
60 | filtertype: string
61 | Select which filter to use: 'arima' or 'median'.
62 |
63 | windowlength: int
64 | For filtertype='median' filters the input array using a local window-size given by windowlength. The array will automatically be zero-padded.
65 | https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.medfilt.html The windowlength should be an odd number.
66 |
67 | p_bound: float between 0 and 1, optional
68 | For filtertype 'arima' this parameter defines the likelihood threshold
69 | below which a body part will be considered as missing data for filtering purposes.
70 |
71 | ARdegree: int, optional
72 | For filtertype 'arima': autoregressive degree of the SARIMAX model.
73 | see https://www.statsmodels.org/dev/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html
74 |
75 | MAdegree: int
76 | For filtertype 'arima': moving average degree of the SARIMAX model.
77 | See https://www.statsmodels.org/dev/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html
78 |
79 | alpha: float
80 | Significance level for detecting outliers based on confidence interval of fitted SARIMAX model.
81 |
82 | save_as_csv: bool, optional
83 | Saves the predictions in a .csv file. The default is ``True``; if provided it must be either ``True`` or ``False``.
84 |
85 | destfolder: string, optional
86 | Specifies the destination folder for analysis data (default is the path of the video). Note that for subsequent analysis this
87 | folder also needs to be passed.
88 |
89 | Example
90 | --------
91 | Arima model:
92 | deeplabcutcore.filterpredictions('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtertype='arima',ARdegree=5,MAdegree=2)
93 |
94 | Use median filter over 10 bins:
95 | deeplabcutcore.filterpredictions('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,windowlength=10)
96 |
97 | One can then use the filtered rather than the frame-by-frame predictions by calling:
98 |
99 | deeplabcutcore.plot_trajectories('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtered=True)
100 |
101 | deeplabcutcore.create_labeled_video('C:\\myproject\\reaching-task\\config.yaml',['C:\\myproject\\trailtracking-task\\test.mp4'],shuffle=3,filtered=True)
102 | --------
103 |
104 | Returns a filtered pandas DataFrame with the same structure as the normal output of the network.
105 | """
106 | cfg = auxiliaryfunctions.read_config(config)
107 | DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
108 | cfg, shuffle, trainFraction=cfg["TrainingFraction"][trainingsetindex]
109 | )
110 | Videos = auxiliaryfunctions.Getlistofvideos(video, videotype)
111 |
112 | if len(Videos) > 0:
113 | for video in Videos:
114 | if destfolder is None:
115 | destfolder = str(Path(video).parents[0])
116 |
117 | print("Filtering with %s model %s" % (filtertype, video))
118 | videofolder = destfolder
119 | vname = Path(video).stem
120 | notanalyzed, outdataname, sourcedataname, scorer = auxiliaryfunctions.CheckifPostProcessing(
121 | destfolder, vname, DLCscorer, DLCscorerlegacy, suffix="filtered"
122 | )
123 | if notanalyzed:
124 | Dataframe = pd.read_hdf(sourcedataname, "df_with_missing")
125 | for bpindex, bp in tqdm(enumerate(cfg["bodyparts"])):
126 | pdindex = pd.MultiIndex.from_product(
127 | [[scorer], [bp], ["x", "y", "likelihood"]],
128 | names=["scorer", "bodyparts", "coords"],
129 | )
130 | x, y, p = (
131 | Dataframe[scorer][bp]["x"].values,
132 | Dataframe[scorer][bp]["y"].values,
133 | Dataframe[scorer][bp]["likelihood"].values,
134 | )
135 |
136 | if filtertype == "arima":
137 | meanx, CIx = FitSARIMAXModel(
138 | x, p, p_bound, alpha, ARdegree, MAdegree, False
139 | )
140 | meany, CIy = FitSARIMAXModel(
141 | y, p, p_bound, alpha, ARdegree, MAdegree, False
142 | )
143 |
144 | meanx[0] = x[0]
145 | meany[0] = y[0]
146 | else:
147 | meanx = signal.medfilt(x, kernel_size=windowlength)
148 | meany = signal.medfilt(y, kernel_size=windowlength)
149 |
150 | if bpindex == 0:
151 | data = pd.DataFrame(
152 | np.hstack(
153 | [
154 | np.expand_dims(meanx, axis=1),
155 | np.expand_dims(meany, axis=1),
156 | np.expand_dims(p, axis=1),
157 | ]
158 | ),
159 | columns=pdindex,
160 | )
161 | else:
162 | item = pd.DataFrame(
163 | np.hstack(
164 | [
165 | np.expand_dims(meanx, axis=1),
166 | np.expand_dims(meany, axis=1),
167 | np.expand_dims(p, axis=1),
168 | ]
169 | ),
170 | columns=pdindex,
171 | )
172 | data = pd.concat([data.T, item.T]).T
173 |
174 | data.to_hdf(outdataname, "df_with_missing", format="table", mode="w")
175 | if save_as_csv:
176 | print("Saving filtered csv poses!")
177 | data.to_csv(outdataname.split(".h5")[0] + ".csv")
178 |
179 |
180 | if __name__ == "__main__":
181 | parser = argparse.ArgumentParser()
182 | parser.add_argument("config")
183 | parser.add_argument("videos")
184 | cli_args = parser.parse_args()
185 | filterpredictions(cli_args.config, cli_args.videos)
--------------------------------------------------------------------------------
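To make the effect of windowlength concrete, here is a small self-contained sketch of the median-filter step that filterpredictions applies to each x and y coordinate (synthetic data, not project output):

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
x = np.linspace(0, 100, 200) + rng.normal(0, 1, 200)  # noisy x-trajectory
x[50] = 500.0                                          # a single gross outlier

# Same call as in filterpredictions for filtertype="median"; the kernel must be odd.
x_filtered = signal.medfilt(x, kernel_size=5)
print(x[48:53].round(1))           # the outlier is visible in the raw trace
print(x_filtered[48:53].round(1))  # and suppressed in the filtered trace
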
/deeplabcutcore/refine_training_dataset/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | deeplabcutcore2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/deeplabcut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/deeplabcut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | from deeplabcutcore.refine_training_dataset.outlier_frames import *
12 |
--------------------------------------------------------------------------------
/deeplabcutcore/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from deeplabcutcore.utils.make_labeled_video import *
2 | from deeplabcutcore.utils.auxiliaryfunctions import *
3 | from deeplabcutcore.utils.video_processor import *
4 | from deeplabcutcore.utils.plotting import *
5 |
6 | from deeplabcutcore.utils.conversioncode import *
7 | from deeplabcutcore.utils.frameselectiontools import *
8 | from deeplabcutcore.utils.auxfun_videos import *
9 |
--------------------------------------------------------------------------------
/deeplabcutcore/utils/auxfun_models.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | import os
12 | from deeplabcutcore.utils import auxiliaryfunctions
13 | from pathlib import Path
14 |
15 |
16 | def Check4weights(modeltype, parent_path, num_shuffles):
17 | """ gets local path to network weights and checks if they are present. If not, downloads them from tensorflow.org """
18 | if "resnet_50" == modeltype:
19 | model_path = (
20 | parent_path
21 | / "pose_estimation_tensorflow/models/pretrained/resnet_v1_50.ckpt"
22 | )
23 | elif "resnet_101" == modeltype:
24 | model_path = (
25 | parent_path
26 | / "pose_estimation_tensorflow/models/pretrained/resnet_v1_101.ckpt"
27 | )
28 | elif "resnet_152" == modeltype:
29 | model_path = (
30 | parent_path
31 | / "pose_estimation_tensorflow/models/pretrained/resnet_v1_152.ckpt"
32 | )
33 | elif "mobilenet" in modeltype:
34 | model_path = Path(
35 | os.path.join(
36 | parent_path,
37 | "pose_estimation_tensorflow/models/pretrained/"
38 | + str(modeltype)
39 | + "_224.ckpt",
40 | )
41 | )
42 | else:
43 | print(
44 | "Currently ResNet (50, 101, 152) and MobilenetV2 (1, 0.75, 0.5 and 0.35) are supported, please change 'resnet' entry in config.yaml!"
45 | )
46 | num_shuffles = -1 # thus the loop below is empty...
47 | model_path = parent_path
48 |
49 | if num_shuffles > 0:
50 | if not model_path.is_file():
51 | Downloadweights(modeltype, model_path)
52 |
53 | return str(model_path), num_shuffles
54 |
55 |
56 | def Downloadweights(modeltype, model_path):
57 | """
58 | Downloads the ImageNet pretrained weights for ResNets, MobileNets et al. from TensorFlow...
59 | """
60 | import urllib
61 | import tarfile
62 | from io import BytesIO
63 |
64 | target_dir = model_path.parents[0]
65 | neturls = auxiliaryfunctions.read_plainconfig(
66 | target_dir / "pretrained_model_urls.yaml"
67 | )
68 | try:
69 | url = neturls[modeltype]
70 | print("Downloading a ImageNet-pretrained model from {}....".format(url))
71 | response = urllib.request.urlopen(url)
72 | with tarfile.open(fileobj=BytesIO(response.read()), mode="r:gz") as tar:
73 | tar.extractall(path=target_dir)
74 | except KeyError:
75 | print("Model does not exist: ", modeltype)
76 | print("Pick one of the following: ", neturls.keys())
77 |
78 |
79 | def DownloadModel(modelname, target_dir):
80 | """
81 | Downloads a DeepLabCut Model Zoo Project
82 | """
83 | import urllib
84 | import urllib.request
85 | import tarfile
86 | from io import BytesIO
87 | from tqdm import tqdm
88 |
89 | def show_progress(count, block_size, total_size):
90 | pbar.update(block_size)
91 |
92 | def tarfilenamecutting(tarf):
93 | """' auxfun to extract folder path
94 | ie. /xyz-trainsetxyshufflez/
95 | """
96 | for memberid, member in enumerate(tarf.getmembers()):
97 | if memberid == 0:
98 | parent = str(member.path)
99 | l = len(parent) + 1
100 | if member.path.startswith(parent):
101 | member.path = member.path[l:]
102 | yield member
103 |
104 | # TODO: update how DLC path is obtained
105 | import deeplabcutcore
106 |
107 | neturls = auxiliaryfunctions.read_plainconfig(
108 | os.path.join(
109 | os.path.dirname(deeplabcutcore.__file__),
110 | "pose_estimation_tensorflow/models/pretrained/pretrained_model_urls.yaml",
111 | )
112 | )
113 | if modelname in neturls.keys():
114 | url = neturls[modelname]
115 | response = urllib.request.urlopen(url)
116 | print(
117 | "Downloading the model from the DeepLabCut server @Harvard -> Go Crimson!!! {}....".format(
118 | url
119 | )
120 | )
121 | total_size = int(response.getheader("Content-Length"))
122 | pbar = tqdm(unit="B", total=total_size, position=0)
123 | filename, _ = urllib.request.urlretrieve(url, reporthook=show_progress)
124 | with tarfile.open(filename, mode="r:gz") as tar:
125 | tar.extractall(target_dir, members=tarfilenamecutting(tar))
126 | else:
127 | models = [
128 | fn
129 | for fn in neturls.keys()
130 | if "resnet_" not in fn and "mobilenet_" not in fn
131 | ]
132 | print("Model does not exist: ", modelname)
133 | print("Pick one of the following: ", models)
134 |
135 |
136 | def download_mpii_weights(wd):
137 | """ Downloads weights pretrained on human data from DeeperCut. """
138 | import urllib.request
139 | from pathlib import Path
140 |
141 | url = [
142 | "https://datasets.d2.mpi-inf.mpg.de/deepercut-models-tensorflow/mpii-single-resnet-101.data-00000-of-00001",
143 | "https://datasets.d2.mpi-inf.mpg.de/deepercut-models-tensorflow/mpii-single-resnet-101.meta",
144 | "https://datasets.d2.mpi-inf.mpg.de/deepercut-models-tensorflow/mpii-single-resnet-101.index",
145 | ]
146 | for i in url:
147 | file = str(Path(i).name)
148 | filename = file.replace("mpii-single-resnet-101", "snapshot-103000")
149 | filename = os.path.join(wd, filename)
150 | if os.path.isfile(filename):
151 | print("Weights already present!")
152 | break # not checking all the 3 files.
153 | else:
154 | urllib.request.urlretrieve(i, filename)
155 |
156 | return filename
157 |
--------------------------------------------------------------------------------
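A hedged sketch of how Check4weights resolves (and, if missing, downloads) the ImageNet-pretrained checkpoint for a given backbone; parent_path is assumed to be the installed deeplabcutcore package directory:

from pathlib import Path
import deeplabcutcore
from deeplabcutcore.utils.auxfun_models import Check4weights

parent_path = Path(deeplabcutcore.__file__).parent
# Resolves the checkpoint path and downloads the weights if they are not present yet;
# num_shuffles is echoed back (it is set to -1 for unsupported network types).
model_path, num_shuffles = Check4weights("resnet_50", parent_path, num_shuffles=1)
print(model_path, num_shuffles)
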
/deeplabcutcore/utils/auxfun_multianimal.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | import os, pickle
12 | import pandas as pd
13 | import numpy as np
14 | from deeplabcutcore.utils.auxiliaryfunctions import read_config, write_config
15 | from pathlib import Path
16 |
17 |
18 | def extractindividualsandbodyparts(cfg):
19 | individuals = cfg["individuals"]
20 | if len(cfg["uniquebodyparts"]) > 0:
21 | individuals.append("single")
22 | return individuals, cfg["uniquebodyparts"], cfg["multianimalbodyparts"]
23 |
--------------------------------------------------------------------------------
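A minimal sketch of what extractindividualsandbodyparts expects and returns, using a hand-written multi-animal config dict rather than one read from disk:

from deeplabcutcore.utils.auxfun_multianimal import extractindividualsandbodyparts

cfg = {
    "individuals": ["mouse1", "mouse2"],
    "uniquebodyparts": ["corner1"],               # non-empty, so "single" is appended
    "multianimalbodyparts": ["snout", "tailbase"],
}
individuals, unique_bps, ma_bps = extractindividualsandbodyparts(cfg)
print(individuals)  # ['mouse1', 'mouse2', 'single']
print(unique_bps)   # ['corner1']
print(ma_bps)       # ['snout', 'tailbase']
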
/deeplabcutcore/utils/auxiliaryfunctions_3d.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | import numpy as np
12 | import matplotlib.pyplot as plt
13 | from mpl_toolkits.mplot3d import Axes3D
14 | import cv2
15 | import os
16 | import pickle
17 | import pandas as pd
18 | from pathlib import Path
19 | import glob
20 |
21 |
22 | def Foldernames3Dproject(cfg_3d):
23 | """ Definitions of subfolders in 3D projects """
24 |
25 | img_path = os.path.join(cfg_3d["project_path"], "calibration_images")
26 | path_corners = os.path.join(cfg_3d["project_path"], "corners")
27 | path_camera_matrix = os.path.join(cfg_3d["project_path"], "camera_matrix")
28 | path_undistort = os.path.join(cfg_3d["project_path"], "undistortion")
29 |
30 | return img_path, path_corners, path_camera_matrix, path_undistort
31 |
32 |
33 | def create_empty_df(dataframe, scorer, flag):
34 | # Creates an empty dataFrame of same shape as df_side_view
35 | # flag = 2d or 3d
36 |
37 | df = dataframe
38 | bodyparts = df.columns.get_level_values(1)
39 | _, idx = np.unique(bodyparts, return_index=True)
40 | bodyparts = list(bodyparts[np.sort(idx)])
41 | a = np.empty((df.shape[0], 3))
42 | a[:] = np.nan
43 | dataFrame = None
44 | for bodypart in bodyparts:
45 | if flag == "2d":
46 | pdindex = pd.MultiIndex.from_product(
47 | [[scorer], [bodypart], ["x", "y", "likelihood"]],
48 | names=["scorer", "bodyparts", "coords"],
49 | )
50 | elif flag == "3d":
51 | pdindex = pd.MultiIndex.from_product(
52 | [[scorer], [bodypart], ["x", "y", "z"]],
53 | names=["scorer", "bodyparts", "coords"],
54 | )
55 | frame = pd.DataFrame(a, columns=pdindex, index=range(0, df.shape[0]))
56 | dataFrame = pd.concat([frame, dataFrame], axis=1)
57 | return (dataFrame, scorer, bodyparts)
58 |
59 |
60 | def compute_triangulation_calibration_images(
61 | stereo_matrix, projectedPoints1, projectedPoints2, path_undistort, cfg_3d, plot=True
62 | ):
63 | """
64 | Performs triangulation of the calibration images.
65 | """
66 | triangulate = []
67 | P1 = stereo_matrix["P1"]
68 | P2 = stereo_matrix["P2"]
69 | cmap = cfg_3d["colormap"]
70 | colormap = plt.get_cmap(cmap)
71 | markerSize = cfg_3d["dotsize"]
72 | markerType = cfg_3d["markerType"]
73 |
74 | for i in range(projectedPoints1.shape[0]):
75 | X_l = triangulatePoints(P1, P2, projectedPoints1[i], projectedPoints2[i])
76 | triangulate.append(X_l)
77 | triangulate = np.asanyarray(triangulate)
78 |
79 | # Plotting
80 | if plot:
81 | col = colormap(np.linspace(0, 1, triangulate.shape[0]))
82 | fig = plt.figure()
83 | ax = fig.add_subplot(111, projection="3d")
84 |
85 | for i in range(triangulate.shape[0]):
86 | xs = triangulate[i, 0, :]
87 | ys = triangulate[i, 1, :]
88 | zs = triangulate[i, 2, :]
89 | ax.scatter(xs, ys, zs, c=col[i], marker=markerType, s=markerSize)
90 | ax.set_xlabel("X")
91 | ax.set_ylabel("Y")
92 | ax.set_zlabel("Z")
93 | plt.savefig(os.path.join(str(path_undistort), "checkerboard_3d.png"))
94 | return triangulate
95 |
96 |
97 | def triangulatePoints(P1, P2, x1, x2):
98 | X = cv2.triangulatePoints(P1[:3], P2[:3], x1, x2)
99 | return X / X[3]
100 |
101 |
102 | def get_camerawise_videos(path, cam_names, videotype):
103 | """
104 | This function returns the list of videos corresponding to the camera names specified in the cam_names.
105 | e.g. if cam_names = ['camera-1','camera-2']
106 |
107 | then it will return [['somename-camera-1-othername.avi', 'somename-camera-2-othername.avi']]
108 | """
109 | import glob
110 | from pathlib import Path
111 |
112 | vid = []
113 |
114 | # Find videos only specific to the cam names
115 | videos = [
116 | glob.glob(os.path.join(path, str("*" + cam_names[i] + "*" + videotype)))
117 | for i in range(len(cam_names))
118 | ]
119 | videos = [y for x in videos for y in x]
120 |
121 | # Exclude the labeled video files
122 | if "." in videotype:
123 | file_to_exclude = str("labeled" + videotype)
124 | else:
125 | file_to_exclude = str("labeled." + videotype)
126 | videos = [v for v in videos if os.path.isfile(v) and not (file_to_exclude in v)]
127 | video_list = []
128 | cam = cam_names[0] # camera1
129 | vid.append(
130 | [
131 | name
132 | for name in glob.glob(os.path.join(path, str("*" + cam + "*" + videotype)))
133 | ]
134 | ) # all videos with cam
135 | # print("here is what I found",vid)
136 | for k in range(len(vid[0])):
137 | if cam in str(Path(vid[0][k]).stem):
138 | ending = Path(vid[0][k]).suffix
139 | pref = str(Path(vid[0][k]).stem).split(cam)[0]
140 | suf = str(Path(vid[0][k]).stem).split(cam)[1]
141 | if pref == "":
142 | if suf == "":
143 | print("Strange naming convention on your part. Respect.")
144 | else:
145 | putativecam2name = os.path.join(path, cam_names[1] + suf + ending)
146 | else:
147 | if suf == "":
148 | putativecam2name = os.path.join(path, pref + cam_names[1] + ending)
149 | else:
150 | putativecam2name = os.path.join(
151 | path, pref + cam_names[1] + suf + ending
152 | )
153 | # print([os.path.join(path,pref+cam+suf+ending),putativecam2name])
154 | if os.path.isfile(putativecam2name):
155 | # found a pair!!!
156 | video_list.append(
157 | [os.path.join(path, pref + cam + suf + ending), putativecam2name]
158 | )
159 | return video_list
160 |
161 |
162 | def Get_list_of_triangulated_and_videoFiles(
163 | filepath, videotype, scorer_3d, cam_names, videofolder
164 | ):
165 | """
166 | Returns the list of triangulated h5 and the corresponding video files.
167 | """
168 |
169 | prefix = []
170 | suffix = []
171 | file_list = []
172 | string_to_search = scorer_3d + ".h5"
173 |
174 | # Checks if filepath is a directory
175 | if [os.path.isdir(i) for i in filepath] == [True]:
176 | """
177 | Analyzes all the videos in the directory.
178 | """
179 | print("Analyzing all the videos in the directory")
180 | videofolder = filepath[0]
181 | cwd = os.getcwd()
182 | os.chdir(videofolder)
183 | triangulated_file_list = [
184 | fn for fn in os.listdir(os.curdir) if (string_to_search in fn)
185 | ]
186 | video_list = get_camerawise_videos(videofolder, cam_names, videotype)
187 | os.chdir(cwd)
188 | triangulated_folder = videofolder
189 | else:
190 | triangulated_file_list = [
191 | str(Path(fn).name) for fn in filepath if (string_to_search in fn)
192 | ]
193 | triangulated_folder = [
194 | str(Path(fn).parents[0]) for fn in filepath if (string_to_search in fn)
195 | ]
196 | triangulated_folder = triangulated_folder[0]
197 |
198 | if videofolder is None:
199 | videofolder = str(Path(filepath[0]).parents[0])
200 | video_list = get_camerawise_videos(videofolder, cam_names, videotype)
201 |
202 | # Get the filename of the triangulated file excluding the scorer name and remove any '-' or '_' from it
203 | filename = [i.split(string_to_search)[0] for i in triangulated_file_list]
204 | for i in range(len(filename)):
205 | if filename[i][-1] == "_" or filename[i][-1] == "-":
206 | filename[i] = filename[i][:-1]
207 | if filename[i][0] == "_" or filename[i][0] == "-":
208 | filename[i] = filename[i][1:]
209 |
210 | # Get the suffix and prefix of the video filenames so that they can be used for matching the triangulated file names.
211 | for i in range(len(video_list)):
212 | pre = [
213 | str(Path(video_list[i][0]).stem).split(cam_names[0])[0],
214 | str(Path(video_list[i][1]).stem).split(cam_names[1])[0],
215 | ]
216 | suf = [
217 | str(Path(video_list[i][0]).stem).split(cam_names[0])[-1],
218 | str(Path(video_list[i][1]).stem).split(cam_names[1])[-1],
219 | ]
220 | for i in range(len(cam_names)):
221 | if pre[i] == "":
222 | pass
223 | elif pre[i][-1] == "_" or pre[i][-1] == "-":
224 | pre[i] = pre[i][:-1]
225 | if suf[i] == "":
226 | pass
227 | elif suf[i][0] == "_" or suf[i][0] == "-":
228 | suf[i] = suf[i][1:]
229 | suffix.append(suf)
230 | prefix.append(pre)
231 |
232 | # Match the suffix and prefix with the triangulated file name and return the list with triangulated file and corresponding video files.
233 | for k in range(len(filename)):
234 | for j in range(len(prefix)):
235 | if (prefix[j][0] in filename[k] and prefix[j][1] in filename[k]) and (
236 | suffix[j][0] in filename[k] and suffix[j][1] in filename[k]
237 | ):
238 | triangulated_file = glob.glob(
239 | os.path.join(
240 | triangulated_folder,
241 | str("*" + filename[k] + "*" + string_to_search),
242 | )
243 | )
244 | vfiles = get_camerawise_videos(videofolder, cam_names, videotype)
245 | vfiles = [
246 | z for z in vfiles if prefix[j][0] in z[0] and suffix[j][0] in z[1]
247 | ][0]
248 | file_list.append(triangulated_file + vfiles)
249 |
250 | return file_list
251 |
252 |
253 | def SaveMetadata3d(metadatafilename, metadata):
254 | with open(metadatafilename, "wb") as f:
255 | pickle.dump(metadata, f, pickle.HIGHEST_PROTOCOL)
256 |
257 |
258 | def LoadMetadata3d(metadatafilename):
259 | with open(metadatafilename, "rb") as f:
260 | metadata = pickle.load(f)
261 | return metadata
262 |
--------------------------------------------------------------------------------
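As a sanity check of the triangulatePoints wrapper, this hedged sketch projects a known 3D point through two synthetic camera matrices and recovers it (the intrinsics and baseline are invented for illustration):

import numpy as np
from deeplabcutcore.utils.auxiliaryfunctions_3d import triangulatePoints

# Synthetic rectified stereo rig: identical intrinsics, second camera shifted along x.
K = np.array([[700.0, 0.0, 320.0],
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])
P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = K @ np.hstack([np.eye(3), np.array([[-100.0], [0.0], [0.0]])])

X = np.array([20.0, -10.0, 1000.0, 1.0])  # known 3D point in homogeneous coordinates
x1 = (P1 @ X)[:2] / (P1 @ X)[2]           # its projection in camera 1
x2 = (P2 @ X)[:2] / (P2 @ X)[2]           # its projection in camera 2

X_rec = triangulatePoints(P1, P2, x1.reshape(2, 1), x2.reshape(2, 1))
print(X_rec[:3].ravel())                  # approximately [20., -10., 1000.]
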
/deeplabcutcore/utils/video_processor.py:
--------------------------------------------------------------------------------
1 | """
2 | Author: Hao Wu
3 | hwu01@g.harvard.edu
4 |
5 | This is the helper class for video reading and saving in deeplabcutcore.
6 | Updated by AM
7 |
8 | You can set various codecs below,
9 | fourcc = cv2.VideoWriter_fourcc(*'MJPG')
10 | i.e. 'XVID'
11 | """
12 |
13 | import numpy as np
14 | import cv2
15 |
16 |
17 | class VideoProcessor(object):
18 | """
19 | Base class for a video processing unit, implementation is required for video loading and saving
20 |
21 | sh and sw are the output height and width respectively.
22 | """
23 |
24 | def __init__(
25 | self, fname="", sname="", nframes=-1, fps=30, codec="X264", sh="", sw=""
26 | ):
27 | self.fname = fname
28 | self.sname = sname
29 | self.nframes = nframes
30 | self.codec = codec
31 | self.h = 0
32 | self.w = 0
33 | self.FPS = fps
34 | self.nc = 3
35 | self.i = 0
36 |
37 | try:
38 | if self.fname != "":
39 | self.vid = self.get_video()
40 | self.get_info()
41 | self.sh = 0
42 | self.sw = 0
43 | if self.sname != "":
44 | if sh == "" and sw == "":
45 | self.sh = self.h
46 | self.sw = self.w
47 | else:
48 | self.sw = sw
49 | self.sh = sh
50 | self.svid = self.create_video()
51 |
52 | except Exception as ex:
53 | print("Error: %s", ex)
54 |
55 | def load_frame(self):
56 | try:
57 | frame = self._read_frame()
58 | self.i += 1
59 | return frame
60 | except Exception as ex:
61 | print("Error: %s", ex)
62 |
63 | def height(self):
64 | return self.h
65 |
66 | def width(self):
67 | return self.w
68 |
69 | def fps(self):
70 | return self.FPS
71 |
72 | def counter(self):
73 | return self.i
74 |
75 | def frame_count(self):
76 | return self.nframes
77 |
78 | def get_video(self):
79 | """
80 | implement your own
81 | """
82 | pass
83 |
84 | def get_info(self):
85 | """
86 | implement your own
87 | """
88 | pass
89 |
90 | def create_video(self):
91 | """
92 | implement your own
93 | """
94 | pass
95 |
96 | def _read_frame(self):
97 | """
98 | implement your own
99 | """
100 | pass
101 |
102 | def save_frame(self, frame):
103 | """
104 | implement your own
105 | """
106 | pass
107 |
108 | def close(self):
109 | """
110 | implement your own
111 | """
112 | pass
113 |
114 |
115 | class VideoProcessorCV(VideoProcessor):
116 | """
117 | OpenCV implementation of VideoProcessor
118 | requires opencv-python==3.4.0.12
119 | """
120 |
121 | def __init__(self, *args, **kwargs):
122 | super(VideoProcessorCV, self).__init__(*args, **kwargs)
123 |
124 | def get_video(self):
125 | return cv2.VideoCapture(self.fname)
126 |
127 | def get_info(self):
128 | self.w = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH))
129 | self.h = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
130 | all_frames = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
131 | self.FPS = self.vid.get(cv2.CAP_PROP_FPS)
132 | self.nc = 3
133 | if self.nframes == -1 or self.nframes > all_frames:
134 | self.nframes = all_frames
135 | print(self.nframes)
136 |
137 | def create_video(self):
138 | fourcc = cv2.VideoWriter_fourcc(*self.codec)
139 | return cv2.VideoWriter(self.sname, fourcc, self.FPS, (self.sw, self.sh), True)
140 |
141 | def _read_frame(self): # return RGB (rather than BGR)!
142 | # return cv2.cvtColor(np.flip(self.vid.read()[1],2), cv2.COLOR_BGR2RGB)
143 | return np.flip(self.vid.read()[1], 2)
144 |
145 | def save_frame(self, frame):
146 | self.svid.write(np.flip(frame, 2))
147 |
148 | def close(self):
149 | self.svid.release()
150 | self.vid.release()
151 |
--------------------------------------------------------------------------------
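A hedged sketch of the intended read/write loop for VideoProcessorCV; the input and output file names are placeholders:

from deeplabcutcore.utils.video_processor import VideoProcessorCV

# Copy a clip frame by frame, e.g. as a template for per-frame processing.
clip = VideoProcessorCV(fname="input.avi", sname="output.avi", codec="mp4v")
for _ in range(clip.frame_count()):
    frame = clip.load_frame()  # RGB frame (BGR is flipped on read)
    # ... modify `frame` here if desired ...
    clip.save_frame(frame)     # flipped back to BGR and written out
clip.close()
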
/deeplabcutcore/utils/visualization.py:
--------------------------------------------------------------------------------
1 | """
2 | DeepLabCut2.0 Toolbox (deeplabcut.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/DeepLabCut
5 | Please see AUTHORS for contributors.
6 |
7 | https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 |
12 | import os
13 | import numpy as np
14 | import matplotlib as mpl
15 | import platform
16 | from pathlib import Path
17 |
18 | # assume headless == DLClight
19 | mpl.use("AGG") # anti-grain geometry engine #https://matplotlib.org/faq/usage_faq.html
20 | import matplotlib.pyplot as plt
21 | from deeplabcutcore.utils.auxiliaryfunctions import attempttomakefolder
22 | from matplotlib.collections import LineCollection
23 | from skimage import io
24 | from tqdm import trange
25 |
26 |
27 | def get_cmap(n, name="hsv"):
28 | """Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
29 | RGB color; the keyword argument name must be a standard mpl colormap name."""
30 | return plt.cm.get_cmap(name, n)
31 |
32 |
33 | def MakeLabeledImage(
34 | DataCombined,
35 | imagenr,
36 | pcutoff,
37 | imagebasefolder,
38 | Scorers,
39 | bodyparts,
40 | colors,
41 | cfg,
42 | labels=["+", ".", "x"],
43 | scaling=1,
44 | ):
45 | """Creating a labeled image with the original human labels, as well as the DeepLabCut's! """
46 | from skimage import io
47 |
48 | alphavalue = cfg["alphavalue"] # .5
49 | dotsize = cfg["dotsize"] # =15
50 |
51 | plt.axis("off")
52 | im = io.imread(os.path.join(imagebasefolder, DataCombined.index[imagenr]))
53 | if np.ndim(im) > 2: # color image!
54 | h, w, numcolors = np.shape(im)
55 | else:
56 | h, w = np.shape(im)
57 | plt.figure(
58 | frameon=False, figsize=(w * 1.0 / 100 * scaling, h * 1.0 / 100 * scaling)
59 | )
60 | plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
61 | plt.imshow(im, "gray")
62 | for scorerindex, loopscorer in enumerate(Scorers):
63 | for bpindex, bp in enumerate(bodyparts):
64 | if np.isfinite(
65 | DataCombined[loopscorer][bp]["y"][imagenr]
66 | + DataCombined[loopscorer][bp]["x"][imagenr]
67 | ):
68 | y, x = (
69 | int(DataCombined[loopscorer][bp]["y"][imagenr]),
70 | int(DataCombined[loopscorer][bp]["x"][imagenr]),
71 | )
72 | if cfg["scorer"] not in loopscorer:
73 | p = DataCombined[loopscorer][bp]["likelihood"][imagenr]
74 | if p > pcutoff:
75 | plt.plot(
76 | x,
77 | y,
78 | labels[1],
79 | ms=dotsize,
80 | alpha=alphavalue,
81 | color=colors(int(bpindex)),
82 | )
83 | else:
84 | plt.plot(
85 | x,
86 | y,
87 | labels[2],
88 | ms=dotsize,
89 | alpha=alphavalue,
90 | color=colors(int(bpindex)),
91 | )
92 | else: # this is the human labeler
93 | plt.plot(
94 | x,
95 | y,
96 | labels[0],
97 | ms=dotsize,
98 | alpha=alphavalue,
99 | color=colors(int(bpindex)),
100 | )
101 | plt.xlim(0, w)
102 | plt.ylim(0, h)
103 | plt.axis("off")
104 | plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
105 | plt.gca().invert_yaxis()
106 |
107 |
108 | def PlottingandSaveLabeledFrame(
109 | DataCombined,
110 | ind,
111 | trainIndices,
112 | cfg,
113 | colors,
114 | comparisonbodyparts,
115 | DLCscorer,
116 | foldername,
117 | scaling=1,
118 | ):
119 | fn = Path(cfg["project_path"] + "/" + DataCombined.index[ind])
120 | imagename = fn.parts[-1] # fn.stem+fn.suffix
121 | imfoldername = fn.parts[-2] # fn.suffix
122 | fig = plt.figure()
123 | ax = fig.add_subplot(1, 1, 1)
124 | MakeLabeledImage(
125 | DataCombined,
126 | ind,
127 | cfg["pcutoff"],
128 | cfg["project_path"],
129 | [cfg["scorer"], DLCscorer],
130 | comparisonbodyparts,
131 | colors,
132 | cfg,
133 | scaling=scaling,
134 | )
135 |
136 | if ind in trainIndices:
137 | full_path = os.path.join(
138 | foldername, "Training-" + imfoldername + "-" + imagename
139 | )
140 | else:
141 | full_path = os.path.join(foldername, "Test-" + imfoldername + "-" + imagename)
142 |
143 | # windows throws error if file path is > 260 characters, can fix with prefix. see https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file#maximum-path-length-limitation
144 | if (len(full_path) >= 260) and (os.name == "nt"):
145 | full_path = "\\\\?\\" + full_path
146 | plt.savefig(full_path)
147 |
148 | plt.close("all")
149 |
150 |
151 | def prepare_figure_axes(width, height, scale=1, dpi=100):
152 | fig = plt.figure(frameon=False, figsize=(width * scale / dpi, height * scale / dpi))
153 | ax = fig.add_subplot(111)
154 | ax.axis("off")
155 | ax.set_xlim(0, width)
156 | ax.set_ylim(0, height)
157 | ax.invert_yaxis()
158 | return fig, ax
159 |
160 |
161 | def make_labeled_images_from_dataframe(
162 | df,
163 | cfg,
164 | destfolder="",
165 | scale=1.0,
166 | dpi=100,
167 | keypoint="+",
168 | draw_skeleton=True,
169 | color_by="bodypart",
170 | ):
171 | """
172 | Write labeled frames to disk from a DataFrame.
173 |
174 | Parameters
175 | ----------
176 | df : pd.DataFrame
177 | DataFrame containing the labeled data. Typically, the DataFrame is obtained
178 | through pandas.read_csv() or pandas.read_hdf().
179 |
180 | cfg : dict
181 | Project configuration.
182 |
183 | destfolder : string, optional
184 | Destination folder into which images will be stored. By default, same location as the labeled data.
185 | Note that the folder will be created if it does not exist.
186 |
187 | scale : float, optional
188 | Up/downscale the output dimensions.
189 | By default, outputs are of the same dimensions as the original images.
190 |
191 | dpi : int, optional
192 | Output resolution. 100 dpi by default.
193 |
194 | keypoint : str, optional
195 | Keypoint appearance. By default, keypoints are marked by a + sign.
196 | Refer to https://matplotlib.org/3.2.1/api/markers_api.html for a list of all possible options.
197 |
198 | draw_skeleton : bool, optional
199 | Whether to draw the animal skeleton as defined in *cfg*. True by default.
200 |
201 | color_by : str, optional
202 | Color scheme of the keypoints. Must be either 'bodypart' or 'individual'.
203 | By default, keypoints are colored relative to the bodypart they represent.
204 | """
205 |
206 | bodyparts = df.columns.get_level_values("bodyparts")
207 | bodypart_names = bodyparts.unique()
208 | nbodyparts = len(bodypart_names)
209 | bodyparts = bodyparts[::2]
210 |
211 | if color_by == "bodypart":
212 | map_ = bodyparts.map(dict(zip(bodypart_names, range(nbodyparts))))
213 | cmap = get_cmap(nbodyparts, cfg["colormap"])
214 | colors = cmap(map_)
215 | elif color_by == "individual":
216 | try:
217 | individuals = df.columns.get_level_values("individuals")
218 | individual_names = individuals.unique().to_list()
219 | nindividuals = len(individual_names)
220 | individuals = individuals[::2]
221 | map_ = individuals.map(dict(zip(individual_names, range(nindividuals))))
222 | cmap = get_cmap(nindividuals, cfg["colormap"])
223 | colors = cmap(map_)
224 | except KeyError as e:
225 | raise Exception(
226 | "Coloring by individuals is only valid for multi-animal data"
227 | ) from e
228 | else:
229 | raise ValueError("`color_by` must be either `bodypart` or `individual`.")
230 |
231 | bones = []
232 | if draw_skeleton:
233 | for bp1, bp2 in cfg["skeleton"]:
234 | match1, match2 = [], []
235 | for j, bp in enumerate(bodyparts):
236 | if bp == bp1:
237 | match1.append(j)
238 | elif bp == bp2:
239 | match2.append(j)
240 | bones.extend(zip(match1, match2))
241 | ind_bones = tuple(zip(*bones))
242 |
243 | sep = "/" if "/" in df.index[0] else "\\"
244 | images = cfg["project_path"] + sep + df.index
245 | if sep != os.path.sep:
246 | images = images.str.replace(sep, os.path.sep)
247 | if not destfolder:
248 | destfolder = os.path.dirname(images[0])
249 | tmpfolder = destfolder + "_labeled"
250 | attempttomakefolder(tmpfolder)
251 | ic = io.imread_collection(images.to_list())
252 |
253 | h, w = ic[0].shape[:2]
254 | fig, ax = prepare_figure_axes(w, h, scale, dpi)
255 | im = ax.imshow(np.zeros((h, w)), "gray")
256 | scat = ax.scatter(
257 | [], [], s=cfg["dotsize"], alpha=cfg["alphavalue"], marker=keypoint
258 | )
259 | scat.set_color(colors)
260 | xy = df.values.reshape((df.shape[0], -1, 2))
261 | segs = xy[:, ind_bones].swapaxes(1, 2)
262 | coll = LineCollection([], colors=cfg["skeleton_color"])
263 | ax.add_collection(coll)
264 | for i in trange(len(ic)):
265 | coords = xy[i]
266 | im.set_array(ic[i])
267 | scat.set_offsets(coords)
268 | if ind_bones:
269 | coll.set_segments(segs[i])
270 | imagename = os.path.basename(ic.files[i])
271 | fig.savefig(
272 | os.path.join(tmpfolder, imagename.replace(".png", f"_{color_by}.png"))
273 | )
274 | plt.close(fig)
275 |
--------------------------------------------------------------------------------
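A hedged usage sketch for make_labeled_images_from_dataframe, reading an existing labeled-data file; the paths below are placeholders for a real project:

import pandas as pd
from deeplabcutcore.utils.auxiliaryfunctions import read_config
from deeplabcutcore.utils.visualization import make_labeled_images_from_dataframe

cfg = read_config("/path/to/project/config.yaml")  # placeholder path
df = pd.read_hdf("/path/to/project/labeled-data/video1/CollectedData_scorer.h5")  # placeholder
make_labeled_images_from_dataframe(
    df, cfg, destfolder="", scale=1.0, draw_skeleton=True, color_by="bodypart"
)
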
/deeplabcutcore/version.py:
--------------------------------------------------------------------------------
1 | """
2 | deeplabcut-core2.0 Toolbox (deeplabcut-core.org)
3 | © A. & M. Mathis Labs
4 | https://github.com/AlexEMG/deeplabcut-core
5 |
6 | Please see AUTHORS for contributors.
7 | https://github.com/AlexEMG/deeplabcut-core/blob/master/AUTHORS
8 | Licensed under GNU Lesser General Public License v3.0
9 | """
10 |
11 | __version__ = "0.0"
12 | VERSION = __version__
13 |
--------------------------------------------------------------------------------
/reinstall.sh:
--------------------------------------------------------------------------------
1 | pip uninstall deeplabcutcore
2 | python3 setup.py sdist bdist_wheel
3 | pip install dist/deeplabcutcore-0.0-py3-none-any.whl
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi
2 | chardet
3 | click
4 | easydict
5 | h5py~=2.7
6 | intel-openmp
7 | imgaug
8 | ipython
9 | ipython-genutils
10 | tf_slim
11 | matplotlib
12 | moviepy
13 | numpy
14 | opencv-python
15 | pandas
16 | patsy
17 | python-dateutil
18 | pyyaml
19 | requests
20 | ruamel_yaml
21 | setuptools
22 | scikit-image
23 | scikit-learn
24 | scipy
25 | six
26 | statsmodels
27 | tables
28 | tensorflow
29 | tensorpack
30 | tqdm
31 | wheel
32 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | deeplabcutcore Toolbox (deeplabcut.org)
5 | © A. & M. Mathis Labs
6 |
7 | Licensed under GNU Lesser General Public License v3.0
8 | """
9 |
10 |
11 | import setuptools
12 |
13 | with open("README.md", "r") as fh:
14 | long_description = fh.read()
15 |
16 | setuptools.setup(
17 | name="deeplabcutcore",
18 | version="0.0",
19 | author="A. & M.W. Mathis Labs",
20 | author_email="alexander@deeplabcut.org",
21 | description="Headless DeepLabCut",
22 | long_description=long_description,
23 | long_description_content_type="text/markdown",
24 | url="https://github.com/deeplabcutcore/deeplabcutcorecore",
25 | install_requires=[
26 | "certifi",
27 | "chardet",
28 | "click",
29 | "easydict",
30 | "h5py~=2.7",
31 | "intel-openmp",
32 | "imgaug",
33 | "ipython",
34 | "ipython-genutils",
35 | "matplotlib==3.0.3",
36 | "moviepy<=1.0.1",
37 | "numpy==1.16.4",
38 | "opencv-python-headless~=3.4",
39 | "pandas",
40 | "patsy",
41 | "python-dateutil",
42 | "pyyaml>=5.1",
43 | "requests",
44 | "ruamel.yaml~=0.15",
45 | "setuptools",
46 | "scikit-image",
47 | "scikit-learn",
48 | "scipy",
49 | "six",
50 | "statsmodels",
51 | "tables",
52 | "tensorpack>=0.9.7.1",
53 | "tqdm",
54 | "wheel",
55 | ],
56 | scripts=["deeplabcutcore/pose_estimation_tensorflow/models/pretrained/download.sh"],
57 | packages=setuptools.find_packages(),
58 | data_files=[
59 | (
60 | "deeplabcutcore",
61 | [
62 | "deeplabcutcore/pose_cfg.yaml",
63 | "deeplabcutcore/pose_estimation_tensorflow/models/pretrained/pretrained_model_urls.yaml",
64 | ],
65 | )
66 | ],
67 | include_package_data=True,
68 | classifiers=(
69 | "Programming Language :: Python :: 3",
70 | "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
71 | "Operating System :: OS Independent",
72 | ),
73 | entry_points="",
74 | )
75 |
--------------------------------------------------------------------------------
/testscript_cli.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Tue Oct 2 13:56:11 2018
5 | @author: alex
6 |
7 | DEVELOPERS:
8 | This script tests various functionalities in an automatic way.
9 |
10 | It should take about 3:30 minutes to run this on a CPU.
11 | It should take about 1:30 minutes on a GPU (incl. downloading the ResNet weights)
12 |
13 | It produces nothing of interest scientifically.
14 | """
15 |
16 | task = "Testcore" # Enter the name of your experiment Task
17 | scorer = "Mackenzie" # Enter the name of the experimenter/labeler
18 |
19 | import os, subprocess
20 |
21 | import deeplabcutcore as dlc
22 |
23 | from pathlib import Path
24 | import pandas as pd
25 | import numpy as np
26 | import platform
27 |
28 | print("Imported DLC!")
29 | import tensorflow
30 |
31 | print("TF version:")
32 | print(tensorflow.__version__)
33 |
34 | basepath = os.path.dirname(os.path.abspath("testscript.py"))
35 | videoname = "reachingvideo1"
36 | # video=[os.path.join(Path(basepath).parents[0],'DLCreleases/DeepLabCut/examples/Reaching-Mackenzie-2018-08-30','videos',videoname+'.avi')]
37 |
38 | video = [
39 | os.path.join(
40 | basepath, "Reaching-Mackenzie-2018-08-30", "videos", videoname + ".avi"
41 | )
42 | ]
43 | # For testing a color video:
44 | # videoname='baby4hin2min'
45 | # video=[os.path.join('/home/alex/Desktop/Data',videoname+'.mp4')]
46 | # to test destination folder:
47 | # dfolder=basepath
48 | print(video)
49 |
50 | dfolder = None
51 | net_type = "resnet_50" #'mobilenet_v2_0.35' #'resnet_50'
52 | augmenter_type = "default"
53 | augmenter_type2 = "imgaug"
54 |
55 | if platform.system() == "Darwin" or platform.system() == "Windows":
56 | print("On Windows/OSX tensorpack is not tested by default.")
57 | augmenter_type3 = "imgaug"
58 | else:
59 | augmenter_type3 = "tensorpack" # Does not work on WINDOWS
60 |
61 | numiter = 7
62 |
63 | print("CREATING PROJECT")
64 | path_config_file = dlc.create_new_project(task, scorer, video, copy_videos=True)
65 |
66 | cfg = dlc.auxiliaryfunctions.read_config(path_config_file)
67 | cfg["numframes2pick"] = 5
68 | cfg["pcutoff"] = 0.01
69 | cfg["TrainingFraction"] = [0.8]
70 | cfg["skeleton"] = [["bodypart1", "bodypart2"], ["bodypart1", "bodypart3"]]
71 |
72 | dlc.auxiliaryfunctions.write_config(path_config_file, cfg)
73 |
74 | print("EXTRACTING FRAMES")
75 | dlc.extract_frames(path_config_file, mode="automatic", userfeedback=False)
76 |
77 | print("CREATING-SOME LABELS FOR THE FRAMES")
78 | frames = os.listdir(os.path.join(cfg["project_path"], "labeled-data", videoname))
79 | # As this next step is manual, we update the labels by putting them on the diagonal (fixed for all frames)
80 | for index, bodypart in enumerate(cfg["bodyparts"]):
81 | columnindex = pd.MultiIndex.from_product(
82 | [[scorer], [bodypart], ["x", "y"]], names=["scorer", "bodyparts", "coords"]
83 | )
84 | frame = pd.DataFrame(
85 | 100 + np.ones((len(frames), 2)) * 50 * index,
86 | columns=columnindex,
87 | index=[os.path.join("labeled-data", videoname, fn) for fn in frames],
88 | )
89 | if index == 0:
90 | dataFrame = frame
91 | else:
92 | dataFrame = pd.concat([dataFrame, frame], axis=1)
93 |
94 | dataFrame.to_csv(
95 | os.path.join(
96 | cfg["project_path"],
97 | "labeled-data",
98 | videoname,
99 | "CollectedData_" + scorer + ".csv",
100 | )
101 | )
102 | dataFrame.to_hdf(
103 | os.path.join(
104 | cfg["project_path"],
105 | "labeled-data",
106 | videoname,
107 | "CollectedData_" + scorer + ".h5",
108 | ),
109 | "df_with_missing",
110 | format="table",
111 | mode="w",
112 | )
113 |
114 | print("Plot labels...")
115 |
116 | dlc.check_labels(path_config_file)
117 |
118 | print("CREATING TRAININGSET")
119 | dlc.create_training_dataset(
120 | path_config_file, net_type=net_type, augmenter_type=augmenter_type
121 | )
122 |
123 | posefile = os.path.join(
124 | cfg["project_path"],
125 | "dlc-models/iteration-"
126 | + str(cfg["iteration"])
127 | + "/"
128 | + cfg["Task"]
129 | + cfg["date"]
130 | + "-trainset"
131 | + str(int(cfg["TrainingFraction"][0] * 100))
132 | + "shuffle"
133 | + str(1),
134 | "train/pose_cfg.yaml",
135 | )
136 |
137 | DLC_config = dlc.auxiliaryfunctions.read_plainconfig(posefile)
138 | DLC_config["save_iters"] = numiter
139 | DLC_config["display_iters"] = 2
140 | DLC_config["multi_step"] = [[0.001, numiter]]
141 |
142 | print("CHANGING training parameters to end quickly!")
143 | dlc.auxiliaryfunctions.write_plainconfig(posefile, DLC_config)
144 |
145 | print("TRAIN")
146 | dlc.train_network(path_config_file)
147 |
148 | print("EVALUATE")
149 | dlc.evaluate_network(path_config_file, plotting=True)
150 |
151 | videotest = os.path.join(cfg["project_path"], "videos", videoname + ".avi")
152 |
153 | print(videotest)
154 |
155 | # memory on CLI issues: #persists Nov 22 2020 -- one receives a kill signal
156 | """
157 | print("VIDEO ANALYSIS")
158 | dlc.analyze_videos(path_config_file, [videotest], save_as_csv=True)
159 |
160 | print("CREATE VIDEO")
161 | dlc.create_labeled_video(path_config_file,[videotest], save_frames=False)
162 |
163 | print("Making plots")
164 | dlc.plot_trajectories(path_config_file,[videotest])
165 |
166 | print("CREATING TRAININGSET 2")
167 | dlc.create_training_dataset(path_config_file, Shuffles=[2],net_type=net_type,augmenter_type=augmenter_type2)
168 |
169 | cfg=dlc.auxiliaryfunctions.read_config(path_config_file)
170 | posefile=os.path.join(cfg['project_path'],'dlc-models/iteration-'+str(cfg['iteration'])+'/'+ cfg['Task'] + cfg['date'] + '-trainset' + str(int(cfg['TrainingFraction'][0] * 100)) + 'shuffle' + str(2),'train/pose_cfg.yaml')
171 | DLC_config=dlc.auxiliaryfunctions.read_plainconfig(posefile)
172 | DLC_config['save_iters']=numiter
173 | DLC_config['display_iters']=1
174 | DLC_config['multi_step']=[[0.001,numiter]]
175 |
176 | print("CHANGING training parameters to end quickly!")
177 | dlc.auxiliaryfunctions.write_config(posefile,DLC_config)
178 |
179 | print("TRAIN")
180 | dlc.train_network(path_config_file, shuffle=2,allow_growth=True)
181 |
182 | print("EVALUATE")
183 | dlc.evaluate_network(path_config_file,Shuffles=[2],plotting=False)
184 | """
185 |
186 | print("ANALYZING some individual frames")
187 | dlc.analyze_time_lapse_frames(
188 | path_config_file, os.path.join(cfg["project_path"], "labeled-data/reachingvideo1/")
189 | )
190 |
191 | print("Export model...")
192 | dlc.export_model(path_config_file, shuffle=1, make_tar=False)
193 |
194 | print(
195 | "ALL DONE!!! - default/imgaug cases of DLCcore training and evaluation are functional (no extract outlier or refinement tested)."
196 | )
197 |
--------------------------------------------------------------------------------