├── .flake8
├── .github
└── workflows
│ ├── dockerimage.yml
│ └── pythonpublish.yml
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── docs
├── autogen.py
├── custom_theme
│ └── main.html
└── mkdocs.yml
├── keras_autodoc
├── __init__.py
├── autogen.py
├── docstring.py
├── examples.py
├── gathering_members.py
├── get_signatures.py
└── utils.py
├── setup.py
└── tests
├── Dockerfile
├── __init__.py
├── autogen_future.py
├── dummy_package
├── __init__.py
├── dummy_module.py
├── dummy_module2.py
└── expected.md
├── test_autogen.py
├── test_docstring.py
├── test_gathering_members.py
├── test_get_signature.py
├── test_integration.py
└── test_utils.py
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 |
3 | # imported but unused in __init__.py, that's ok.
4 | per-file-ignores = **/__init__.py:F401
5 |
6 | # black manages this
7 | max-line-length = 93
8 |
--------------------------------------------------------------------------------
/.github/workflows/dockerimage.yml:
--------------------------------------------------------------------------------
1 | name: Unit testing
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 |
7 | build:
8 |
9 | runs-on: ubuntu-latest
10 |
11 | steps:
12 | - uses: actions/checkout@v1
13 | - name: Build the Docker image
14 | run: docker build . --file tests/Dockerfile --tag test_image
15 |
--------------------------------------------------------------------------------
/.github/workflows/pythonpublish.yml:
--------------------------------------------------------------------------------
1 | name: Upload Python Package
2 |
3 | on:
4 | release:
5 | types: [created]
6 |
7 | jobs:
8 | deploy:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@v2
12 | - name: Set up Python
13 | uses: actions/setup-python@v1
14 | with:
15 | python-version: '3.x'
16 | - name: Install dependencies
17 | run: |
18 | python -m pip install --upgrade pip
19 | pip install setuptools wheel twine
20 | - name: Build and publish
21 | env:
22 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
23 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
24 | run: |
25 | python setup.py sdist bdist_wheel
26 | twine upload dist/*
27 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 | .idea/
106 | docs/sources
107 | docs/site
108 |
109 | # IDE stuff
110 | .vscode/
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | We'd love to accept your patches and contributions to this project. There are
4 | just a few small guidelines you need to follow.
5 |
6 | ## Pull Request Guide
7 | Before you submit a pull request, check that it meets these guidelines:
8 |
9 | 1. Fork the repository. Create a new branch from the master branch.
10 |
11 | 2. Make a pull request from your new branch to the master branch of the original keras-autodoc repo. Give your pull request a **meaningful** name.
12 |
13 | 3. Include "resolves #issue_number" in the description of the pull request if applicable and briefly describe your contribution.
14 |
15 | 4. For the case of bug fixes, add new test cases which would fail before your bug fix.
16 |
17 | ## Code Style Guide
18 | This project tries to closely follow the official Python Style Guide detailed in [PEP8](https://www.python.org/dev/peps/pep-0008/). We use [Flake8](http://flake8.pycqa.org/en/latest/) to enforce it.
19 | The docstrings follow the [Google Python Style Guide](https://github.com/google/styleguide/blob/gh-pages/pyguide.md#381-docstrings).
20 |
21 | ## Testing Guide
22 | [Pytest](https://docs.pytest.org/en/latest/) is used to write the unit tests.
23 | You should test your code by writing unit testing code in `tests` directory.
24 | The testing file name should be the name of the `.py` file under test with a `test_` prefix, placed in the corresponding directory,
25 | e.g., the tests for `layers.py` should live in `test_layers.py`.
26 |
27 | If you have Docker, you can easily run all tests without any setup by running `docker build -f tests/Dockerfile .` from the root directory.
28 |
29 | ## Pre-commit hook
30 |
31 | You can make git run `flake8` before every commit automatically. It will make you go faster by
32 | avoiding pushing commits which are not passing the flake8 tests. To do this,
33 | open `.git/hooks/pre-commit` with a text editor and write `flake8` inside. If the `flake8` test doesn't
34 | pass, the commit will be aborted.
35 |
36 | ## Code reviews
37 |
38 | All submissions, including submissions by project members, require review. We
39 | use GitHub pull requests for this purpose. Consult
40 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
41 | information on using pull requests.
42 |
43 | ## Community Guidelines
44 |
45 | This project follows [Google's Open Source Community
46 | Guidelines](https://opensource.google.com/conduct/).
47 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright 2019 The Keras team.
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # keras-autodoc
2 |
3 | 
4 |
5 |
6 | [Autodoc](http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html) for [mkdocs](https://www.mkdocs.org/).
7 |
8 | keras-autodoc will fetch the docstrings from the functions you wish to document and will insert them in the markdown files.
9 |
10 | Take a look at the [documentation](https://gabrieldemarmiesse.github.io/keras-autodoc/)!
11 |
12 | ### Install
13 |
14 | ```bash
15 | pip install keras-autodoc
16 | ```
17 |
18 | We recommend pinning the version (e.g. `pip install keras-autodoc==0.3.2`). We may break compatibility without any warning.
19 |
20 | ### Example
21 |
22 | Let's suppose that you have a `docs` directory:
23 |
24 | ```
25 | ./docs
26 | |-- autogen.py
27 | |-- mkdocs.yml
28 | ```
29 |
30 | The API is quite simple:
31 |
32 | ```python
33 | # content of docs/autogen.py
34 |
35 | from keras_autodoc import DocumentationGenerator
36 |
37 |
38 | pages = {'layers/core.md': ['keras.layers.Dense', 'keras.layers.Flatten'],
39 | 'callbacks.md': ['keras.callbacks.TensorBoard']}
40 |
41 | doc_generator = DocumentationGenerator(pages)
42 | doc_generator.generate('./sources')
43 | ```
44 |
45 | ```yaml
46 | # content of docs/mkdocs.yml
47 |
48 | site_name: My_site
49 | docs_dir: sources
50 | site_description: 'My pretty site.'
51 |
52 | nav:
53 | - Core: layers/core.md
54 | - Callbacks:
55 | - Some callbacks: callbacks.md
56 | ```
57 |
58 | Then you just have to run:
59 |
60 | ```bash
61 | python autogen.py
62 | mkdocs serve
63 | ```
64 |
65 | and you'll be able to see your website at [localhost:8000/callbacks](http://localhost:8000/callbacks/).
66 |
67 | ### Docstring format:
68 |
69 | The docstrings should follow the [Google Python Style Guide](https://github.com/google/styleguide/blob/gh-pages/pyguide.md#381-docstrings) with markdown, or just plain markdown.
70 |
71 | For example, let's take this class:
72 |
73 | ```python
74 | class ImageDataGenerator:
75 | """Generate batches of tensor image data with real-time data augmentation.
76 |
77 | The data will be looped over (in batches).
78 |
79 | # Arguments
80 | featurewise_center: Boolean.
81 | Set input mean to 0 over the dataset, feature-wise.
82 | zca_whitening: Boolean. Apply ZCA whitening.
83 | width_shift_range: Float, 1-D array-like or int
84 | - float: fraction of total width, if < 1, or pixels if >= 1.
85 | - 1-D array-like: random elements from the array.
86 | - int: integer number of pixels from interval
87 | `(-width_shift_range, +width_shift_range)`
88 | - With `width_shift_range=2` possible values
89 | are integers `[-1, 0, +1]`,
90 | same as with `width_shift_range=[-1, 0, +1]`,
91 | while with `width_shift_range=1.0` possible values are floats
92 | in the interval `[-1.0, +1.0)`.
93 |
94 | # Examples
95 |
96 | Example of using `.flow(x, y)`:
97 | ```python
98 | datagen = ImageDataGenerator(
99 | featurewise_center=True,
100 | zca_whitening=True,
101 | width_shift_range=0.2)
102 | # compute quantities required for featurewise normalization
103 | # (std, mean, and principal components if ZCA whitening is applied)
104 | datagen.fit(x_train)
105 | # fits the model on batches with real-time data augmentation:
106 | model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
107 | steps_per_epoch=len(x_train) / 32, epochs=epochs)
108 | ```
109 | """
110 |
111 | def __init__(self,featurewise_center, zca_whitening, width_shift_range):
112 | pass
113 | ```
114 |
115 | will be rendered as:
116 |
117 | ### ImageDataGenerator class:
118 |
119 | ```python
120 | dummy_module.ImageDataGenerator(featurewise_center, zca_whitening, width_shift_range=0.0)
121 | ```
122 |
123 | Generate batches of tensor image data with real-time data augmentation.
124 |
125 | The data will be looped over (in batches).
126 |
127 | __Arguments__
128 |
129 | - __featurewise_center__: Boolean.
130 | Set input mean to 0 over the dataset, feature-wise.
131 | - __zca_whitening__: Boolean. Apply ZCA whitening.
132 | - __width_shift_range__: Float, 1-D array-like or int
133 | - float: fraction of total width, if < 1, or pixels if >= 1.
134 | - 1-D array-like: random elements from the array.
135 | - int: integer number of pixels from interval
136 | `(-width_shift_range, +width_shift_range)`
137 | - With `width_shift_range=2` possible values
138 | are integers `[-1, 0, +1]`,
139 | same as with `width_shift_range=[-1, 0, +1]`,
140 | while with `width_shift_range=1.0` possible values are floats
141 | in the interval `[-1.0, +1.0)`.
142 |
143 | __Examples__
144 |
145 |
146 | Example of using `.flow(x, y)`:
147 | ```python
148 | datagen = ImageDataGenerator(
149 | featurewise_center=True,
150 | zca_whitening=True,
151 | width_shift_range=0.2)
152 | # compute quantities required for featurewise normalization
153 | # (std, mean, and principal components if ZCA whitening is applied)
154 | datagen.fit(x_train)
155 | # fits the model on batches with real-time data augmentation:
156 | model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
157 | steps_per_epoch=len(x_train) / 32, epochs=epochs)
158 | ```
159 |
160 | ### Take a look at our docs
161 |
162 | If you want examples, you can take a look at [the docs directory of autokeras](https://github.com/keras-team/autokeras/tree/master/docs) as well as [the generated docs](https://autokeras.com/).
163 |
164 | You can also look at [the docs directory of keras-tuner](https://github.com/keras-team/keras-tuner/tree/master/docs).
165 |
--------------------------------------------------------------------------------
/docs/autogen.py:
--------------------------------------------------------------------------------
import pathlib
import shutil

import keras_autodoc


# Destination markdown file -> dotted paths of the objects documented in it.
PAGES = {
    'documentation_generator.md': [
        'keras_autodoc.DocumentationGenerator',
        'keras_autodoc.DocumentationGenerator.generate',
    ],
    'automatic_gathering.md': [
        'keras_autodoc.get_functions',
        'keras_autodoc.get_classes',
        'keras_autodoc.get_methods',
    ]
}


# Repository root (this file lives in <root>/docs/).
keras_autodoc_dir = pathlib.Path(__file__).resolve().parents[1]


def generate(dest_dir):
    """Build the markdown sources of the documentation site into `dest_dir`."""
    generator = keras_autodoc.DocumentationGenerator(
        PAGES,
        'https://github.com/keras-team/keras-autodoc/blob/master',
    )
    generator.generate(dest_dir)
    # The project README doubles as the landing page of the site.
    shutil.copyfile(keras_autodoc_dir / 'README.md', dest_dir / 'index.md')


if __name__ == '__main__':
    generate(keras_autodoc_dir / 'docs' / 'sources')
--------------------------------------------------------------------------------
/docs/custom_theme/main.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 | {% block content %}
4 |
{% include "content.html" %}
5 | {% endblock %}
6 |
--------------------------------------------------------------------------------
/docs/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: Keras Autodoc
2 | theme:
3 | name: mkdocs
4 | custom_dir: custom_theme
5 | docs_dir: sources
6 | repo_url: https://github.com/keras-team/keras-autodoc
7 | site_description: 'Documentation for Keras Autodoc.'
8 |
9 | nav:
10 | - Home: index.md
11 | - Documentation generator: documentation_generator.md
12 | - Automatic gathering: automatic_gathering.md
13 |
--------------------------------------------------------------------------------
/keras_autodoc/__init__.py:
--------------------------------------------------------------------------------
1 | from .autogen import DocumentationGenerator
2 | from .gathering_members import get_methods
3 | from .gathering_members import get_classes
4 | from .gathering_members import get_functions
5 |
--------------------------------------------------------------------------------
/keras_autodoc/autogen.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | import pathlib
3 | from inspect import getdoc, isclass
4 | from typing import Dict, Union, List, get_type_hints
5 |
6 | from .docstring import process_docstring
7 | from .examples import copy_examples
8 | from .get_signatures import get_signature
9 |
10 | from . import utils
11 |
12 |
class DocumentationGenerator:
    """Generates the documentation.

    # Arguments

        pages: A dictionary. The keys are the files' paths, the values
            are lists of strings, functions /classes / methods names
            with dotted access to the object. For example,
            `pages = {'my_file.md': ['keras.layers.Dense']}` is valid.
        project_url: The url pointing to the module directory of your project on
            GitHub. This will be used to make a `[Sources]` link.
        template_dir: Where to put the markdown files which will be copied and
            filled in the destination directory. You should put files like
            `index.md` inside. If you want a markdown file to be filled with
            the docstring of a function, use the `{{autogenerated}}` tag inside,
            and then add the markdown file to the `pages` dictionary.
        example_dir: Where you store examples in your project. Usually standalone
            files with a markdown docstring at the top. Will be inserted in the docs.
        extra_aliases: When displaying type hints, it's possible that the full
            dotted path is displayed instead of alias. The aliases present in
            `pages` are used, but it may happen if you're using a third-party library.
            For example `tensorflow.python.ops.variables.Variable` is displayed instead
            of `tensorflow.Variable`. Here you have two solutions, either you provide
            the import keras-autodoc should follow:
            `extra_aliases=["tensorflow.Variable"]`, either you provide a mapping to use
            `extra_aliases={"tensorflow.python.ops.variables.Variable": "tf.Variable"}`.
            The second option should be used if you want more control and that you
            don't want to respect the alias corresponding to the import (you can't do
            `import tf.Variable`). When giving a list, keras-autodoc will try to import
            the object from the string to understand what object you want to replace.
        max_signature_line_length: When displaying class and function signatures,
            keras-autodoc formats them using Black. This parameter controls the
            maximum line length of these signatures, and is passed directly through
            to Black.
        titles_size: `"#"` signs to put before a title in the generated markdown.
    """
    def __init__(self,
                 pages: Dict[str, list] = None,
                 project_url: Union[str, Dict[str, str]] = None,
                 template_dir=None,
                 examples_dir=None,
                 extra_aliases: Union[List[str], Dict[str, str]] = None,
                 max_signature_line_length: int = 110,
                 titles_size="###"):
        # `None` sentinel instead of a `{}` default: a mutable default dict
        # would be shared by every instance created without `pages`.
        self.pages = {} if pages is None else pages
        self.project_url = project_url
        self.template_dir = template_dir
        self.examples_dir = examples_dir
        self.class_aliases = {}
        self._fill_aliases(extra_aliases)
        self.max_signature_line_length = max_signature_line_length
        self.titles_size = titles_size

    def generate(self, dest_dir):
        """Generate the docs.

        # Arguments

            dest_dir: Where to put the resulting markdown files.
        """
        dest_dir = pathlib.Path(dest_dir)
        print("Cleaning up existing sources directory.")
        if dest_dir.exists():
            shutil.rmtree(dest_dir)

        print("Populating sources directory with templates.")
        if self.template_dir:
            shutil.copytree(self.template_dir, dest_dir)

        # Concatenate the rendered markdown of every element of each page,
        # then splice it into the page's template (or create the file).
        for file_path, elements in self.pages.items():
            markdown_text = ''
            for element in elements:
                markdown_text += self._render(element)
            utils.insert_in_file(markdown_text, dest_dir / file_path)

        if self.examples_dir is not None:
            copy_examples(self.examples_dir, dest_dir / "examples")

    def process_docstring(self, docstring, types: dict = None):
        """Can be overridden."""
        return process_docstring(docstring, types, self.class_aliases)

    def process_signature(self, signature):
        """Can be overridden."""
        return signature

    def _render(self, element):
        # `element` is either a dotted path to import, or an already-live
        # object to document directly.
        if isinstance(element, str):
            object_ = utils.import_object(element)
            if utils.ismethod(object_):
                # we remove the modules when displaying the methods
                signature_override = '.'.join(element.split('.')[-2:])
            else:
                signature_override = element
        else:
            signature_override = None
            object_ = element

        return self._render_from_object(object_, signature_override)

    def _render_from_object(self, object_, signature_override: str):
        # Build the markdown block: optional [Sources] link, title,
        # code-formatted signature, then the processed docstring.
        subblocks = []
        if self.project_url is not None:
            subblocks.append(utils.make_source_link(object_, self.project_url))
        signature = get_signature(
            object_, signature_override, self.max_signature_line_length
        )
        signature = self.process_signature(signature)
        subblocks.append(f"{self.titles_size} {object_.__name__}\n")
        subblocks.append(utils.code_snippet(signature))

        docstring = getdoc(object_)
        if docstring:
            # For classes the argument type hints live on `__init__`.
            if isclass(object_):
                type_hints = get_type_hints(object_.__init__)
            else:
                type_hints = get_type_hints(object_)
            docstring = self.process_docstring(docstring, type_hints)
            subblocks.append(docstring)
        return "\n\n".join(subblocks) + '\n\n----\n\n'

    def _fill_aliases(self, extra_aliases):
        # Record dotted-path -> alias for every class listed in `pages`, so
        # type hints are displayed with the user-facing alias.
        for list_elements in self.pages.values():
            for element_as_str in list_elements:
                element = utils.import_object(element_as_str)
                if not isclass(element):
                    continue
                true_dotted_path = utils.get_dotted_path(element)
                self.class_aliases[true_dotted_path] = element_as_str

        # `extra_aliases` may be an explicit mapping, or a list of imports
        # to resolve into their true dotted paths.
        if isinstance(extra_aliases, dict):
            self.class_aliases.update(extra_aliases)
        elif isinstance(extra_aliases, list):
            for alias in extra_aliases:
                full_dotted_path = utils.get_dotted_path(utils.import_object(alias))
                self.class_aliases[full_dotted_path] = alias
--------------------------------------------------------------------------------
/keras_autodoc/docstring.py:
--------------------------------------------------------------------------------
1 | import re
2 | import itertools
3 | from sphinx.util.typing import stringify
4 |
5 | from . import utils
6 |
7 |
def get_code_blocks(docstring):
    """Replace every fenced code block by a placeholder token.

    Returns the token -> snippet mapping plus the docstring with the
    snippets substituted, so later parsing never sees '#', ':' etc.
    inside code.
    """
    code_blocks = {}
    remainder = docstring[:]
    while "```" in remainder:
        # Jump to the opening fence, then locate the matching closing fence.
        remainder = remainder[remainder.find("```"):]
        end = remainder[3:].find("```") + 6
        snippet = remainder[:end]
        # Place marker in docstring for later reinjection.
        token = f'$KERAS_AUTODOC_CODE_BLOCK_{len(code_blocks)}'
        docstring = docstring.replace(snippet, token)
        code_blocks[token] = snippet
        remainder = remainder[end:]

    return code_blocks, docstring
22 |
23 |
def get_section_end(docstring, section_start):
    """Locate the end of the indented section starting at *section_start*."""
    # A section ends where a non-blank line is followed by blank line(s)
    # and then either a non-indented line or the end of the string.
    pattern = re.compile(r'\S\n+(\S|$)')
    match = re.search(pattern, docstring[section_start:])
    stop = section_start + match.end()
    # A match that runs to the very end is kept as-is; otherwise back off
    # the two characters that belong to the following section.
    return stop if stop == len(docstring) else stop - 2
32 |
33 |
def get_google_style_sections_without_code(docstring):
    """Swap every '# Title' section for a placeholder token.

    Assumes fenced code blocks have already been removed from *docstring*.
    """
    section_start_re = re.compile(r'\n# .+?\n')

    sections = {}
    for i in itertools.count():
        match = re.search(section_start_re, docstring)
        if match is None:
            break
        start = match.start() + 1  # skip the leading newline
        end = get_section_end(docstring, start)
        token = f'KERAS_AUTODOC_GOOGLE_STYLE_SECTION_{i}'
        sections[token] = docstring[start:end]
        docstring = utils.insert_in_string(docstring, token, start, end)
    return sections, docstring
50 |
51 |
def get_google_style_sections(docstring):
    """Extract google-style sections while keeping code blocks intact."""
    # Pull code blocks out first: the section parsing is much simpler when
    # the '#', ':' and other symbols inside snippets are out of the way.
    code_blocks, docstring = get_code_blocks(docstring)

    sections, docstring = get_google_style_sections_without_code(docstring)

    docstring = reinject_strings(docstring, code_blocks)
    sections = {
        token: reinject_strings(body, code_blocks)
        for token, body in sections.items()
    }
    return sections, docstring
64 |
65 |
def to_markdown(google_style_section: str, types: dict = None, aliases=None) -> str:
    """Render one '# Title' section as markdown."""
    title_end = google_style_section.find('\n')
    section_title = google_style_section[2:title_end]
    section_body = google_style_section[title_end + 1:]
    section_body = utils.remove_indentation(section_body.strip())

    # Sections that enumerate items get bullet-list formatting; only
    # "Arguments" additionally receives type annotations.
    if section_title == "Arguments":
        section_body = format_as_markdown_list(section_body, types, aliases)
    elif section_title in ('Attributes', 'Raises'):
        section_body = format_as_markdown_list(section_body)

    if not section_body:
        return f'__{section_title}__\n'
    return f'__{section_title}__\n\n{section_body}\n'
82 |
83 |
def format_as_markdown_list(section_body, types: dict = None, aliases: dict = None):
    """Turn 'name: description' lines into markdown bullets with bold names."""
    # Bold every 'name:' that opens a line, both mid-string and at the
    # very beginning of the body.
    section_body = re.sub(r'\n([^ ].*?):', r'\n- __\1__:', section_body)
    section_body = re.sub(r'^([^ ].*?):', r'- __\1__:', section_body)

    if types:
        # Optionally append the (alias-resolved) type hint after each name.
        for arg, arg_type in types.items():
            hint = apply_aliases(stringify(arg_type), aliases)
            section_body = re.sub(
                rf"(- __{arg}__)", rf"\1 `{hint}`", section_body
            )

    return section_body
97 |
98 |
def apply_aliases(string: str, aliases: dict):
    """Rewrite every dotted path in *string* using the alias mapping."""
    for real_path, display_name in aliases.items():
        string = string.replace(real_path, display_name)
    return string
103 |
104 |
def reinject_strings(target, strings_to_inject):
    """Substitute each placeholder token in *target* by its stored string."""
    for marker, original in strings_to_inject.items():
        target = target.replace(marker, original)
    return target
109 |
110 |
def process_docstring(docstring, types: dict = None, aliases=None):
    """Render a raw docstring to markdown.

    # Arguments
        docstring: The raw docstring to process.
        types: Optional mapping of argument names to type hints, merged
            into the "Arguments" section.
        aliases: Optional mapping of dotted paths to display aliases.

    # Returns
        The docstring with every google-style section rendered as markdown.
    """
    # Guarantee the trailing newline the section parser relies on. Using
    # endswith also makes an empty docstring safe (indexing docstring[-1]
    # raised IndexError on '').
    if not docstring.endswith('\n'):
        docstring += '\n'
    google_style_sections, docstring = get_google_style_sections(docstring)

    for token, google_style_section in google_style_sections.items():
        markdown_section = to_markdown(google_style_section, types, aliases)
        docstring = docstring.replace(token, markdown_section)
    return docstring
120 |
--------------------------------------------------------------------------------
/keras_autodoc/examples.py:
--------------------------------------------------------------------------------
1 | import pathlib
2 | import os
3 |
4 |
def copy_examples(examples_dir, destination_dir):
    """Copy the examples directory in the documentation.

    Prettify files by extracting the docstrings written in Markdown.

    # Arguments
        examples_dir: Directory containing the example `.py` files.
        destination_dir: Directory the rendered `.md` files are written to.
    """
    # Normalize both to Path: the original mixed os.path.join with the
    # `/` operator, which crashed when examples_dir was passed as a str.
    examples_dir = pathlib.Path(examples_dir)
    destination_dir = pathlib.Path(destination_dir)
    destination_dir.mkdir(exist_ok=True)
    for file_name in os.listdir(examples_dir):
        if not file_name.endswith(".py"):
            continue
        module_path = examples_dir / file_name
        docstring, starting_line = get_module_docstring(module_path)
        destination_file = destination_dir / (file_name[:-2] + "md")
        with open(destination_file, "w+", encoding="utf-8") as f_out, \
                open(examples_dir / file_name, "r+", encoding="utf-8") as f_in:

            f_out.write(docstring + "\n\n")

            # skip docstring
            for _ in range(starting_line):
                next(f_in)

            f_out.write("```python\n")
            # next line might be empty.
            line = next(f_in)
            if line != "\n":
                f_out.write(line)

            # copy the rest of the file.
            for line in f_in:
                f_out.write(line)
            f_out.write("```")
36 |
37 |
def get_module_docstring(filepath):
    """Extract the module docstring.

    Also finds the line at which the docstring ends.

    # Arguments
        filepath: Path of the Python source file to inspect.

    # Returns
        A `(docstring, lineno)` tuple, where `lineno` is the code object's
        `co_firstlineno` (used by `copy_examples` to skip the docstring).
    """
    # Use a context manager so the handle is closed deterministically
    # (the original left it open until garbage collection).
    with open(filepath, encoding="utf-8") as f:
        co = compile(f.read(), str(filepath), "exec")
    if co.co_consts and isinstance(co.co_consts[0], str):
        docstring = co.co_consts[0]
    else:
        print("Could not get the docstring from " + str(filepath))
        docstring = ""
    return docstring, co.co_firstlineno
50 |
--------------------------------------------------------------------------------
/keras_autodoc/gathering_members.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | from inspect import isclass, isfunction, isroutine
3 | from typing import List
4 | from .utils import import_object
5 |
6 |
def get_classes(module,
                exclude: List[str] = None,
                return_strings: bool = True):
    """Get all the classes of a module.

    # Arguments

        module: The module to fetch the classes from. If it's a
        string, it should be in the dotted format. `'keras.layers'` for example.
        exclude: The names which will be excluded from the returned list. For
        example, `get_classes('keras.layers', exclude=['Dense', 'Conv2D'])`.
        return_strings: If False, the actual classes will be returned. Note that
        if you use aliases when building your docs, you should use strings.
        This is because the computed signature uses
        `__name__` and `__module__` if you don't provide a string as input.

    # Returns

        A list of strings or a list of classes.
    """
    # class_=True: the shared helper keeps only classes (see get_functions).
    return _get_all_module_element(module, exclude, return_strings, True)
28 |
29 |
def get_functions(module,
                  exclude: List[str] = None,
                  return_strings: bool = True):
    """Get all the functions of a module.

    # Arguments

        module: The module to fetch the functions from. If it's a
        string, it should be in the dotted format. `'keras.backend'` for example.
        exclude: The names which will be excluded from the returned list. For
        example, `get_functions('keras.backend', exclude=['max'])`.
        return_strings: If False, the actual functions will be returned. Note that
        if you use aliases when building your docs, you should use strings.
        This is because the computed signature uses
        `__name__` and `__module__` if you don't provide a string as input.

    # Returns

        A list of strings or a list of functions.
    """
    # class_=False: the shared helper keeps only functions (see get_classes).
    return _get_all_module_element(module, exclude, return_strings, False)
51 |
52 |
def get_methods(cls, exclude=None, return_strings=True):
    """Get all the method of a class.

    # Arguments

        cls: The class to fetch the methods from. If it's a
            string, it should be in the dotted format. `'keras.layers.Dense'`
            for example.
        exclude: The names which will be excluded from the returned list. For
            example, `get_methods('keras.Model', exclude=['save'])`.
        return_strings: If False, the actual methods will be returned. Note that
            if you use aliases when building your docs, you should use strings.
            This is because the computed signature uses
            `__name__` and `__module__` if you don't provide a string as input.

    # Returns

        A list of strings or a list of methods.
    """
    if isinstance(cls, str):
        cls_str = cls
        cls = import_object(cls)
    else:
        cls_str = f'{cls.__module__}.{cls.__name__}'
    excluded = set(exclude or [])
    methods = []
    # Public routines only: private/dunder names and excluded names are
    # filtered out.
    for _, candidate in inspect.getmembers(cls, predicate=isroutine):
        name = candidate.__name__
        if name.startswith("_") or name in excluded:
            continue
        methods.append(f'{cls_str}.{name}' if return_strings else candidate)
    return methods
87 |
88 |
89 | def _get_all_module_element(module, exclude, return_strings, class_):
90 | if isinstance(module, str):
91 | module = import_object(module)
92 | exclude = exclude or []
93 | module_data = []
94 | for name in dir(module):
95 | module_member = getattr(module, name)
96 | if not (isfunction(module_member) or isclass(module_member)):
97 | continue
98 | if name[0] == "_" or name in exclude:
99 | continue
100 | if module.__name__ not in module_member.__module__:
101 | continue
102 | if module_member in module_data:
103 | continue
104 | if class_ and not isclass(module_member):
105 | continue
106 | if not class_ and not isfunction(module_member):
107 | continue
108 | if return_strings:
109 | module_data.append(f'{module.__name__}.{name}')
110 | else:
111 | module_data.append(module_member)
112 | module_data.sort(key=id)
113 | return module_data
114 |
--------------------------------------------------------------------------------
/keras_autodoc/get_signatures.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | from . import utils
3 | import black
4 | import inspect
5 | from sphinx.util.inspect import signature
6 | from sphinx.util.inspect import stringify_signature
7 |
8 |
def get_signature_start(function):
    """For the Dense layer, it should return the string 'keras.layers.Dense'"""
    if utils.ismethod(function):
        # Methods are prefixed with the class name instead of the module.
        owner = utils.get_class_from_method(function)
        return f'{owner.__name__}.{function.__name__}'
    try:
        prefix = f'{function.__module__}.'
    except AttributeError:
        warnings.warn(f'function {function} has no module. '
                      f'It will not be included in the signature.')
        prefix = ''
    return f'{prefix}{function.__name__}'
22 |
23 |
def get_signature_end(function):
    """Stringify the argument list, e.g. '(units, activation=None)'."""
    signature_end = stringify_signature(signature(function), show_annotation=False)
    if not utils.ismethod(function):
        return signature_end
    # Hide the bound `self` parameter when rendering methods.
    signature_end = signature_end.replace('(self, ', '(')
    return signature_end.replace('(self)', '()')
31 |
32 |
def get_function_signature(function, override=None, max_line_length: int = 110):
    """Build the formatted signature of *function*, optionally overriding its name."""
    signature_start = (get_signature_start(function)
                       if override is None else override)
    signature_end = get_signature_end(function)
    return format_signature(signature_start, signature_end, max_line_length)
40 |
41 |
def get_class_signature(cls, override=None, max_line_length: int = 110):
    """Build the formatted constructor signature of *cls*."""
    signature_start = (f'{cls.__module__}.{cls.__name__}'
                       if override is None else override)
    # Classes are documented through their __init__ argument list.
    return format_signature(signature_start, get_signature_end(cls.__init__),
                            max_line_length)
49 |
50 |
def get_signature(object_, override=None, max_line_length: int = 110):
    """Dispatch to the class or function signature builder."""
    builder = (get_class_signature if inspect.isclass(object_)
               else get_function_signature)
    return builder(object_, override, max_line_length)
56 |
57 |
def format_signature(
    signature_start: str, signature_end: str, max_line_length: int = 110
):
    """pretty formatting to avoid long signatures on one single line"""
    # Stand in an 'x' placeholder of identical length for the dotted name,
    # so black wraps the arguments exactly as it would for the real name.
    placeholder = 'x' * len(signature_start)
    fake_python_code = f'def {placeholder}{signature_end}:\n    pass\n'

    # Let black decide the (possibly multiline) layout.
    mode = black.FileMode(line_length=max_line_length)
    formatted = black.format_str(fake_python_code, mode=mode)

    # Re-attach the real dotted name to black's argument list.
    return signature_start + extract_signature_end(formatted)
75 |
76 |
def extract_signature_end(function_definition):
    """Return the parenthesized argument list of a formatted definition."""
    opening = function_definition.find('(')
    closing = function_definition.rfind(')')
    return function_definition[opening:closing + 1]
81 |
--------------------------------------------------------------------------------
/keras_autodoc/utils.py:
--------------------------------------------------------------------------------
1 | import re
2 | import os
3 | import inspect
4 | import importlib
5 |
6 |
def count_leading_spaces(s):
    """Return the index of the first non-whitespace character (0 if none)."""
    first_non_ws = re.search(r"\S", s)
    return first_non_ws.start() if first_non_ws else 0
13 |
14 |
def insert_in_file(markdown_text, file_path):
    """Save module page.

    Either insert content into existing page,
    or create page otherwise."""
    if not file_path.exists():
        print("...creating new page with autogenerated content:", file_path)
        os.makedirs(file_path.parent, exist_ok=True)
        file_path.write_text(markdown_text, encoding="utf-8")
        return
    template = file_path.read_text(encoding="utf-8")
    if "{{autogenerated}}" not in template:
        raise RuntimeError(f"Template found for {file_path} but missing "
                           f"{{autogenerated}} tag.")
    print("...inserting autogenerated content into template:", file_path)
    file_path.write_text(template.replace("{{autogenerated}}", markdown_text),
                         encoding="utf-8")
31 |
32 |
def code_snippet(snippet):
    """Wrap *snippet* in a fenced python code block."""
    return '```python\n{}\n```\n'.format(snippet)
35 |
36 |
def make_source_link(cls, project_url):
    """Build the '[[source]]' markdown link for *cls*."""
    if isinstance(project_url, dict):
        # Multi-project setups map each top-level package to its own URL.
        base_module = cls.__module__.split('.')[0]
        project_url = project_url[base_module]
    module_path = cls.__module__.replace(".", "/")
    line_number = inspect.getsourcelines(cls)[-1]
    return f'[[source]]({project_url}/{module_path}.py#L{line_number})'
46 |
47 |
def format_classes_list(classes, page_name):
    """Normalize entries of *classes* (in place) to `(class, methods)` pairs."""
    for i, entry in enumerate(classes):
        if not isinstance(entry, (list, tuple)):
            classes[i] = (entry, [])
    for class_, class_methods in classes:
        if not inspect.isclass(class_):
            # TODO: add a test for this
            raise TypeError(f'{class_} was given in the class list '
                            f'of {page_name} but {class_} is not a Python class.')
    return classes
58 |
59 |
def get_class_from_method(meth):
    """Return the class that defines *meth*, or None if it is not a method.

    See
    https://stackoverflow.com/questions/3589311/
    get-defining-class-of-unbound-method-object-in-python-3/
    25959545#25959545
    """
    if inspect.ismethod(meth):
        # Bound method: walk the MRO and find the class whose __dict__
        # actually holds this exact function object.
        for cls in inspect.getmro(meth.__self__.__class__):
            if cls.__dict__.get(meth.__name__) is meth:
                return cls
        meth = meth.__func__  # fallback to __qualname__ parsing
    if inspect.isfunction(meth):
        # Plain function: take the first component of __qualname__
        # ('Cls.method' -> 'Cls') and look it up in the defining module.
        cls = getattr(inspect.getmodule(meth),
                      meth.__qualname__.split('.', 1)[0].rsplit('.', 1)[0])
        if isinstance(cls, type):
            return cls
    return getattr(meth, '__objclass__', None)  # handle special descriptor objects
77 |
78 |
def ismethod(function):
    """True when *function* is defined on a class rather than at module level."""
    defining_class = get_class_from_method(function)
    return defining_class is not None
81 |
82 |
def import_object(string: str):
    """Import an object from a string.

    The object can be a function, class or method.
    For example: `'keras.layers.Dense.get_weights'` is valid.
    """
    current = None
    dotted_prefix = []
    for part in string.split('.'):
        dotted_prefix.append(part)
        try:
            # Import the longest prefix that is still a module...
            current = importlib.import_module('.'.join(dotted_prefix))
        except ModuleNotFoundError:
            # ...then walk the remaining parts as attributes.
            current = getattr(current, part)
    return current
98 |
99 |
def get_type(object_) -> str:
    """Classify *object_* as 'class', 'method' or 'function'.

    # Raises
        TypeError: If the object is none of the three.
    """
    if inspect.isclass(object_):
        return 'class'
    elif ismethod(object_):
        return 'method'
    elif inspect.isfunction(object_):
        return 'function'
    else:
        # Fixed message: the two f-strings previously concatenated into
        # "...a method nora function." (missing space).
        raise TypeError(f'{object_} is detected as neither a class, a method '
                        f'nor a function.')
110 |
111 |
def insert_in_string(target, string_to_insert, start, end):
    """Replace target[start:end] with *string_to_insert*."""
    return target[:start] + string_to_insert + target[end:]
116 |
117 |
def remove_indentation(string):
    """Strip one 4-space indentation level from every line of *string*."""
    string = string.replace('\n    ', '\n')
    if string.startswith('    '):
        string = string[4:]
    return string
123 |
124 |
def get_dotted_path(class_):
    """Return the fully qualified 'module.QualName' path of *class_*."""
    return '.'.join((class_.__module__, class_.__qualname__))
127 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from setuptools import find_packages
3 | from pathlib import Path
4 |
# Resolve the README relative to this file so the build works from any cwd.
this_file = Path(__file__).resolve()
readme = this_file.parent / 'README.md'


setup(
    name='keras-autodoc',
    version='0.8.0',
    packages=find_packages(),
    # black is pinned: generated signatures depend on its formatting output
    # (see keras_autodoc/get_signatures.py).
    install_requires=['markdown', 'sphinx', 'black==22.8.0'],
    package_data={'': ['README.md']},
    author='The Keras team',
    author_email='gabrieldemarmiesse@gmail.com',
    description='Building the Keras projects docs.',
    long_description=readme.read_text(encoding='utf-8'),
    long_description_content_type='text/markdown',
    url='https://github.com/keras-team/keras-autodoc',
    license='Apache License 2.0',
    # Extras used by tests/Dockerfile's integration_tests stage.
    extras_require={'tests': ['pytest', 'pytest-pep8', 'keras-tuner', 'tensorflow>=2.9.1']},
    classifiers=[
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Utilities',
        'Topic :: Documentation',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: Apache Software License'
    ]
)
37 |
--------------------------------------------------------------------------------
/tests/Dockerfile:
--------------------------------------------------------------------------------
# Shared base with the runtime/test dependencies preinstalled.
FROM python:3.7 as base_image

RUN pip install tensorflow
RUN pip install markdown mkdocs pytest sphinx

# Integration tests install the [tests] extras declared in setup.py.
FROM base_image as integration_tests
COPY ./ ./keras-autodoc
RUN pip install -e "./keras-autodoc[tests]"
WORKDIR keras-autodoc
RUN pytest tests/test_integration.py

# Unit tests run everything except the integration suite.
FROM base_image as unit_tests_py
COPY ./ ./keras-autodoc
RUN pip install -e ./keras-autodoc
WORKDIR keras-autodoc
RUN pytest --ignore=tests/test_integration.py tests/

# Lint stage (configuration in .flake8).
FROM base_image as flake8_tests
RUN pip install flake8
COPY ./ ./keras-autodoc
WORKDIR keras-autodoc
RUN flake8 .

# Builds the project's own documentation as a smoke test.
FROM base_image as doc_tests
COPY ./ ./keras-autodoc
RUN pip install -e keras-autodoc
WORKDIR keras-autodoc/docs
RUN python autogen.py
RUN mkdocs build

# Verifies a plain (non-editable) install succeeds.
FROM base_image as test_release
COPY ./ ./keras-autodoc
RUN pip install ./keras-autodoc
34 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keras-team/keras-autodoc/c905423a13253ad0af4d0948abc216c0fc07dbf7/tests/__init__.py
--------------------------------------------------------------------------------
/tests/autogen_future.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from keras_autodoc import autogen
4 |
5 | from . import dummy_package
6 |
7 |
def doing_things(an_argument: dummy_package.DataGenerator):
    """A function

    # Arguments
        an_argument: Some generator

    """
    # NOTE: intentionally empty -- this function exists only as a fixture
    # whose docstring and annotation are rendered by the test below; the
    # docstring text must not change.
15 |
16 |
def test_rendinging_with_extra_alias():
    """The alias given in extra_aliases must replace the true dotted path."""
    extra_aliases = ["tests.dummy_package.DataGenerator"]
    generator = autogen.DocumentationGenerator(extra_aliases=extra_aliases)
    generated = generator._render(doing_things)
    assert "- __an_argument__ `tests.dummy_package.DataGenerator`: Some" in generated
22 |
--------------------------------------------------------------------------------
/tests/dummy_package/__init__.py:
--------------------------------------------------------------------------------
from .dummy_module import to_categorical, ImageDataGenerator

# Alias exercised by the alias-resolution test in tests/autogen_future.py.
DataGenerator = ImageDataGenerator
4 |
--------------------------------------------------------------------------------
/tests/dummy_package/dummy_module.py:
--------------------------------------------------------------------------------
class Dense:
    """Just your regular densely-connected NN layer.

    `Dense` implements the operation:
    `output = activation(dot(input, kernel) + bias)`
    where `activation` is the element-wise activation function
    passed as the `activation` argument, `kernel` is a weights matrix
    created by the layer, and `bias` is a bias vector created by the layer
    (only applicable if `use_bias` is `True`).

    Note: if the input to the layer has a rank greater than 2, then
    it is flattened prior to the initial dot product with `kernel`.

    # Example

    ```python
    # as first layer in a sequential model:
    model = Sequential()
    model.add(Dense(32, input_shape=(16,)))
    # now the model will take as input arrays of shape (*, 16)
    # and output arrays of shape (*, 32)

    # after the first layer, you don't need to specify
    # the size of the input anymore:
    model.add(Dense(32))
    ```

    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you don't specify anything, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).

    # Input shape
        nD tensor with shape: `(batch_size, ..., input_dim)`.
        The most common situation would be
        a 2D input with shape `(batch_size, input_dim)`.

    # Output shape
        nD tensor with shape: `(batch_size, ..., units)`.
        For instance, for a 2D input with shape `(batch_size, input_dim)`,
        the output would have shape `(batch_size, units)`.
    """

    # NOTE(review): documentation-generation fixture -- the stub constructor
    # only declares the documented arguments; the docstring text above is
    # compared against expected output and must not change.
    def __init__(self, units,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        ...
76 |
77 |
def to_categorical(y, num_classes=None, dtype='float32'):
    """Converts a class vector (integers) to binary class matrix.

    E.g. for use with categorical_crossentropy.

    # Arguments
        y: class vector to be converted into a matrix
            (integers from 0 to num_classes).
        num_classes: total number of classes.
        dtype: The data type expected by the input, as a string
            (`float32`, `float64`, `int32`...)

    # Returns
        A binary matrix representation of the input. The classes axis
        is placed last.

    # Example

    ```python
    # Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:
    > labels
    array([0, 2, 1, 2, 0])
    # `to_categorical` converts this into a matrix with as many
    # columns as there are classes. The number of rows
    # stays the same.
    > to_categorical(labels)
    array([[ 1., 0., 0.],
           [ 0., 0., 1.],
           [ 0., 1., 0.],
           [ 0., 0., 1.],
           [ 1., 0., 0.]], dtype=float32)
    ```
    """
    # NOTE(review): docstring-only fixture; intentionally has no body.
111 |
112 |
113 | class ImageDataGenerator:
114 | """Generate batches of tensor image data with real-time data augmentation.
115 |
116 | The data will be looped over (in batches).
117 |
118 | # Arguments
119 | featurewise_center: Boolean.
120 | Set input mean to 0 over the dataset, feature-wise.
121 | samplewise_center: Boolean. Set each sample mean to 0.
122 | featurewise_std_normalization: Boolean.
123 | Divide inputs by std of the dataset, feature-wise.
124 | samplewise_std_normalization: Boolean. Divide each input by its std.
125 | zca_whitening: Boolean. Apply ZCA whitening.
126 | zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
127 | rotation_range: Int. Degree range for random rotations.
128 | width_shift_range: Float, 1-D array-like or int
129 | - float: fraction of total width, if < 1, or pixels if >= 1.
130 | - 1-D array-like: random elements from the array.
131 | - int: integer number of pixels from interval
132 | `(-width_shift_range, +width_shift_range)`
133 | - With `width_shift_range=2` possible values
134 | are integers `[-1, 0, +1]`,
135 | same as with `width_shift_range=[-1, 0, +1]`,
136 | while with `width_shift_range=1.0` possible values are floats
137 | in the interval `[-1.0, +1.0)`.
138 | height_shift_range: Float, 1-D array-like or int
139 | - float: fraction of total height, if < 1, or pixels if >= 1.
140 | - 1-D array-like: random elements from the array.
141 | - int: integer number of pixels from interval
142 | `(-height_shift_range, +height_shift_range)`
143 | - With `height_shift_range=2` possible values
144 | are integers `[-1, 0, +1]`,
145 | same as with `height_shift_range=[-1, 0, +1]`,
146 | while with `height_shift_range=1.0` possible values are floats
147 | in the interval `[-1.0, +1.0)`.
148 | brightness_range: Tuple or list of two floats. Range for picking
149 | a brightness shift value from.
150 | shear_range: Float. Shear Intensity
151 | (Shear angle in counter-clockwise direction in degrees)
152 | zoom_range: Float or [lower, upper]. Range for random zoom.
153 | If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
154 | channel_shift_range: Float. Range for random channel shifts.
155 | fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
156 | Default is 'nearest'.
157 | Points outside the boundaries of the input are filled
158 | according to the given mode:
159 | - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
160 | - 'nearest': aaaaaaaa|abcd|dddddddd
161 | - 'reflect': abcddcba|abcd|dcbaabcd
162 | - 'wrap': abcdabcd|abcd|abcdabcd
163 | cval: Float or Int.
164 | Value used for points outside the boundaries
165 | when `fill_mode = "constant"`.
166 | horizontal_flip: Boolean. Randomly flip inputs horizontally.
167 | vertical_flip: Boolean. Randomly flip inputs vertically.
168 | rescale: rescaling factor. Defaults to None.
169 | If None or 0, no rescaling is applied,
170 | otherwise we multiply the data by the value provided
171 | (after applying all other transformations).
172 | preprocessing_function: function that will be applied on each input.
173 | The function will run after the image is resized and augmented.
174 | The function should take one argument:
175 | one image (NumPy tensor with rank 3),
176 | and should output a NumPy tensor with the same shape.
177 | data_format: Image data format,
178 | either "channels_first" or "channels_last".
179 | "channels_last" mode means that the images should have shape
180 | `(samples, height, width, channels)`,
181 | "channels_first" mode means that the images should have shape
182 | `(samples, channels, height, width)`.
183 | It defaults to the `image_data_format` value found in your
184 | Keras config file at `~/.keras/keras.json`.
185 | If you never set it, then it will be "channels_last".
186 | validation_split: Float. Fraction of images reserved for validation
187 | (strictly between 0 and 1).
188 | interpolation_order: int, order to use for
189 | the spline interpolation. Higher is slower.
190 | dtype: Dtype to use for the generated arrays.
191 |
192 | # Examples
193 |
194 | Example of using `.flow(x, y)`:
195 | ```python
196 | (x_train, y_train), (x_test, y_test) = cifar10.load_data()
197 | y_train = np_utils.to_categorical(y_train, num_classes)
198 | y_test = np_utils.to_categorical(y_test, num_classes)
199 | datagen = ImageDataGenerator(
200 | featurewise_center=True,
201 | featurewise_std_normalization=True,
202 | rotation_range=20,
203 | width_shift_range=0.2,
204 | height_shift_range=0.2,
205 | horizontal_flip=True)
206 | # compute quantities required for featurewise normalization
207 | # (std, mean, and principal components if ZCA whitening is applied)
208 | datagen.fit(x_train)
209 | # fits the model on batches with real-time data augmentation:
210 | model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
211 | steps_per_epoch=len(x_train) / 32, epochs=epochs)
212 | # here's a more "manual" example
213 | for e in range(epochs):
214 | print('Epoch', e)
215 | batches = 0
216 | for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
217 | model.fit(x_batch, y_batch)
218 | batches += 1
219 | if batches >= len(x_train) / 32:
220 | # we need to break the loop by hand because
221 | # the generator loops indefinitely
222 | break
223 | ```
224 | Example of using `.flow_from_directory(directory)`:
225 | ```python
226 | train_datagen = ImageDataGenerator(
227 | rescale=1./255,
228 | shear_range=0.2,
229 | zoom_range=0.2,
230 | horizontal_flip=True)
231 | test_datagen = ImageDataGenerator(rescale=1./255)
232 | train_generator = train_datagen.flow_from_directory(
233 | 'data/train',
234 | target_size=(150, 150),
235 | batch_size=32,
236 | class_mode='binary')
237 | validation_generator = test_datagen.flow_from_directory(
238 | 'data/validation',
239 | target_size=(150, 150),
240 | batch_size=32,
241 | class_mode='binary')
242 | model.fit_generator(
243 | train_generator,
244 | steps_per_epoch=2000,
245 | epochs=50,
246 | validation_data=validation_generator,
247 | validation_steps=800)
248 | ```
249 | Example of transforming images and masks together.
250 | ```python
251 | # we create two instances with the same arguments
252 | data_gen_args = dict(featurewise_center=True,
253 | featurewise_std_normalization=True,
254 | rotation_range=90,
255 | width_shift_range=0.1,
256 | height_shift_range=0.1,
257 | zoom_range=0.2)
258 | image_datagen = ImageDataGenerator(**data_gen_args)
259 | mask_datagen = ImageDataGenerator(**data_gen_args)
260 | # Provide the same seed and keyword arguments to the fit and flow methods
261 | seed = 1
262 | image_datagen.fit(images, augment=True, seed=seed)
263 | mask_datagen.fit(masks, augment=True, seed=seed)
264 | image_generator = image_datagen.flow_from_directory(
265 | 'data/images',
266 | class_mode=None,
267 | seed=seed)
268 | mask_generator = mask_datagen.flow_from_directory(
269 | 'data/masks',
270 | class_mode=None,
271 | seed=seed)
272 | # combine generators into one which yields image and masks
273 | train_generator = zip(image_generator, mask_generator)
274 | model.fit_generator(
275 | train_generator,
276 | steps_per_epoch=2000,
277 | epochs=50)
278 | ```
279 | Example of using `flow_from_dataframe(dataframe, directory, x_col, y_col)`:
280 | ```python
281 | train_df = pandas.read_csv("./train.csv")
282 | valid_df = pandas.read_csv("./valid.csv")
283 | train_datagen = ImageDataGenerator(
284 | rescale=1./255,
285 | shear_range=0.2,
286 | zoom_range=0.2,
287 | horizontal_flip=True)
288 | test_datagen = ImageDataGenerator(rescale=1./255)
289 | train_generator = train_datagen.flow_from_dataframe(
290 | dataframe=train_df,
291 | directory='data/train',
292 | x_col="filename",
293 | y_col="class",
294 | target_size=(150, 150),
295 | batch_size=32,
296 | class_mode='binary')
297 | validation_generator = test_datagen.flow_from_dataframe(
298 | dataframe=valid_df,
299 | directory='data/validation',
300 | x_col="filename",
301 | y_col="class",
302 | target_size=(150, 150),
303 | batch_size=32,
304 | class_mode='binary')
305 | model.fit_generator(
306 | train_generator,
307 | steps_per_epoch=2000,
308 | epochs=50,
309 | validation_data=validation_generator,
310 | validation_steps=800)
311 | ```
312 | """
313 |
    def __init__(self,
                 featurewise_center: bool = False,
                 samplewise_center=False,
                 featurewise_std_normalization=False,
                 samplewise_std_normalization=False,
                 zca_whitening=False,
                 zca_epsilon=1e-6,
                 rotation_range=0,
                 width_shift_range=0.,
                 height_shift_range=0.,
                 brightness_range=None,
                 shear_range=0.,
                 zoom_range=0.,
                 channel_shift_range=0.,
                 fill_mode='nearest',
                 cval=0.,
                 horizontal_flip=False,
                 vertical_flip=False,
                 rescale=None,
                 preprocessing_function=None,
                 data_format='channels_last',
                 validation_split=0.0,
                 interpolation_order=1,
                 dtype='float32'):
        # Intentionally empty stub: this class is a documentation fixture.
        # Only the constructor signature is consumed by the doc generator
        # (it is rendered verbatim in tests/dummy_package/expected.md), so the
        # body never runs. Note the single `bool` annotation on
        # `featurewise_center` — it is there on purpose to exercise
        # type-annotation rendering; do not "complete" the other annotations
        # or the expected output will no longer match.
        ...
339 |
    def flow(self,
             x,
             y=None,
             batch_size=32,
             shuffle=True,
             sample_weight=None,
             seed=None,
             save_to_dir=None,
             save_prefix='',
             save_format='png',
             subset=None):
        """Takes data & label arrays, generates batches of augmented data.

        # Arguments
            x: Input data. Numpy array of rank 4 or a tuple.
                If tuple, the first element
                should contain the images and the second element
                another numpy array or a list of numpy arrays
                that gets passed to the output
                without any modifications.
                Can be used to feed the model miscellaneous data
                along with the images.
                In case of grayscale data, the channels axis of the image array
                should have value 1, in case
                of RGB data, it should have value 3, and in case
                of RGBA data, it should have value 4.
            y: Labels.
            batch_size: Int (default: 32).
            shuffle: Boolean (default: True).
            sample_weight: Sample weights.
            seed: Int (default: None).
            save_to_dir: None or str (default: None).
                This allows you to optionally specify a directory
                to which to save the augmented pictures being generated
                (useful for visualizing what you are doing).
            save_prefix: Str (default: `''`).
                Prefix to use for filenames of saved pictures
                (only relevant if `save_to_dir` is set).
            save_format: one of "png", "jpeg"
                (only relevant if `save_to_dir` is set). Default: "png".
            subset: Subset of data (`"training"` or `"validation"`) if
                `validation_split` is set in `ImageDataGenerator`.

        # Returns
            An `Iterator` yielding tuples of `(x, y)`
            where `x` is a numpy array of image data
            (in the case of a single image input) or a list
            of numpy arrays (in the case with
            additional inputs) and `y` is a numpy array
            of corresponding labels. If 'sample_weight' is not None,
            the yielded tuples are of the form `(x, y, sample_weight)`.
            If `y` is None, only the numpy array `x` is returned.
        """
        # Stub: the docstring above is fixture data — the doc generator's
        # rendering of it is asserted byte-for-byte against expected.md, so
        # keep it unchanged. The body is never executed by the tests.
        ...
394 |
    def flow_from_directory(self,
                            directory,
                            target_size=(256, 256),
                            color_mode='rgb',
                            classes=None,
                            class_mode='categorical',
                            batch_size=32,
                            shuffle=True,
                            seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest'):
        """Takes the path to a directory & generates batches of augmented data.

        # Arguments
            directory: string, path to the target directory.
                It should contain one subdirectory per class.
                Any PNG, JPG, BMP, PPM or TIF images
                inside each of the subdirectories directory tree
                will be included in the generator.
                See [this script](
                https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
                for more details.
            target_size: Tuple of integers `(height, width)`,
                default: `(256, 256)`.
                The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
                Whether the images will be converted to
                have 1, 3, or 4 channels.
            classes: Optional list of class subdirectories
                (e.g. `['dogs', 'cats']`). Default: None.
                If not provided, the list of classes will be automatically
                inferred from the subdirectory names/structure
                under `directory`, where each subdirectory will
                be treated as a different class
                (and the order of the classes, which will map to the label
                indices, will be alphanumeric).
                The dictionary containing the mapping from class names to class
                indices can be obtained via the attribute `class_indices`.
            class_mode: One of "categorical", "binary", "sparse",
                "input", or None. Default: "categorical".
                Determines the type of label arrays that are returned:
                - "categorical" will be 2D one-hot encoded labels,
                - "binary" will be 1D binary labels,
                    "sparse" will be 1D integer labels,
                - "input" will be images identical
                    to input images (mainly used to work with autoencoders).
                - If None, no labels are returned
                  (the generator will only yield batches of image data,
                  which is useful to use with `model.predict_generator()`).
                  Please note that in case of class_mode None,
                  the data still needs to reside in a subdirectory
                  of `directory` for it to work correctly.
            batch_size: Size of the batches of data (default: 32).
            shuffle: Whether to shuffle the data (default: True)
                If set to False, sorts the data in alphanumeric order.
            seed: Optional random seed for shuffling and transformations.
            save_to_dir: None or str (default: None).
                This allows you to optionally specify
                a directory to which to save
                the augmented pictures being generated
                (useful for visualizing what you are doing).
            save_prefix: Str. Prefix to use for filenames of saved pictures
                (only relevant if `save_to_dir` is set).
            save_format: One of "png", "jpeg"
                (only relevant if `save_to_dir` is set). Default: "png".
            follow_links: Whether to follow symlinks inside
                class subdirectories (default: False).
            subset: Subset of data (`"training"` or `"validation"`) if
                `validation_split` is set in `ImageDataGenerator`.
            interpolation: Interpolation method used to
                resample the image if the
                target size is different from that of the loaded image.
                Supported methods are `"nearest"`, `"bilinear"`,
                and `"bicubic"`.
                If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
                supported. If PIL version 3.4.0 or newer is installed,
                `"box"` and `"hamming"` are also supported.
                By default, `"nearest"` is used.

        # Returns
            A `DirectoryIterator` yielding tuples of `(x, y)`
            where `x` is a numpy array containing a batch
            of images with shape `(batch_size, *target_size, channels)`
            and `y` is a numpy array of corresponding labels.
        """
        # Stub: the docstring above is fixture data asserted byte-for-byte
        # against expected.md (including its quirks, e.g. the missing "-"
        # bullet before the "sparse" entry) — do not "fix" it here without
        # regenerating the expected output. The body is never executed.
        ...
485 |
--------------------------------------------------------------------------------
/tests/dummy_package/dummy_module2.py:
--------------------------------------------------------------------------------
1 | from .dummy_module import ImageDataGenerator
2 |
3 |
def dodo(x: ImageDataGenerator):
    """Some dodo"""
    # Fixture function: exists to exercise rendering of a project-class type
    # hint (ImageDataGenerator) in the generated signature. Do not add a
    # return annotation or expand the docstring — the generated documentation
    # is compared against a fixed expected output.
    pass
7 |
--------------------------------------------------------------------------------
/tests/dummy_package/expected.md:
--------------------------------------------------------------------------------
1 | [[source]](www.dummy.com/my_project/tests/dummy_package/dummy_module.py#L1)
2 |
3 | ### Dense
4 |
5 |
6 | ```python
7 | tests.dummy_package.dummy_module.Dense(
8 | units,
9 | activation=None,
10 | use_bias=True,
11 | kernel_initializer="glorot_uniform",
12 | bias_initializer="zeros",
13 | kernel_regularizer=None,
14 | bias_regularizer=None,
15 | activity_regularizer=None,
16 | kernel_constraint=None,
17 | bias_constraint=None,
18 | **kwargs
19 | )
20 | ```
21 |
22 |
23 | Just your regular densely-connected NN layer.
24 |
25 | `Dense` implements the operation:
26 | `output = activation(dot(input, kernel) + bias)`
27 | where `activation` is the element-wise activation function
28 | passed as the `activation` argument, `kernel` is a weights matrix
29 | created by the layer, and `bias` is a bias vector created by the layer
30 | (only applicable if `use_bias` is `True`).
31 |
32 | Note: if the input to the layer has a rank greater than 2, then
33 | it is flattened prior to the initial dot product with `kernel`.
34 |
35 | __Example__
36 |
37 | ```python
38 | # as first layer in a sequential model:
39 | model = Sequential()
40 | model.add(Dense(32, input_shape=(16,)))
41 | # now the model will take as input arrays of shape (*, 16)
42 | # and output arrays of shape (*, 32)
43 |
44 | # after the first layer, you don't need to specify
45 | # the size of the input anymore:
46 | model.add(Dense(32))
47 | ```
48 |
49 | __Arguments__
50 |
51 | - __units__: Positive integer, dimensionality of the output space.
52 | - __activation__: Activation function to use
53 | (see [activations](../activations.md)).
54 | If you don't specify anything, no activation is applied
55 | (ie. "linear" activation: `a(x) = x`).
56 | - __use_bias__: Boolean, whether the layer uses a bias vector.
57 | - __kernel_initializer__: Initializer for the `kernel` weights matrix
58 | (see [initializers](../initializers.md)).
59 | - __bias_initializer__: Initializer for the bias vector
60 | (see [initializers](../initializers.md)).
61 | - __kernel_regularizer__: Regularizer function applied to
62 | the `kernel` weights matrix
63 | (see [regularizer](../regularizers.md)).
64 | - __bias_regularizer__: Regularizer function applied to the bias vector
65 | (see [regularizer](../regularizers.md)).
66 | - __activity_regularizer__: Regularizer function applied to
67 | the output of the layer (its "activation").
68 | (see [regularizer](../regularizers.md)).
69 | - __kernel_constraint__: Constraint function applied to
70 | the `kernel` weights matrix
71 | (see [constraints](../constraints.md)).
72 | - __bias_constraint__: Constraint function applied to the bias vector
73 | (see [constraints](../constraints.md)).
74 |
75 | __Input shape__
76 |
77 | nD tensor with shape: `(batch_size, ..., input_dim)`.
78 | The most common situation would be
79 | a 2D input with shape `(batch_size, input_dim)`.
80 |
81 | __Output shape__
82 |
83 | nD tensor with shape: `(batch_size, ..., units)`.
84 | For instance, for a 2D input with shape `(batch_size, input_dim)`,
85 | the output would have shape `(batch_size, units)`.
86 |
87 |
88 | ----
89 |
90 | [[source]](www.dummy.com/my_project/tests/dummy_package/dummy_module.py#L113)
91 |
92 | ### ImageDataGenerator
93 |
94 |
95 | ```python
96 | tests.dummy_package.dummy_module.ImageDataGenerator(
97 | featurewise_center=False,
98 | samplewise_center=False,
99 | featurewise_std_normalization=False,
100 | samplewise_std_normalization=False,
101 | zca_whitening=False,
102 | zca_epsilon=1e-06,
103 | rotation_range=0,
104 | width_shift_range=0.0,
105 | height_shift_range=0.0,
106 | brightness_range=None,
107 | shear_range=0.0,
108 | zoom_range=0.0,
109 | channel_shift_range=0.0,
110 | fill_mode="nearest",
111 | cval=0.0,
112 | horizontal_flip=False,
113 | vertical_flip=False,
114 | rescale=None,
115 | preprocessing_function=None,
116 | data_format="channels_last",
117 | validation_split=0.0,
118 | interpolation_order=1,
119 | dtype="float32",
120 | )
121 | ```
122 |
123 |
124 | Generate batches of tensor image data with real-time data augmentation.
125 |
126 | The data will be looped over (in batches).
127 |
128 | __Arguments__
129 |
130 | - __featurewise_center__ `bool`: Boolean.
131 | Set input mean to 0 over the dataset, feature-wise.
132 | - __samplewise_center__: Boolean. Set each sample mean to 0.
133 | - __featurewise_std_normalization__: Boolean.
134 | Divide inputs by std of the dataset, feature-wise.
135 | - __samplewise_std_normalization__: Boolean. Divide each input by its std.
136 | - __zca_whitening__: Boolean. Apply ZCA whitening.
137 | - __zca_epsilon__: epsilon for ZCA whitening. Default is 1e-6.
138 | - __rotation_range__: Int. Degree range for random rotations.
139 | - __width_shift_range__: Float, 1-D array-like or int
140 | - float: fraction of total width, if < 1, or pixels if >= 1.
141 | - 1-D array-like: random elements from the array.
142 | - int: integer number of pixels from interval
143 | `(-width_shift_range, +width_shift_range)`
144 | - With `width_shift_range=2` possible values
145 | are integers `[-1, 0, +1]`,
146 | same as with `width_shift_range=[-1, 0, +1]`,
147 | while with `width_shift_range=1.0` possible values are floats
148 | in the interval `[-1.0, +1.0)`.
149 | - __height_shift_range__: Float, 1-D array-like or int
150 | - float: fraction of total height, if < 1, or pixels if >= 1.
151 | - 1-D array-like: random elements from the array.
152 | - int: integer number of pixels from interval
153 | `(-height_shift_range, +height_shift_range)`
154 | - With `height_shift_range=2` possible values
155 | are integers `[-1, 0, +1]`,
156 | same as with `height_shift_range=[-1, 0, +1]`,
157 | while with `height_shift_range=1.0` possible values are floats
158 | in the interval `[-1.0, +1.0)`.
159 | - __brightness_range__: Tuple or list of two floats. Range for picking
160 | a brightness shift value from.
161 | - __shear_range__: Float. Shear Intensity
162 | (Shear angle in counter-clockwise direction in degrees)
163 | - __zoom_range__: Float or [lower, upper]. Range for random zoom.
164 | If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
165 | - __channel_shift_range__: Float. Range for random channel shifts.
166 | - __fill_mode__: One of {"constant", "nearest", "reflect" or "wrap"}.
167 | Default is 'nearest'.
168 | Points outside the boundaries of the input are filled
169 | according to the given mode:
170 | - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
171 | - 'nearest': aaaaaaaa|abcd|dddddddd
172 | - 'reflect': abcddcba|abcd|dcbaabcd
173 | - 'wrap': abcdabcd|abcd|abcdabcd
174 | - __cval__: Float or Int.
175 | Value used for points outside the boundaries
176 | when `fill_mode = "constant"`.
177 | - __horizontal_flip__: Boolean. Randomly flip inputs horizontally.
178 | - __vertical_flip__: Boolean. Randomly flip inputs vertically.
179 | - __rescale__: rescaling factor. Defaults to None.
180 | If None or 0, no rescaling is applied,
181 | otherwise we multiply the data by the value provided
182 | (after applying all other transformations).
183 | - __preprocessing_function__: function that will be applied on each input.
184 | The function will run after the image is resized and augmented.
185 | The function should take one argument:
186 | one image (NumPy tensor with rank 3),
187 | and should output a NumPy tensor with the same shape.
188 | - __data_format__: Image data format,
189 | either "channels_first" or "channels_last".
190 | "channels_last" mode means that the images should have shape
191 | `(samples, height, width, channels)`,
192 | "channels_first" mode means that the images should have shape
193 | `(samples, channels, height, width)`.
194 | It defaults to the `image_data_format` value found in your
195 | Keras config file at `~/.keras/keras.json`.
196 | If you never set it, then it will be "channels_last".
197 | - __validation_split__: Float. Fraction of images reserved for validation
198 | (strictly between 0 and 1).
199 | - __interpolation_order__: int, order to use for
200 | the spline interpolation. Higher is slower.
201 | - __dtype__: Dtype to use for the generated arrays.
202 |
203 | __Examples__
204 |
205 | Example of using `.flow(x, y)`:
206 | ```python
207 | (x_train, y_train), (x_test, y_test) = cifar10.load_data()
208 | y_train = np_utils.to_categorical(y_train, num_classes)
209 | y_test = np_utils.to_categorical(y_test, num_classes)
210 | datagen = ImageDataGenerator(
211 | featurewise_center=True,
212 | featurewise_std_normalization=True,
213 | rotation_range=20,
214 | width_shift_range=0.2,
215 | height_shift_range=0.2,
216 | horizontal_flip=True)
217 | # compute quantities required for featurewise normalization
218 | # (std, mean, and principal components if ZCA whitening is applied)
219 | datagen.fit(x_train)
220 | # fits the model on batches with real-time data augmentation:
221 | model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
222 | steps_per_epoch=len(x_train) / 32, epochs=epochs)
223 | # here's a more "manual" example
224 | for e in range(epochs):
225 | print('Epoch', e)
226 | batches = 0
227 | for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
228 | model.fit(x_batch, y_batch)
229 | batches += 1
230 | if batches >= len(x_train) / 32:
231 | # we need to break the loop by hand because
232 | # the generator loops indefinitely
233 | break
234 | ```
235 | Example of using `.flow_from_directory(directory)`:
236 | ```python
237 | train_datagen = ImageDataGenerator(
238 | rescale=1./255,
239 | shear_range=0.2,
240 | zoom_range=0.2,
241 | horizontal_flip=True)
242 | test_datagen = ImageDataGenerator(rescale=1./255)
243 | train_generator = train_datagen.flow_from_directory(
244 | 'data/train',
245 | target_size=(150, 150),
246 | batch_size=32,
247 | class_mode='binary')
248 | validation_generator = test_datagen.flow_from_directory(
249 | 'data/validation',
250 | target_size=(150, 150),
251 | batch_size=32,
252 | class_mode='binary')
253 | model.fit_generator(
254 | train_generator,
255 | steps_per_epoch=2000,
256 | epochs=50,
257 | validation_data=validation_generator,
258 | validation_steps=800)
259 | ```
260 | Example of transforming images and masks together.
261 | ```python
262 | # we create two instances with the same arguments
263 | data_gen_args = dict(featurewise_center=True,
264 | featurewise_std_normalization=True,
265 | rotation_range=90,
266 | width_shift_range=0.1,
267 | height_shift_range=0.1,
268 | zoom_range=0.2)
269 | image_datagen = ImageDataGenerator(**data_gen_args)
270 | mask_datagen = ImageDataGenerator(**data_gen_args)
271 | # Provide the same seed and keyword arguments to the fit and flow methods
272 | seed = 1
273 | image_datagen.fit(images, augment=True, seed=seed)
274 | mask_datagen.fit(masks, augment=True, seed=seed)
275 | image_generator = image_datagen.flow_from_directory(
276 | 'data/images',
277 | class_mode=None,
278 | seed=seed)
279 | mask_generator = mask_datagen.flow_from_directory(
280 | 'data/masks',
281 | class_mode=None,
282 | seed=seed)
283 | # combine generators into one which yields image and masks
284 | train_generator = zip(image_generator, mask_generator)
285 | model.fit_generator(
286 | train_generator,
287 | steps_per_epoch=2000,
288 | epochs=50)
289 | ```
290 | Example of using `flow_from_dataframe(dataframe, directory, x_col, y_col)`:
291 | ```python
292 | train_df = pandas.read_csv("./train.csv")
293 | valid_df = pandas.read_csv("./valid.csv")
294 | train_datagen = ImageDataGenerator(
295 | rescale=1./255,
296 | shear_range=0.2,
297 | zoom_range=0.2,
298 | horizontal_flip=True)
299 | test_datagen = ImageDataGenerator(rescale=1./255)
300 | train_generator = train_datagen.flow_from_dataframe(
301 | dataframe=train_df,
302 | directory='data/train',
303 | x_col="filename",
304 | y_col="class",
305 | target_size=(150, 150),
306 | batch_size=32,
307 | class_mode='binary')
308 | validation_generator = test_datagen.flow_from_dataframe(
309 | dataframe=valid_df,
310 | directory='data/validation',
311 | x_col="filename",
312 | y_col="class",
313 | target_size=(150, 150),
314 | batch_size=32,
315 | class_mode='binary')
316 | model.fit_generator(
317 | train_generator,
318 | steps_per_epoch=2000,
319 | epochs=50,
320 | validation_data=validation_generator,
321 | validation_steps=800)
322 | ```
323 |
324 |
325 | ----
326 |
327 | [[source]](www.dummy.com/my_project/tests/dummy_package/dummy_module.py#L340)
328 |
329 | ### flow
330 |
331 |
332 | ```python
333 | ImageDataGenerator.flow(
334 | x,
335 | y=None,
336 | batch_size=32,
337 | shuffle=True,
338 | sample_weight=None,
339 | seed=None,
340 | save_to_dir=None,
341 | save_prefix="",
342 | save_format="png",
343 | subset=None,
344 | )
345 | ```
346 |
347 |
348 | Takes data & label arrays, generates batches of augmented data.
349 |
350 | __Arguments__
351 |
352 | - __x__: Input data. Numpy array of rank 4 or a tuple.
353 | If tuple, the first element
354 | should contain the images and the second element
355 | another numpy array or a list of numpy arrays
356 | that gets passed to the output
357 | without any modifications.
358 | Can be used to feed the model miscellaneous data
359 | along with the images.
360 | In case of grayscale data, the channels axis of the image array
361 | should have value 1, in case
362 | of RGB data, it should have value 3, and in case
363 | of RGBA data, it should have value 4.
364 | - __y__: Labels.
365 | - __batch_size__: Int (default: 32).
366 | - __shuffle__: Boolean (default: True).
367 | - __sample_weight__: Sample weights.
368 | - __seed__: Int (default: None).
369 | - __save_to_dir__: None or str (default: None).
370 | This allows you to optionally specify a directory
371 | to which to save the augmented pictures being generated
372 | (useful for visualizing what you are doing).
373 | - __save_prefix__: Str (default: `''`).
374 | Prefix to use for filenames of saved pictures
375 | (only relevant if `save_to_dir` is set).
376 | - __save_format__: one of "png", "jpeg"
377 | (only relevant if `save_to_dir` is set). Default: "png".
378 | - __subset__: Subset of data (`"training"` or `"validation"`) if
379 | `validation_split` is set in `ImageDataGenerator`.
380 |
381 | __Returns__
382 |
383 | An `Iterator` yielding tuples of `(x, y)`
384 | where `x` is a numpy array of image data
385 | (in the case of a single image input) or a list
386 | of numpy arrays (in the case with
387 | additional inputs) and `y` is a numpy array
388 | of corresponding labels. If 'sample_weight' is not None,
389 | the yielded tuples are of the form `(x, y, sample_weight)`.
390 | If `y` is None, only the numpy array `x` is returned.
391 |
392 |
393 | ----
394 |
395 | [[source]](www.dummy.com/my_project/tests/dummy_package/dummy_module.py#L395)
396 |
397 | ### flow_from_directory
398 |
399 |
400 | ```python
401 | ImageDataGenerator.flow_from_directory(
402 | directory,
403 | target_size=(256, 256),
404 | color_mode="rgb",
405 | classes=None,
406 | class_mode="categorical",
407 | batch_size=32,
408 | shuffle=True,
409 | seed=None,
410 | save_to_dir=None,
411 | save_prefix="",
412 | save_format="png",
413 | follow_links=False,
414 | subset=None,
415 | interpolation="nearest",
416 | )
417 | ```
418 |
419 |
420 | Takes the path to a directory & generates batches of augmented data.
421 |
422 | __Arguments__
423 |
424 | - __directory__: string, path to the target directory.
425 | It should contain one subdirectory per class.
426 | Any PNG, JPG, BMP, PPM or TIF images
427 | inside each of the subdirectories directory tree
428 | will be included in the generator.
429 | See [this script](
430 | https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
431 | for more details.
432 | - __target_size__: Tuple of integers `(height, width)`,
433 | default: `(256, 256)`.
434 | The dimensions to which all images found will be resized.
435 | - __color_mode__: One of "grayscale", "rgb", "rgba". Default: "rgb".
436 | Whether the images will be converted to
437 | have 1, 3, or 4 channels.
438 | - __classes__: Optional list of class subdirectories
439 | (e.g. `['dogs', 'cats']`). Default: None.
440 | If not provided, the list of classes will be automatically
441 | inferred from the subdirectory names/structure
442 | under `directory`, where each subdirectory will
443 | be treated as a different class
444 | (and the order of the classes, which will map to the label
445 | indices, will be alphanumeric).
446 | The dictionary containing the mapping from class names to class
447 | indices can be obtained via the attribute `class_indices`.
448 | - __class_mode__: One of "categorical", "binary", "sparse",
449 | "input", or None. Default: "categorical".
450 | Determines the type of label arrays that are returned:
451 | - "categorical" will be 2D one-hot encoded labels,
452 | - "binary" will be 1D binary labels,
453 | "sparse" will be 1D integer labels,
454 | - "input" will be images identical
455 | to input images (mainly used to work with autoencoders).
456 | - If None, no labels are returned
457 | (the generator will only yield batches of image data,
458 | which is useful to use with `model.predict_generator()`).
459 | Please note that in case of class_mode None,
460 | the data still needs to reside in a subdirectory
461 | of `directory` for it to work correctly.
462 | - __batch_size__: Size of the batches of data (default: 32).
463 | - __shuffle__: Whether to shuffle the data (default: True)
464 | If set to False, sorts the data in alphanumeric order.
465 | - __seed__: Optional random seed for shuffling and transformations.
466 | - __save_to_dir__: None or str (default: None).
467 | This allows you to optionally specify
468 | a directory to which to save
469 | the augmented pictures being generated
470 | (useful for visualizing what you are doing).
471 | - __save_prefix__: Str. Prefix to use for filenames of saved pictures
472 | (only relevant if `save_to_dir` is set).
473 | - __save_format__: One of "png", "jpeg"
474 | (only relevant if `save_to_dir` is set). Default: "png".
475 | - __follow_links__: Whether to follow symlinks inside
476 | class subdirectories (default: False).
477 | - __subset__: Subset of data (`"training"` or `"validation"`) if
478 | `validation_split` is set in `ImageDataGenerator`.
479 | - __interpolation__: Interpolation method used to
480 | resample the image if the
481 | target size is different from that of the loaded image.
482 | Supported methods are `"nearest"`, `"bilinear"`,
483 | and `"bicubic"`.
484 | If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
485 | supported. If PIL version 3.4.0 or newer is installed,
486 | `"box"` and `"hamming"` are also supported.
487 | By default, `"nearest"` is used.
488 |
489 | __Returns__
490 |
491 | A `DirectoryIterator` yielding tuples of `(x, y)`
492 | where `x` is a numpy array containing a batch
493 | of images with shape `(batch_size, *target_size, channels)`
494 | and `y` is a numpy array of corresponding labels.
495 |
496 |
497 | ----
498 |
499 | [[source]](www.dummy.com/my_project/tests/dummy_package/dummy_module.py#L78)
500 |
501 | ### to_categorical
502 |
503 |
504 | ```python
505 | tests.dummy_package.dummy_module.to_categorical(y, num_classes=None, dtype="float32")
506 | ```
507 |
508 |
509 | Converts a class vector (integers) to binary class matrix.
510 |
511 | E.g. for use with categorical_crossentropy.
512 |
513 | __Arguments__
514 |
515 | - __y__: class vector to be converted into a matrix
516 | (integers from 0 to num_classes).
517 | - __num_classes__: total number of classes.
518 | - __dtype__: The data type expected by the input, as a string
519 | (`float32`, `float64`, `int32`...)
520 |
521 | __Returns__
522 |
523 | A binary matrix representation of the input. The classes axis
524 | is placed last.
525 |
526 | __Example__
527 |
528 | ```python
529 | # Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:
530 | > labels
531 | array([0, 2, 1, 2, 0])
532 | # `to_categorical` converts this into a matrix with as many
533 | # columns as there are classes. The number of rows
534 | # stays the same.
535 | > to_categorical(labels)
536 | array([[ 1., 0., 0.],
537 | [ 0., 0., 1.],
538 | [ 0., 1., 0.],
539 | [ 0., 0., 1.],
540 | [ 1., 0., 0.]], dtype=float32)
541 | ```
542 |
543 |
544 | ----
545 |
546 |
--------------------------------------------------------------------------------
/tests/test_autogen.py:
--------------------------------------------------------------------------------
1 | from markdown import markdown
2 | from keras_autodoc import autogen
3 | from keras_autodoc import get_methods
4 | import pytest
5 | import sys
6 | import pathlib
7 | from typing import Union, Optional, Tuple
8 | from .dummy_package import dummy_module
9 | from . import dummy_package
10 |
# Fixture: the (modified) Keras RNN docstring paired with the markdown that
# autogen.process_docstring is expected to produce for it.  Deliberately
# large so that argument lists, nested bullets, multiple sections and
# embedded code blocks are all exercised at once.
test_doc1 = {
    "doc": """Base class for recurrent layers.

    # Arguments
        cell: A RNN cell instance. A RNN cell is a class that has:
            - a `call(input_at_t, states_at_t)` method, returning
                `(output_at_t, states_at_t_plus_1)`. The call method of the
                cell can also take the optional argument `constants`, see
                section "Note on passing external constants" below.
            - a `state_size` attribute. This can be a single integer
                (single state) in which case it is
                the size of the recurrent state
                (which should be the same as the size of the cell output).
                This can also be a list/tuple of integers
                (one size per state). In this case, the first entry
                (`state_size[0]`) should be the same as
                the size of the cell output.
            It is also possible for `cell` to be a list of RNN cell instances,
            in which cases the cells get stacked on after the other in the RNN,
            implementing an efficient stacked RNN.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default False).
            If True, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.
        input_dim: dimensionality of the input (integer).
            This argument (or alternatively,
            the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
        input_length: Length of input sequences, to be specified
            when it is constant.
            This argument is required if you are going to connect
            `Flatten` then `Dense` layers upstream
            (without it, the shape of the dense outputs cannot be computed).
            Note that if the recurrent layer is not the first layer
            in your model, you would need to specify the input length
            at the level of the first layer
            (e.g. via the `input_shape` argument)

    # Input shape
        3D tensor with shape `(batch_size, timesteps, input_dim)`.

    # Output shape
        - if `return_state`: a list of tensors. The first tensor is
            the output. The remaining tensors are the last states,
            each with shape `(batch_size, units)`.
        - if `return_sequences`: 3D tensor with shape
            `(batch_size, timesteps, units)`.
        - else, 2D tensor with shape `(batch_size, units)`.

    # Masking
        This layer supports masking for input data with a variable number
        of timesteps. To introduce masks to your data,
        use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
        set to `True`.

    # Note on using statefulness in RNNs
        You can set RNN layers to be 'stateful', which means that the states
        computed for the samples in one batch will be reused as initial states
        for the samples in the next batch. This assumes a one-to-one mapping
        between samples in different successive batches.

        To enable statefulness:
            - specify `stateful=True` in the layer constructor.
            - specify a fixed batch size for your model, by passing
                if sequential model:
                    `batch_input_shape=(...)` to the first layer in your model.
                else for functional model with 1 or more Input layers:
                    `batch_shape=(...)` to all the first layers in your model.
                This is the expected shape of your inputs
                *including the batch size*.
                It should be a tuple of integers, e.g. `(32, 10, 100)`.
            - specify `shuffle=False` when calling fit().

        To reset the states of your model, call `.reset_states()` on either
        a specific layer, or on your entire model.

    # Note on specifying the initial state of RNNs

        Note that
        One: You can specify the initial state of RNN layers symbolically by
            calling them with the keyword argument `initial_state`.
        Two: The value of `initial_state` should be a tensor or list of
            tensors representing
            the initial state of the RNN layer.
        You can specify the initial state of RNN layers numerically by:
        One: calling `reset_states`
            - With the keyword argument `states`.
            - The value of
            `states` should be a numpy array or
            list of numpy arrays representing
            the initial state of the RNN layer.

    # Note on passing external constants to RNNs
        You can pass "external" constants to the cell using the `constants`
        keyword: argument of `RNN.__call__` (as well as `RNN.call`) method.
        This: requires that the `cell.call` method accepts the same keyword argument
        `constants`. Such constants can be used to condition the cell
        transformation on additional static inputs (not changing over time),
        a.k.a. an attention mechanism.

    # Examples

    ```python
    # First, let's define a RNN Cell, as a layer subclass.

    class MinimalRNNCell(keras.layers.Layer):

        def __init__(self, units, **kwargs):
            self.units = units
            self.state_size = units
            super(MinimalRNNCell, self).__init__(**kwargs)

        def build(self, input_shape):
            self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                          initializer='uniform',
                                          name='kernel')
            self.recurrent_kernel = self.add_weight(
                shape=(self.units, self.units),
                initializer='uniform',
                name='recurrent_kernel')
            self.built = True

        def call(self, inputs, states):
            prev_output = states[0]
            h = K.dot(inputs, self.kernel)
            output = h + K.dot(prev_output, self.recurrent_kernel)
            return output, [output]

    # Let's use this cell in a RNN layer:

    cell = MinimalRNNCell(32)
    x = keras.Input((None, 5))
    layer = RNN(cell)
    y = layer(x)

    # Here's how to use the cell to build a stacked RNN:

    cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
    x = keras.Input((None, 5))
    layer = RNN(cells)
    y = layer(x)
    ```
    """,
    "result": """Base class for recurrent layers.

__Arguments__

- __cell__: A RNN cell instance. A RNN cell is a class that has:
    - a `call(input_at_t, states_at_t)` method, returning
        `(output_at_t, states_at_t_plus_1)`. The call method of the
        cell can also take the optional argument `constants`, see
        section "Note on passing external constants" below.
    - a `state_size` attribute. This can be a single integer
        (single state) in which case it is
        the size of the recurrent state
        (which should be the same as the size of the cell output).
        This can also be a list/tuple of integers
        (one size per state). In this case, the first entry
        (`state_size[0]`) should be the same as
        the size of the cell output.
    It is also possible for `cell` to be a list of RNN cell instances,
    in which cases the cells get stacked on after the other in the RNN,
    implementing an efficient stacked RNN.
- __return_sequences__: Boolean. Whether to return the last output
    in the output sequence, or the full sequence.
- __return_state__: Boolean. Whether to return the last state
    in addition to the output.
- __go_backwards__: Boolean (default False).
    If True, process the input sequence backwards and return the
    reversed sequence.
- __stateful__: Boolean (default False). If True, the last state
    for each sample at index i in a batch will be used as initial
    state for the sample of index i in the following batch.
- __unroll__: Boolean (default False).
    If True, the network will be unrolled,
    else a symbolic loop will be used.
    Unrolling can speed-up a RNN,
    although it tends to be more memory-intensive.
    Unrolling is only suitable for short sequences.
- __input_dim__: dimensionality of the input (integer).
    This argument (or alternatively,
    the keyword argument `input_shape`)
    is required when using this layer as the first layer in a model.
- __input_length__: Length of input sequences, to be specified
    when it is constant.
    This argument is required if you are going to connect
    `Flatten` then `Dense` layers upstream
    (without it, the shape of the dense outputs cannot be computed).
    Note that if the recurrent layer is not the first layer
    in your model, you would need to specify the input length
    at the level of the first layer
    (e.g. via the `input_shape` argument)

__Input shape__

3D tensor with shape `(batch_size, timesteps, input_dim)`.

__Output shape__

- if `return_state`: a list of tensors. The first tensor is
    the output. The remaining tensors are the last states,
    each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
    `(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.

__Masking__

This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.

__Note on using statefulness in RNNs__

You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.

To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
    if sequential model:
        `batch_input_shape=(...)` to the first layer in your model.
    else for functional model with 1 or more Input layers:
        `batch_shape=(...)` to all the first layers in your model.
    This is the expected shape of your inputs
    *including the batch size*.
    It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().

To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.

__Note on specifying the initial state of RNNs__

Note that
One: You can specify the initial state of RNN layers symbolically by
    calling them with the keyword argument `initial_state`.
Two: The value of `initial_state` should be a tensor or list of
    tensors representing
    the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by:
One: calling `reset_states`
    - With the keyword argument `states`.
    - The value of
    `states` should be a numpy array or
    list of numpy arrays representing
    the initial state of the RNN layer.

__Note on passing external constants to RNNs__

You can pass "external" constants to the cell using the `constants`
keyword: argument of `RNN.__call__` (as well as `RNN.call`) method.
This: requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.

__Examples__

```python
# First, let's define a RNN Cell, as a layer subclass.

class MinimalRNNCell(keras.layers.Layer):

    def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(MinimalRNNCell, self).__init__(**kwargs)

    def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True

    def call(self, inputs, states):
        prev_output = states[0]
        h = K.dot(inputs, self.kernel)
        output = h + K.dot(prev_output, self.recurrent_kernel)
        return output, [output]

# Let's use this cell in a RNN layer:

cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)

# Here's how to use the cell to build a stacked RNN:

cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
""",
}
327 |
# Fixture: a docstring whose `# Arguments` section is the final block of
# text, paired with the markdown expected from autogen.process_docstring.
test_doc_with_arguments_as_last_block = {
    "doc": """Base class for recurrent layers.

    # Arguments
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
    """,
    "result": """Base class for recurrent layers.

    __Arguments__

    - __return_sequences__: Boolean. Whether to return the last output
        in the output sequence, or the full sequence.
    - __return_state__: Boolean. Whether to return the last state
        in addition to the output.
    """,
}
347 |
348 |
@pytest.mark.parametrize(
    "docs_descriptor", [test_doc_with_arguments_as_last_block, test_doc1]
)
def test_doc_lists(docs_descriptor):
    """Argument lists in a raw docstring are rendered as markdown bullets."""
    # Compare rendered HTML rather than raw text so insignificant
    # whitespace differences do not cause false failures.
    processed = autogen.process_docstring(docs_descriptor["doc"])
    expected_html = markdown(docs_descriptor["result"])
    assert markdown(processed) == expected_html
355 |
356 |
# Fixture: a docstring that embeds fenced code blocks in two different
# sections (`# Examples` and `# Numpy implementation`).
dummy_docstring = """Multiplies 2 tensors (and/or variables) and returns a *tensor*.

When attempting to multiply a nD tensor
with a nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)

# Examples
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```

# Numpy implementation
```python
def dot(x, y):
    return dot(x, y)
```
"""
379 |
380 |
def test_doc_multiple_sections_code():
    """Code blocks survive processing even when several sections have one."""
    generated = autogen.process_docstring(dummy_docstring)
    for snippet in ("# Theano-like behavior example", "def dot(x, y):"):
        assert snippet in generated
386 |
387 |
def check_against_expected(elements):
    """Render ``elements`` and compare against dummy_package/expected.md."""
    generator = autogen.DocumentationGenerator(
        project_url='www.dummy.com/my_project'
    )
    rendered = ''.join(generator._render(element) for element in elements)

    expected_path = (
        pathlib.Path(__file__).resolve().parent / 'dummy_package' / 'expected.md'
    )
    # Comparing the HTML produced by markdown() ignores differences that
    # are not relevant, such as blank lines.
    assert markdown(rendered) == markdown(expected_path.read_text())
402 |
403 |
def test_generate_markdown():
    """Documentation can be generated from live class/function objects."""
    elements = [
        dummy_module.Dense,
        dummy_module.ImageDataGenerator,
        *get_methods(dummy_module.ImageDataGenerator),
        dummy_module.to_categorical,
    ]
    check_against_expected(elements)
409 |
410 |
def test_generate_markdown_from_string():
    """Documentation can be generated from dotted-path strings."""
    prefix = 'tests.dummy_package.dummy_module.'
    names = (
        'Dense',
        'ImageDataGenerator',
        'ImageDataGenerator.flow',
        'ImageDataGenerator.flow_from_directory',
        'to_categorical',
    )
    check_against_expected([prefix + name for name in names])
420 |
421 |
@pytest.mark.parametrize('element', [
    'tests.dummy_package.DataGenerator',
    'tests.dummy_package.to_categorical'
])
def test_aliases_class_function(element):
    """The alias used to reach an object appears in its rendered signature."""
    rendered = autogen.DocumentationGenerator()._render(element)
    assert f'{element}(' in rendered
431 |
432 |
@pytest.mark.parametrize(['element', 'expected'], [
    ('tests.dummy_package.DataGenerator.flow', '\nDataGenerator.flow('),
    ('tests.dummy_package.DataGenerator.flow_from_directory',
     '\nDataGenerator.flow_from_directory('),
])
def test_aliases_methods(element, expected):
    """Methods reached through an alias are rendered under the alias name."""
    rendered = autogen.DocumentationGenerator()._render(element)
    assert expected in rendered
442 |
443 |
# Fixture: markdown expected for dummy_module2.dodo, minus the title-size
# prefix ("##"/"###") which the test below prepends itself.
expected_dodo = """ dodo


```python
tests.dummy_package.dummy_module2.dodo(x)
```


Some dodo


----

"""
458 |
459 |
460 | @pytest.mark.parametrize("titles_size", ["###", "##"])
461 | def test_aliases_in_hints(titles_size):
462 | pages = {'dod.md': ['tests.dummy_package.DataGenerator',
463 | 'tests.dummy_package.dummy_module2.dodo']}
464 | doc_generator = autogen.DocumentationGenerator(pages=pages, titles_size=titles_size)
465 | result = doc_generator._render('tests.dummy_package.dummy_module2.dodo')
466 | assert result == titles_size + expected_dodo
467 |
468 |
class A:
    # Fixture: parent class whose method carries the docstring that the
    # subclass override below should inherit in the generated docs.
    def dodo(self):
        """Some docstring."""
        pass
473 |
474 |
class B(A):
    # Fixture: overrides dodo without a docstring of its own.
    def dodo(self):
        pass
478 |
479 |
def test_get_docstring_of_super_class():
    """A method without a docstring falls back to its parent's docstring."""
    rendered = autogen.DocumentationGenerator()._render(B.dodo)
    assert 'Some docstring' in rendered
483 |
484 |
# Fixture: a "method" with typing hints; the test below checks how these
# annotations are rendered in the generated argument documentation.
def water_plant(
    self, amount: Union[int, float], fertilizer_type: Optional[str] = None
):
    """Give your plant some water.

    # Arguments
        amount: How much water to give.
        fertilizer_type: What kind of fertilizer to add.
    """

    pass
496 |
497 |
def test_types_in_docstring():
    """Type hints are stripped from the signature and shown beside arguments."""
    rendered = autogen.DocumentationGenerator()._render(water_plant)

    fragments = (
        "water_plant(self, amount, fertilizer_type=None)",
        "- __amount__ `Union[int, float]`: How much",
        "- __fertilizer_type__ `Optional[str]`: What",
    )
    for fragment in fragments:
        assert fragment in rendered
504 |
505 |
# Fixture: nested generic annotation (a Union containing a Tuple) to stress
# the type-hint parsing in the renderer.
def hard_method(self, arg: Union[int, Tuple[int, int]], arg2: int = 0) -> int:
    """Can we parse this?

    # Arguments
        arg: One or two integers.
        arg2: One integer.
    """
    pass
514 |
515 |
def test_hard_method():
    """Nested generic type hints are rendered correctly."""
    rendered = autogen.DocumentationGenerator()._render(hard_method)

    assert "- __arg__ `Union[int, Tuple[int, int]]`: One or" in rendered
    assert "- __arg2__ `int`: One integer." in rendered
521 |
522 |
# Fixture: argument annotated with a project class, used by the
# extra_aliases tests below.
def doing_things(an_argument: dummy_package.DataGenerator):
    """A function

    # Arguments
        an_argument: Some generator

    """
530 |
531 |
def test_rendinging_with_extra_alias():
    """Types listed in ``extra_aliases`` are resolved in argument docs."""
    generator = autogen.DocumentationGenerator(
        extra_aliases=["tests.dummy_package.DataGenerator"]
    )
    generated = generator._render(doing_things)
    assert "- __an_argument__ `tests.dummy_package.DataGenerator`: Some" in generated
537 |
538 |
def test_rendinging_with_extra_alias_custom_alias():
    """A dict of extra aliases maps a real dotted path to a custom name."""
    aliases = {
        "tests.dummy_package.dummy_module.ImageDataGenerator": "some.new.Thing",
    }
    generator = autogen.DocumentationGenerator(extra_aliases=aliases)
    generated = generator._render(doing_things)
    assert "- __an_argument__ `some.new.Thing`: Some" in generated
545 |
546 |
@pytest.mark.skipif(
    sys.version_info < (3, 7),
    reason="the __future__ annotations only works with py37+."
)
def test_future_annotations():
    # Re-runs the extra-alias test against a sibling module that enables
    # `from __future__ import annotations` (string-based annotations).
    from . import autogen_future
    autogen_future.test_rendinging_with_extra_alias()
554 |
555 |
556 | if __name__ == "__main__":
557 | pytest.main([__file__])
558 |
--------------------------------------------------------------------------------
/tests/test_docstring.py:
--------------------------------------------------------------------------------
1 | from keras_autodoc.docstring import process_docstring
2 |
3 |
4 | docstring1 = """This is a docstring
5 |
6 | Some text is here.
7 |
8 | # Arguments
9 | arg1: some argument.
10 | arg2: Some other
11 | argument with a line break.
12 |
13 | Some more text.
14 | """
15 |
16 | expected1 = """This is a docstring
17 |
18 | Some text is here.
19 |
20 | __Arguments__
21 |
22 | - __arg1__: some argument.
23 | - __arg2__: Some other
24 | argument with a line break.
25 |
26 | Some more text.
27 | """
28 |
29 |
def test_generate_docstring_basic():
    """A simple `# Arguments` section is turned into a bullet list."""
    assert process_docstring(docstring1) == expected1
33 |
34 |
35 | docstring2 = """This is a docstring
36 |
37 | Some text is here.
38 |
39 | # Arguments
40 | arg1: some argument
41 | here written: with colon
42 |
43 | Some more text.
44 | """
45 |
46 | expected2 = """This is a docstring
47 |
48 | Some text is here.
49 |
50 | __Arguments__
51 |
52 | - __arg1__: some argument
53 | here written: with colon
54 |
55 | Some more text.
56 | """
57 |
58 |
def test_generate_docstring_with_colon():
    """Colons inside an argument's description body are left untouched."""
    assert process_docstring(docstring2) == expected2
62 |
63 |
64 | docstring3 = """This is a docstring
65 |
66 | Some text is here.
67 |
68 | # A section
69 | Some stuff written
70 |
71 | Some line jump
72 |
73 | Some more text.
74 | """
75 |
76 | expected3 = """This is a docstring
77 |
78 | Some text is here.
79 |
80 | __A section__
81 |
82 | Some stuff written
83 |
84 | Some line jump
85 |
86 | Some more text.
87 | """
88 |
89 |
def test_generate_docstring_line_jump():
    """Blank lines inside a generic section are preserved."""
    assert process_docstring(docstring3) == expected3
93 |
94 |
95 | docstring4 = """This is a docstring
96 |
97 | Some text is here.
98 |
99 | # A section
100 | Some stuff written
101 | Some line jump
102 | Some more text.
103 | """
104 |
105 | expected4 = """This is a docstring
106 |
107 | Some text is here.
108 |
109 | __A section__
110 |
111 | Some stuff written
112 | Some line jump
113 |
114 | Some more text.
115 | """
116 |
117 |
def test_generate_docstring_lines_stuck():
    """A blank line is inserted after a section body with no trailing gap."""
    assert process_docstring(docstring4) == expected4
121 |
122 |
123 | docstring5 = """This is a docstring
124 |
125 | Some text is here.
126 |
127 | # A section
128 |
129 | Some stuff written
130 | Some line jump
131 |
132 | Some more text.
133 | """
134 |
135 | expected5 = """This is a docstring
136 |
137 | Some text is here.
138 |
139 | __A section__
140 |
141 | Some stuff written
142 | Some line jump
143 |
144 | Some more text.
145 | """
146 |
147 |
def test_generate_docstring_no_indent():
    """A section whose body is not indented is still formatted correctly."""
    assert process_docstring(docstring5) == expected5
151 |
152 |
153 | docstring6 = """This is a docstring
154 |
155 | Some text is here.
156 |
157 | # Output shape
158 | nD tensor with shape: `(batch_size, ..., units)`.
159 | For instance, for a 2D input with shape `(batch_size, input_dim)`,
160 | the output would have shape `(batch_size, units)`.
161 | """
162 |
163 | expected6 = """This is a docstring
164 |
165 | Some text is here.
166 |
167 | __Output shape__
168 |
169 | nD tensor with shape: `(batch_size, ..., units)`.
170 | For instance, for a 2D input with shape `(batch_size, input_dim)`,
171 | the output would have shape `(batch_size, units)`.
172 | """
173 |
174 |
def test_generate_docstring_weird_poing_bug():
    """Regression test: backtick-heavy section bodies keep their text."""
    assert process_docstring(docstring6) == expected6
178 |
--------------------------------------------------------------------------------
/tests/test_gathering_members.py:
--------------------------------------------------------------------------------
1 | from keras_autodoc import get_functions
2 | from keras_autodoc import get_classes
3 | from keras_autodoc import get_methods
4 | from .dummy_package import dummy_module
5 |
6 |
def test_get_module_functions():
    """get_functions can return live function objects instead of strings."""
    computed = get_functions(dummy_module, return_strings=False)
    assert set(computed) == {dummy_module.to_categorical}
11 |
12 |
def test_get_module_functions_to_str():
    """By default get_functions returns fully dotted path strings."""
    computed = set(get_functions(dummy_module))
    assert computed == {'tests.dummy_package.dummy_module.to_categorical'}
16 |
17 |
def test_get_module_functions_from_str_to_str():
    """A module given as a dotted string yields paths relative to it."""
    computed = set(get_functions('tests.dummy_package'))
    assert computed == {'tests.dummy_package.to_categorical'}
22 |
23 |
def test_get_module_classes():
    """get_classes can return the live class objects of a module."""
    computed = set(get_classes(dummy_module, return_strings=False))
    assert computed == {dummy_module.ImageDataGenerator, dummy_module.Dense}
27 |
28 |
def test_get_module_classes_from_str_to_str():
    """Classes are returned as dotted path strings by default."""
    computed = set(get_classes(dummy_module))
    assert computed == {
        'tests.dummy_package.dummy_module.ImageDataGenerator',
        'tests.dummy_package.dummy_module.Dense',
    }
33 |
34 |
def test_get_class_methods():
    """get_methods can return the live method objects of a class."""
    computed = set(
        get_methods(dummy_module.ImageDataGenerator, return_strings=False)
    )
    assert computed == {
        dummy_module.ImageDataGenerator.flow,
        dummy_module.ImageDataGenerator.flow_from_directory,
    }
43 |
44 |
def test_get_class_methods_aliases():
    """Methods found through an alias keep the alias in their dotted path."""
    computed = set(get_methods('tests.dummy_package.DataGenerator'))
    assert computed == {
        'tests.dummy_package.DataGenerator.flow',
        'tests.dummy_package.DataGenerator.flow_from_directory',
    }
52 |
--------------------------------------------------------------------------------
/tests/test_get_signature.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple, Union
2 |
3 | import pytest
4 |
5 | from keras_autodoc.get_signatures import get_signature_end
6 | from keras_autodoc.get_signatures import get_signature_start
7 | from keras_autodoc.get_signatures import get_class_signature
8 | from keras_autodoc.get_signatures import get_function_signature
9 | from keras_autodoc.get_signatures import get_signature
10 | from keras_autodoc.get_signatures import format_signature
11 |
12 |
def test_get_signature_end():
    """Star-args show up verbatim in the signature's argument list."""
    def some_function(*args, **kwargs):
        pass

    assert get_signature_end(some_function) == '(*args, **kwargs)'
20 |
21 |
class Dog:
    # Fixture class: the methods below exercise default values, simple type
    # hints and nested generics in the signature-formatting tests.
    def woof(self, volume, good_boy: bool = True, name: str = 'doggy') -> int:
        pass

    def hard_method(self, arg: Union[int, Tuple[int, int]], arg2: int = 0) -> int:
        pass
28 |
29 |
def test_get_signature_end_method():
    """`self` is dropped and default values are kept for methods."""
    assert get_signature_end(Dog.woof) == "(volume, good_boy=True, name='doggy')"
35 |
36 |
def test_get_signature_end_method_hard():
    """Complex type hints are stripped entirely from the argument list."""
    assert get_signature_end(Dog.hard_method) == "(arg, arg2=0)"
42 |
43 |
def test_get_signature_start_method():
    """The signature starts with ClassName.method for methods."""
    assert get_signature_start(Dog.woof) == "Dog.woof"
49 |
50 |
@pytest.mark.parametrize('fn', [get_function_signature, get_signature])
def test_get_function_signature_black(fn):
    """Signatures are wrapped black-style depending on max_line_length."""
    cases = {
        110: 'Dog.woof(volume, good_boy=True, name="doggy")',
        40: ('Dog.woof(\n'
             '    volume, good_boy=True, name="doggy"\n'
             ')'),
        30: ('Dog.woof(\n'
             '    volume,\n'
             '    good_boy=True,\n'
             '    name="doggy",\n'
             ')'),
    }
    for max_line_length, expected in cases.items():
        assert fn(Dog.woof, max_line_length=max_line_length) == expected
67 |
68 |
class HelloWorld:
    # Fixture: a class with a no-argument constructor.
    def __init__(self):
        pass
72 |
73 |
def test_get_class_signature():
    """A class signature is its dotted path plus the __init__ arguments."""
    assert get_class_signature(HelloWorld) == 'tests.test_get_signature.HelloWorld()'
78 |
79 |
class HelloWorld2:
    # Fixture: constructor with positional args and a defaulted, hinted arg.
    def __init__(self, one, two, three: int = 3):
        pass
83 |
84 |
def test_get_class_signature_with_args():
    """__init__ arguments (minus self) appear in the class signature."""
    expected = 'tests.test_get_signature.HelloWorld2(one, two, three=3)'
    assert get_class_signature(HelloWorld2) == expected
89 |
90 |
@pytest.mark.parametrize('fn', [get_class_signature, get_signature])
def test_get_class_signature_with_args_black(fn):
    """Class signatures wrap black-style once they exceed max_line_length."""
    one_liner = 'tests.test_get_signature.HelloWorld2(one, two, three=3)'
    assert fn(HelloWorld2, max_line_length=110) == one_liner

    wrapped = ('tests.test_get_signature.HelloWorld2(\n'
               '    one, two, three=3\n'
               ')')
    assert fn(HelloWorld2, max_line_length=50) == wrapped
100 |
101 |
def test_format_signature1():
    """Short signatures are kept on a single line."""
    start = 'hello.world'
    end = '(dodo: str = "stuff", dada=(7, 9))'

    assert format_signature(start, end) == start + end
109 |
110 |
def test_format_signature2():
    """Long signatures are wrapped with one argument per line."""
    start = 'hello.very.incredibly.large.world'
    end = ('(doddodododododo: str = "stuff", '
           'dadadadadadadada: tuple = (7, 9), '
           'dudududududududu=37, '
           'stufffffffffffffff=48)')

    expected = ('hello.very.incredibly.large.world(\n'
                '    doddodododododo: str = "stuff",\n'
                '    dadadadadadadada: tuple = (7, 9),\n'
                '    dudududududududu=37,\n'
                '    stufffffffffffffff=48,\n'
                ')')

    assert format_signature(start, end) == expected
127 |
--------------------------------------------------------------------------------
/tests/test_integration.py:
--------------------------------------------------------------------------------
1 | from tensorflow.keras import Model
2 | from keras_tuner import HyperParameters
3 | from keras_autodoc.get_signatures import get_function_signature, get_signature_end
4 |
5 |
def test_signature():
    """Keras' Model.compile is rendered with black-style wrapping."""
    # Note: local variable renamed from the misspelled `excpected`.
    expected = ('Model.compile(\n'
                '    optimizer="rmsprop",\n'
                '    loss=None,\n'
                '    metrics=None,\n'
                '    loss_weights=None,\n'
                '    weighted_metrics=None,\n'
                '    run_eagerly=None,\n'
                '    steps_per_execution=None,\n'
                '    jit_compile=None,\n'
                '    **kwargs\n'
                ')')
    assert get_function_signature(Model.compile) == expected
20 |
21 |
def test_wrapping_signature():
    """Decorator-wrapped methods expose their real argument list."""
    computed = get_signature_end(HyperParameters.conditional_scope)
    assert computed == '(parent_name, parent_values)'
26 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import io
3 | from keras_autodoc import utils
4 | from . import dummy_package
5 |
6 |
def test_import_object():
    """import_object resolves functions, methods and packages from a path."""
    for path, obj in (
        ('os.path.join', os.path.join),
        ('io.BytesIO.flush', io.BytesIO.flush),
        ('tests.dummy_package', dummy_package),
    ):
        assert utils.import_object(path) == obj
11 |
--------------------------------------------------------------------------------