├── .github └── workflows │ ├── package-scan.yml │ └── python-package.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── beyondml ├── __init__.py ├── pt │ ├── __init__.py │ ├── layers │ │ ├── Conv2D.py │ │ ├── Conv3D.py │ │ ├── Dense.py │ │ ├── FilterLayer.py │ │ ├── MaskedConv2D.py │ │ ├── MaskedConv3D.py │ │ ├── MaskedDense.py │ │ ├── MaskedMultiHeadAttention.py │ │ ├── MaskedTransformerDecoderLayer.py │ │ ├── MaskedTransformerEncoderLayer.py │ │ ├── MultiConv2D.py │ │ ├── MultiConv3D.py │ │ ├── MultiDense.py │ │ ├── MultiMaskedConv2D.py │ │ ├── MultiMaskedConv3D.py │ │ ├── MultiMaskedDense.py │ │ ├── MultiMaxPool2D.py │ │ ├── MultiMaxPool3D.py │ │ ├── MultitaskNormalization.py │ │ ├── SelectorLayer.py │ │ ├── SparseConv2D.py │ │ ├── SparseConv3D.py │ │ ├── SparseDense.py │ │ ├── SparseMultiConv2D.py │ │ ├── SparseMultiConv3D.py │ │ ├── SparseMultiDense.py │ │ └── __init__.py │ └── utils │ │ ├── __init__.py │ │ └── utils.py └── tflow │ ├── __init__.py │ ├── layers │ ├── FilterLayer.py │ ├── MaskedConv2D.py │ ├── MaskedConv3D.py │ ├── MaskedDense.py │ ├── MultiConv2D.py │ ├── MultiConv3D.py │ ├── MultiDense.py │ ├── MultiMaskedConv2D.py │ ├── MultiMaskedConv3D.py │ ├── MultiMaskedDense.py │ ├── MultiMaxPool2D.py │ ├── MultiMaxPool3D.py │ ├── MultitaskNormalization.py │ ├── SelectorLayer.py │ ├── SparseConv2D.py │ ├── SparseConv3D.py │ ├── SparseDense.py │ ├── SparseMultiConv2D.py │ ├── SparseMultiConv3D.py │ ├── SparseMultiDense.py │ ├── SumLayer.py │ └── __init__.py │ └── utils │ ├── __init__.py │ ├── transformer.py │ └── utils.py ├── docs ├── .nojekyll ├── _downloads │ └── 24a622e4623b41fdb0fe92a5595144b2 │ │ └── beyondml.pdf ├── _images │ └── BeyondML_horizontal-color.png ├── _modules │ ├── beyondml │ │ ├── pt │ │ │ ├── layers │ │ │ │ ├── Conv2D.html │ │ │ │ ├── Conv3D.html │ │ │ │ ├── Dense.html │ │ │ │ ├── FilterLayer.html │ │ │ │ ├── MaskedConv2D.html │ │ │ │ ├── MaskedConv3D.html │ │ │ │ ├── MaskedDense.html │ │ │ │ ├── MaskedMultiHeadAttention.html │ │ │ │ ├── MaskedTransformerDecoderLayer.html │ │ │ │ ├── MaskedTransformerEncoderLayer.html │ │ │ │ ├── MultiConv2D.html │ │ │ │ ├── MultiConv3D.html │ │ │ │ ├── MultiDense.html │ │ │ │ ├── MultiMaskedConv2D.html │ │ │ │ ├── MultiMaskedConv3D.html │ │ │ │ ├── MultiMaskedDense.html │ │ │ │ ├── MultiMaxPool2D.html │ │ │ │ ├── MultiMaxPool3D.html │ │ │ │ ├── MultitaskNormalization.html │ │ │ │ ├── SelectorLayer.html │ │ │ │ ├── SparseConv2D.html │ │ │ │ ├── SparseConv3D.html │ │ │ │ ├── SparseDense.html │ │ │ │ ├── SparseMultiConv2D.html │ │ │ │ ├── SparseMultiConv3D.html │ │ │ │ └── SparseMultiDense.html │ │ │ └── utils │ │ │ │ └── utils.html │ │ └── tflow │ │ │ ├── layers │ │ │ ├── FilterLayer.html │ │ │ ├── MaskedConv2D.html │ │ │ ├── MaskedConv3D.html │ │ │ ├── MaskedDense.html │ │ │ ├── MultiConv2D.html │ │ │ ├── MultiConv3D.html │ │ │ ├── MultiDense.html │ │ │ ├── MultiMaskedConv2D.html │ │ │ ├── MultiMaskedConv3D.html │ │ │ ├── MultiMaskedDense.html │ │ │ ├── MultiMaxPool2D.html │ │ │ ├── MultiMaxPool3D.html │ │ │ ├── MultitaskNormalization.html │ │ │ ├── SelectorLayer.html │ │ │ ├── SparseConv2D.html │ │ │ ├── SparseConv3D.html │ │ │ ├── SparseDense.html │ │ │ ├── SparseMultiConv2D.html │ │ │ ├── SparseMultiConv3D.html │ │ │ ├── SparseMultiDense.html │ │ │ └── SumLayer.html │ │ │ └── utils │ │ │ ├── transformer.html │ │ │ └── utils.html │ └── index.html ├── _sources │ ├── beyondml.pt.layers.rst.txt │ ├── beyondml.pt.rst.txt │ ├── beyondml.pt.utils.rst.txt │ ├── beyondml.rst.txt │ ├── 
beyondml.tflow.layers.rst.txt │ ├── beyondml.tflow.rst.txt │ ├── beyondml.tflow.utils.rst.txt │ ├── index.rst.txt │ └── modules.rst.txt ├── _static │ ├── _sphinx_javascript_frameworks_compat.js │ ├── basic.css │ ├── css │ │ ├── badge_only.css │ │ ├── fonts │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ ├── Roboto-Slab-Regular.woff2 │ │ │ ├── fontawesome-webfont.eot │ │ │ ├── fontawesome-webfont.svg │ │ │ ├── fontawesome-webfont.ttf │ │ │ ├── fontawesome-webfont.woff │ │ │ ├── fontawesome-webfont.woff2 │ │ │ ├── lato-bold-italic.woff │ │ │ ├── lato-bold-italic.woff2 │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-normal-italic.woff │ │ │ ├── lato-normal-italic.woff2 │ │ │ ├── lato-normal.woff │ │ │ └── lato-normal.woff2 │ │ └── theme.css │ ├── doctools.js │ ├── documentation_options.js │ ├── file.png │ ├── jquery-3.6.0.js │ ├── jquery.js │ ├── js │ │ ├── badge_only.js │ │ ├── html5shiv-printshiv.min.js │ │ ├── html5shiv.min.js │ │ └── theme.js │ ├── language_data.js │ ├── minus.png │ ├── plus.png │ ├── pygments.css │ ├── searchtools.js │ ├── sphinx_highlight.js │ ├── underscore-1.13.1.js │ └── underscore.js ├── beyondml.html ├── beyondml.pt.html ├── beyondml.pt.layers.html ├── beyondml.pt.utils.html ├── beyondml.tflow.html ├── beyondml.tflow.layers.html ├── beyondml.tflow.utils.html ├── genindex.html ├── index.html ├── modules.html ├── objects.inv ├── py-modindex.html ├── search.html └── searchindex.js ├── logo └── BeyondML_horizontal-color.png ├── requirements.txt ├── setup.py ├── sphinx ├── Makefile ├── _build │ ├── doctrees │ │ ├── beyondml.doctree │ │ ├── beyondml.pt.doctree │ │ ├── beyondml.pt.layers.doctree │ │ ├── beyondml.pt.utils.doctree │ │ ├── beyondml.tflow.doctree │ │ ├── beyondml.tflow.layers.doctree │ │ ├── beyondml.tflow.utils.doctree │ │ ├── environment.pickle │ │ ├── index.doctree │ │ └── modules.doctree │ ├── html │ │ ├── .buildinfo │ │ ├── _downloads │ │ │ └── 24a622e4623b41fdb0fe92a5595144b2 │ │ │ │ └── beyondml.pdf │ │ ├── _images │ │ │ └── BeyondML_horizontal-color.png │ │ ├── _modules │ │ │ ├── beyondml │ │ │ │ ├── pt │ │ │ │ │ ├── layers │ │ │ │ │ │ ├── Conv2D.html │ │ │ │ │ │ ├── Conv3D.html │ │ │ │ │ │ ├── Dense.html │ │ │ │ │ │ ├── FilterLayer.html │ │ │ │ │ │ ├── MaskedConv2D.html │ │ │ │ │ │ ├── MaskedConv3D.html │ │ │ │ │ │ ├── MaskedDense.html │ │ │ │ │ │ ├── MaskedMultiHeadAttention.html │ │ │ │ │ │ ├── MaskedTransformerDecoderLayer.html │ │ │ │ │ │ ├── MaskedTransformerEncoderLayer.html │ │ │ │ │ │ ├── MultiConv2D.html │ │ │ │ │ │ ├── MultiConv3D.html │ │ │ │ │ │ ├── MultiDense.html │ │ │ │ │ │ ├── MultiMaskedConv2D.html │ │ │ │ │ │ ├── MultiMaskedConv3D.html │ │ │ │ │ │ ├── MultiMaskedDense.html │ │ │ │ │ │ ├── MultiMaxPool2D.html │ │ │ │ │ │ ├── MultiMaxPool3D.html │ │ │ │ │ │ ├── MultitaskNormalization.html │ │ │ │ │ │ ├── SelectorLayer.html │ │ │ │ │ │ ├── SparseConv2D.html │ │ │ │ │ │ ├── SparseConv3D.html │ │ │ │ │ │ ├── SparseDense.html │ │ │ │ │ │ ├── SparseMultiConv2D.html │ │ │ │ │ │ ├── SparseMultiConv3D.html │ │ │ │ │ │ └── SparseMultiDense.html │ │ │ │ │ └── utils │ │ │ │ │ │ └── utils.html │ │ │ │ └── tflow │ │ │ │ │ ├── layers │ │ │ │ │ ├── FilterLayer.html │ │ │ │ │ ├── MaskedConv2D.html │ │ │ │ │ ├── MaskedConv3D.html │ │ │ │ │ ├── MaskedDense.html │ │ │ │ │ ├── MultiConv2D.html │ │ │ │ │ ├── MultiConv3D.html │ │ │ │ │ ├── MultiDense.html │ │ │ │ │ ├── MultiMaskedConv2D.html │ │ │ │ │ ├── MultiMaskedConv3D.html │ │ │ │ │ ├── MultiMaskedDense.html │ │ │ │ │ ├── 
MultiMaxPool2D.html │ │ │ │ │ ├── MultiMaxPool3D.html │ │ │ │ │ ├── MultitaskNormalization.html │ │ │ │ │ ├── SelectorLayer.html │ │ │ │ │ ├── SparseConv2D.html │ │ │ │ │ ├── SparseConv3D.html │ │ │ │ │ ├── SparseDense.html │ │ │ │ │ ├── SparseMultiConv2D.html │ │ │ │ │ ├── SparseMultiConv3D.html │ │ │ │ │ ├── SparseMultiDense.html │ │ │ │ │ └── SumLayer.html │ │ │ │ │ └── utils │ │ │ │ │ ├── transformer.html │ │ │ │ │ └── utils.html │ │ │ └── index.html │ │ ├── _sources │ │ │ ├── beyondml.pt.layers.rst.txt │ │ │ ├── beyondml.pt.rst.txt │ │ │ ├── beyondml.pt.utils.rst.txt │ │ │ ├── beyondml.rst.txt │ │ │ ├── beyondml.tflow.layers.rst.txt │ │ │ ├── beyondml.tflow.rst.txt │ │ │ ├── beyondml.tflow.utils.rst.txt │ │ │ ├── index.rst.txt │ │ │ └── modules.rst.txt │ │ ├── _static │ │ │ ├── _sphinx_javascript_frameworks_compat.js │ │ │ ├── basic.css │ │ │ ├── css │ │ │ │ ├── badge_only.css │ │ │ │ ├── fonts │ │ │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ │ │ ├── Roboto-Slab-Regular.woff2 │ │ │ │ │ ├── fontawesome-webfont.eot │ │ │ │ │ ├── fontawesome-webfont.svg │ │ │ │ │ ├── fontawesome-webfont.ttf │ │ │ │ │ ├── fontawesome-webfont.woff │ │ │ │ │ ├── fontawesome-webfont.woff2 │ │ │ │ │ ├── lato-bold-italic.woff │ │ │ │ │ ├── lato-bold-italic.woff2 │ │ │ │ │ ├── lato-bold.woff │ │ │ │ │ ├── lato-bold.woff2 │ │ │ │ │ ├── lato-normal-italic.woff │ │ │ │ │ ├── lato-normal-italic.woff2 │ │ │ │ │ ├── lato-normal.woff │ │ │ │ │ └── lato-normal.woff2 │ │ │ │ └── theme.css │ │ │ ├── doctools.js │ │ │ ├── documentation_options.js │ │ │ ├── file.png │ │ │ ├── jquery-3.6.0.js │ │ │ ├── jquery.js │ │ │ ├── js │ │ │ │ ├── badge_only.js │ │ │ │ ├── html5shiv-printshiv.min.js │ │ │ │ ├── html5shiv.min.js │ │ │ │ └── theme.js │ │ │ ├── language_data.js │ │ │ ├── minus.png │ │ │ ├── plus.png │ │ │ ├── pygments.css │ │ │ ├── searchtools.js │ │ │ ├── sphinx_highlight.js │ │ │ ├── underscore-1.13.1.js │ │ │ └── underscore.js │ │ ├── beyondml.html │ │ ├── beyondml.pt.html │ │ ├── beyondml.pt.layers.html │ │ ├── beyondml.pt.utils.html │ │ ├── beyondml.tflow.html │ │ ├── beyondml.tflow.layers.html │ │ ├── beyondml.tflow.utils.html │ │ ├── genindex.html │ │ ├── index.html │ │ ├── modules.html │ │ ├── objects.inv │ │ ├── py-modindex.html │ │ ├── search.html │ │ └── searchindex.js │ └── latex │ │ ├── BeyondML_horizontal-color.png │ │ ├── LICRcyr2utf8.xdy │ │ ├── LICRlatin2utf8.xdy │ │ ├── LatinRules.xdy │ │ ├── Makefile │ │ ├── beyondml.aux │ │ ├── beyondml.fdb_latexmk │ │ ├── beyondml.fls │ │ ├── beyondml.idx │ │ ├── beyondml.ilg │ │ ├── beyondml.ind │ │ ├── beyondml.out │ │ ├── beyondml.pdf │ │ ├── beyondml.tex │ │ ├── beyondml.toc │ │ ├── latexmkjarc │ │ ├── latexmkrc │ │ ├── make.bat │ │ ├── python.ist │ │ ├── sphinx.sty │ │ ├── sphinx.xdy │ │ ├── sphinxhighlight.sty │ │ ├── sphinxhowto.cls │ │ ├── sphinxlatexadmonitions.sty │ │ ├── sphinxlatexcontainers.sty │ │ ├── sphinxlatexgraphics.sty │ │ ├── sphinxlatexindbibtoc.sty │ │ ├── sphinxlatexlists.sty │ │ ├── sphinxlatexliterals.sty │ │ ├── sphinxlatexnumfig.sty │ │ ├── sphinxlatexobjects.sty │ │ ├── sphinxlatexshadowbox.sty │ │ ├── sphinxlatexstyleheadings.sty │ │ ├── sphinxlatexstylepage.sty │ │ ├── sphinxlatexstyletext.sty │ │ ├── sphinxlatextables.sty │ │ ├── sphinxmanual.cls │ │ ├── sphinxmessages.sty │ │ ├── sphinxoptionsgeometry.sty │ │ ├── sphinxoptionshyperref.sty │ │ ├── sphinxpackageboxes.sty │ │ ├── sphinxpackagecyrillic.sty │ │ └── sphinxpackagefootnote.sty ├── beyondml.pt.layers.rst ├── 
beyondml.pt.rst ├── beyondml.pt.utils.rst ├── beyondml.rst ├── beyondml.tflow.layers.rst ├── beyondml.tflow.rst ├── beyondml.tflow.utils.rst ├── conf.py ├── images │ └── BeyondML_horizontal-color.png ├── index.rst ├── make.bat └── modules.rst └── tests ├── test_pt.py ├── test_simple.py └── test_tf.py /.github/workflows/package-scan.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies and scan package and all dependencies with pip-audit 2 | 3 | 4 | name: Package scan 5 | 6 | on: 7 | push: 8 | branches: [ "main", "staging" ] 9 | pull_request: 10 | branches: [ "main", "staging" ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | python-version: ["3.8", "3.9", "3.10"] 20 | 21 | steps: 22 | - uses: actions/checkout@v3 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v3 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip setuptools 30 | python -m pip install flake8 pytest pip-audit 31 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 32 | if [ -f additional_requirements.txt ]; then pip install -r additional_requirements.txt; fi 33 | python -m pip install ./ 34 | - name: Audit 35 | run: | 36 | pip-audit 37 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Python package 5 | 6 | on: 7 | push: 8 | branches: [ "main", "staging" ] 9 | pull_request: 10 | branches: [ "main", "staging" ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | python-version: ["3.8", "3.9", "3.10"] 20 | 21 | steps: 22 | - uses: actions/checkout@v3 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v3 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip 30 | python -m pip install flake8 pytest 31 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 32 | python -m pip install ./ 33 | - name: Lint with flake8 34 | run: | 35 | # stop the build if there are Python syntax errors or undefined names 36 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 37 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 38 | flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 39 | - name: Test with pytest 40 | run: | 41 | pytest ./ -W ignore::DeprecationWarning 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # Add .DS_Store 132 | *.DS_Store 133 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | education, socio-economic status, nationality, personal appearance, race, 10 | religion, or sexual identity and orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at . All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to BeyondML 2 | Thank you very much for your interest in contributing to the BeyondML project! 
We want to make contributing to this project as easy and transparent as possible, whether you wish to: 3 | 4 | - Report a bug 5 | - Discuss the current state of the code 6 | - Submit a fix 7 | - Propose new features 8 | - Become a maintainer 9 | 10 | ## We Develop with GitHub 11 | We use GitHub to host code, to track issues and feature requests, as well as accept pull requests. Anyone is welcome to submit issues, request features, or provide other feedback on GitHub. Please follow our Code of Conduct when making any requests. 12 | 13 | ## Please Reach Out 14 | If you would like to directly contribute to the development of this project, please reach out to to get started and to learn our project's best practices for development! We thank you for your interest in helping support this project. 15 | 16 | ## Contributing Code 17 | 18 | When contributing code to the BeyondML project, we ask that the following best practices be adhered to: 19 | 20 | ### Branching Structure 21 | 22 | #### Main 23 | The `main` branch is reserved for **production releases** of BeyondML. This branch is not expected to be committed to directly, except in extreme circumstances. Additionally, the `main` branch is expected to receive merges only from the `staging` branch. 24 | 25 | #### Staging 26 | The `staging` branch is reserved for **developmental/testing releases** of BeyondML. This branch is set up so that tests are run when code is committed to the branch, helping to ensure that all code that is part of any release is tested. The `staging` branch is expected to receive merges from "version branches." 27 | 28 | #### Version Branches 29 | "Version branches" are branches designed to create functionality to be released in the named version of the branch. These branches are expected to be committed to directly by the core BeyondML team, and other developers are welcome to submit merge requests from their own personal branches. 30 | 31 | ### Testing 32 | When new functionality is introduced, it is expected that tests be created to show that the functionality is working. Please update the files within the `./tests` directory with any tests you create. Additionally, it is very helpful if you check that all tests are passing before committing your code to the version branch. We recommend running the following command in your shell environment, from the top-level directory of the project, to test this: 33 | 34 | ```bash 35 | pytest ./ -W ignore::DeprecationWarning 36 | ``` 37 | 38 | ### Generating Documentation 39 | We utilize [PDoc](https://pdoc.dev/) to generate the documentation for this project. When committing code to this project, specifically when code makes it to the `staging` branch, it is expected that the `./docs` folder be populated with up-to-date documentation for the package. To complete this, we recommend running the following command in your shell environment, from the top-level directory of the project, to generate the documentation: 40 | 41 | ```bash 42 | pdoc -d numpy -o docs ./beyondml 43 | ``` 44 | 45 | We loosely follow [Numpy Documentation Conventions](https://numpydoc.readthedocs.io/en/latest/format.html) for this project, and the preceding line to generate the documentation will parse numpy documentation strings correctly. 46 | 47 | Once again, thank you very much for your willingness and desire to contribute to the BeyondML project!
48 | -------------------------------------------------------------------------------- /beyondml/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | BeyondML (formerly MANN) is a Python package which enables creating sparse multitask artificial neural networks (MANNs) 3 | compatible with [TensorFlow](https://tensorflow.org) and [PyTorch](https://pytorch.org). This package 4 | contains custom layers and utilities to facilitate the training and optimization of models using the 5 | Reduction of Sub-Network Neuroplasticity (RSN2) training procedure developed by [AI Squared, Inc](https://squared.ai). 6 | 7 | ### Installation 8 | 9 | This package is available through [PyPi](https://pypi.org) and can be installed via the following command: 10 | 11 | ```bash 12 | pip install beyondml 13 | ``` 14 | 15 | ### Capabilities 16 | 17 | There are two major subpackages within the BeyondML package, the `beyondml.tflow` and the `beyondml.pt` packages. 18 | The `beyondml.tflow` package contains functionality for building multitask models using TensorFlow, and the 19 | `beyondml.pt` package contains functionality for building multitask models using PyTorch. 20 | """ 21 | 22 | __version__ = '0.1.7' 23 | __dev__ = True 24 | 25 | import beyondml.tflow 26 | import beyondml.pt 27 | -------------------------------------------------------------------------------- /beyondml/pt/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | ## PyTorch compatibility for building MANN models 3 | 4 | The `beyondml.pt` subpackage contains layers and utilities for creating and pruning models using [PyTorch](https://pytorch.org). 5 | The package contains two subpackages, the `beyondml.pt.layers` package, and the `beyondml.pt.utils` package. 6 | 7 | Within the `layers` package, there is current functionality for the following layers: 8 | - `beyondml.pt.layers.Conv2D` 9 | - `beyondml.pt.layers.Dense` 10 | - `beyondml.pt.layers.FilterLayer` 11 | - `beyondml.pt.layers.MaskedConv2D` 12 | - `beyondml.pt.layers.MaskedDense` 13 | - `beyondml.pt.layers.MultiConv2D` 14 | - `beyondml.pt.layers.MultiDense` 15 | - `beyondml.pt.layers.MultiMaskedConv2D` 16 | - `beyondml.pt.layers.MultiMaskedDense` 17 | - `beyondml.pt.layers.SelectorLayer` 18 | - `beyondml.pt.layers.SparseConv2D` 19 | - `beyondml.pt.layers.SparseDense` 20 | - `beyondml.pt.layers.SparseMultiConv2D` 21 | - `beyondml.pt.layers.SparseMultiDense` 22 | 23 | Within the `beyondml.pt.utils` package, there is currently only one function, the `prune_model` function. Because of 24 | the openness of developing with PyTorch in comparison to TensorFlow, there is far less functionality that 25 | can be supplied directly via BeyondML. Instead, for converting models from training to inference, the user 26 | is left to devise the best way to do so by building his or her own classes. 27 | 28 | ### Best Practices for Pruning 29 | In order to use the `utils.prune_model` function, the model itself must have a `.layers` property. This property 30 | is used to determine which layers can be pruned. **Only layers which support pruning and which are included in the 31 | `.layers` property are pruned,** meaning the user can determine which exact layers in the model he or she wants 32 | pruned. Alternatively, the user can create their own pruning function or method on the class itself and prune that way, 33 | utilizing each of the `.prune()` methods of the layers provided.
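As a minimal sketch of that contract (illustrative only, not taken from the package itself: it assumes `utils.prune_model(model, percentile)` accepts the model and an integer percentile between 0 and 99, mirroring the layers' own `.prune()` methods), a prunable model might look like the following:

```python
import torch
from beyondml import pt


class PrunableModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = pt.layers.MaskedDense(10, 32)
        self.output = pt.layers.MaskedDense(32, 1)

    @property
    def layers(self):
        # Only the layers returned here are visited by utils.prune_model
        return [self.hidden, self.output]

    def forward(self, x):
        return self.output(torch.relu(self.hidden(x)))


model = PrunableModel()
# Assumed call pattern: each listed layer's .prune(80) is applied in place,
# masking the smallest-magnitude 80% of its weights
pt.utils.prune_model(model, 80)
```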
34 | """ 35 | 36 | import beyondml.pt.layers 37 | import beyondml.pt.utils 38 | -------------------------------------------------------------------------------- /beyondml/pt/layers/Conv2D.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class Conv2D(torch.nn.Module): 5 | """ 6 | Convolutional 2D layer initialized directly with weights, rather than with hyperparameters 7 | """ 8 | 9 | def __init__( 10 | self, 11 | kernel, 12 | bias, 13 | padding='same', 14 | strides=1, 15 | device=None, 16 | dtype=None 17 | ): 18 | """ 19 | Parameters 20 | ---------- 21 | kernel : torch.Tensor or Tensor-like 22 | The kernel tensor to use 23 | bias : torch.Tensor or Tensor-like 24 | The bias tensor to use 25 | padding : int or str (default 'same') 26 | The padding to use 27 | strides : int or tuple (default 1) 28 | The strides to use 29 | """ 30 | factory_kwargs = {'device': device, 'dtype': dtype} 31 | super().__init__() 32 | self.w = torch.nn.Parameter(torch.Tensor(kernel).to(**factory_kwargs)) 33 | self.b = torch.nn.Parameter(torch.Tensor(bias).to(**factory_kwargs)) 34 | 35 | self.padding = padding 36 | self.strides = strides 37 | 38 | def forward( 39 | self, 40 | inputs 41 | ): 42 | """ 43 | Call the layer on input data 44 | 45 | Parameters 46 | ---------- 47 | inputs : torch.Tensor 48 | Inputs to call the layer's logic on 49 | 50 | Returns 51 | ------- 52 | results : torch.Tensor 53 | The results of the layer's logic 54 | """ 55 | 56 | return torch.nn.functional.conv2d( 57 | inputs, 58 | self.w, 59 | self.b, 60 | stride=self.strides, 61 | padding=self.padding 62 | ) 63 | -------------------------------------------------------------------------------- /beyondml/pt/layers/Conv3D.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class Conv3D(torch.nn.Module): 5 | """ 6 | Convolutional 3D layer initialized directly with weights, rather than with hyperparameters 7 | """ 8 | 9 | def __init__( 10 | self, 11 | kernel, 12 | bias, 13 | padding='same', 14 | strides=1, 15 | device=None, 16 | dtype=None 17 | ): 18 | """ 19 | Parameters 20 | ---------- 21 | kernel : torch.Tensor or Tensor-like 22 | The kernel tensor to use 23 | bias : torch.Tensor or Tensor-like 24 | The bias tensor to use 25 | padding : int or str (default 'same') 26 | The padding to use 27 | strides : int or tuple (default 1) 28 | The strides to use 29 | """ 30 | 31 | factory_kwargs = {'device': device, 'dtype': dtype} 32 | super().__init__() 33 | self.w = torch.nn.Parameter(torch.Tensor(kernel).to(**factory_kwargs)) 34 | self.b = torch.nn.Parameter(torch.Tensor(bias).to(**factory_kwargs)) 35 | 36 | self.padding = padding 37 | self.strides = strides 38 | 39 | def forward( 40 | self, 41 | inputs 42 | ): 43 | """ 44 | Call the layer on input data 45 | 46 | Parameters 47 | ---------- 48 | inputs : torch.Tensor 49 | Inputs to call the layer's logic on 50 | 51 | Returns 52 | ------- 53 | results : torch.Tensor 54 | The results of the layer's logic 55 | """ 56 | return torch.nn.functional.conv3d( 57 | inputs, 58 | self.w, 59 | self.b, 60 | stride=self.strides, 61 | padding=self.padding 62 | ) 63 | -------------------------------------------------------------------------------- /beyondml/pt/layers/Dense.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class Dense(torch.nn.Module): 5 | """ 6 | Fully-connected layer initialized directly with weights, rather than 
hyperparameters 7 | """ 8 | 9 | def __init__( 10 | self, 11 | weight, 12 | bias, 13 | device=None, 14 | dtype=None 15 | ): 16 | """ 17 | Parameters 18 | ---------- 19 | weight : torch.Tensor or Tensor-like 20 | The weight matrix to use 21 | bias : torch.Tensor or Tensor-like 22 | The bias vector to use 23 | """ 24 | factory_kwargs = {'device': device, 'dtype': dtype} 25 | super().__init__() 26 | self.w = torch.nn.Parameter(torch.Tensor(weight).to(**factory_kwargs)) 27 | self.b = torch.nn.Parameter(torch.Tensor(bias).to(**factory_kwargs)) 28 | 29 | def forward(self, inputs): 30 | """ 31 | Call the layer on input data 32 | 33 | Parameters 34 | ---------- 35 | inputs : torch.Tensor 36 | Inputs to call the layer's logic on 37 | 38 | Returns 39 | ------- 40 | results : torch.Tensor 41 | The results of the layer's logic 42 | """ 43 | out = torch.mm(inputs, self.w) 44 | out = torch.add(out, self.b) 45 | return out 46 | -------------------------------------------------------------------------------- /beyondml/pt/layers/FilterLayer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class FilterLayer(torch.nn.Module): 5 | """ 6 | Layer which filters input data, either returning values or all zeros depending on state 7 | """ 8 | 9 | def __init__( 10 | self, 11 | is_on=True, 12 | device=None, 13 | dtype=None 14 | ): 15 | """ 16 | Parameters 17 | ---------- 18 | is_on : bool (default True) 19 | Whether the layer is on or off 20 | """ 21 | 22 | super().__init__() 23 | self.is_on = is_on 24 | self.factory_kwargs = {'device': device, 'dtype': dtype} 25 | 26 | @property 27 | def is_on(self): 28 | return self._is_on 29 | 30 | @is_on.setter 31 | def is_on(self, value): 32 | if not isinstance(value, bool): 33 | raise TypeError('is_on must be Boolean') 34 | self._is_on = value 35 | 36 | def forward(self, inputs): 37 | """ 38 | Call the layer on input data 39 | 40 | Parameters 41 | ---------- 42 | inputs : torch.Tensor 43 | Inputs to call the layer's logic on 44 | 45 | Returns 46 | ------- 47 | results : torch.Tensor 48 | The results of the layer's logic 49 | """ 50 | if self.is_on: 51 | return inputs 52 | else: 53 | return torch.zeros_like(inputs, **self.factory_kwargs) 54 | 55 | def turn_on(self): 56 | """ 57 | Turn on the layer 58 | """ 59 | self.is_on = True 60 | 61 | def turn_off(self): 62 | """ 63 | Turn off the layer 64 | """ 65 | self.is_on = False 66 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MaskedConv2D.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | 5 | class MaskedConv2D(torch.nn.Module): 6 | """ 7 | Masked 2D Convolutional layer 8 | """ 9 | 10 | def __init__( 11 | self, 12 | in_channels, 13 | out_channels, 14 | kernel_size=3, 15 | padding='same', 16 | strides=1, 17 | device=None, 18 | dtype=None 19 | ): 20 | """ 21 | Parameters 22 | ---------- 23 | in_channels : int 24 | The number of channels for input data 25 | out_channels : int 26 | The number of filters to use 27 | kernel_size : int or tuple (default 3) 28 | The kernel size to use 29 | padding : str or int (default 'same') 30 | Padding to use 31 | strides : int or tuple (default 1) 32 | The number of strides to use 33 | """ 34 | 35 | super().__init__() 36 | factory_kwargs = {'device': device, 'dtype': dtype} 37 | self.in_channels = in_channels 38 | self.out_channels = out_channels 39 | self.kernel_size = kernel_size 40 | self.padding =
padding 41 | self.strides = strides 42 | 43 | filters = torch.Tensor( 44 | self.out_channels, 45 | self.in_channels, 46 | self.kernel_size[0], 47 | self.kernel_size[1], 48 | ).to(**factory_kwargs) 49 | filters = torch.nn.init.kaiming_normal_(filters, a=np.sqrt(5)) 50 | self.w = torch.nn.Parameter(filters) 51 | self.register_buffer( 52 | 'w_mask', torch.ones_like(self.w, **factory_kwargs)) 53 | 54 | bias = torch.zeros(out_channels, **factory_kwargs) 55 | self.b = torch.nn.Parameter(bias) 56 | self.register_buffer( 57 | 'b_mask', torch.ones_like(self.b, **factory_kwargs)) 58 | 59 | @property 60 | def in_channels(self): 61 | return self._in_channels 62 | 63 | @in_channels.setter 64 | def in_channels(self, value): 65 | if not isinstance(value, int): 66 | raise TypeError('in_channels must be int') 67 | self._in_channels = value 68 | 69 | @property 70 | def out_channels(self): 71 | return self._out_channels 72 | 73 | @out_channels.setter 74 | def out_channels(self, value): 75 | if not isinstance(value, int): 76 | raise TypeError('out_channels must be int') 77 | self._out_channels = value 78 | 79 | @property 80 | def kernel_size(self): 81 | return self._kernel_size 82 | 83 | @kernel_size.setter 84 | def kernel_size(self, value): 85 | if isinstance(value, int): 86 | value = (value, value) 87 | elif isinstance(value, tuple): 88 | if not (all([isinstance(val, int) for val in value]) and len(value) == 2): 89 | raise ValueError('If tuple, kernel_size must be two integers') 90 | else: 91 | raise TypeError('kernel_size must be int or tuple') 92 | self._kernel_size = value 93 | 94 | def forward(self, inputs): 95 | """ 96 | Call the layer on input data 97 | 98 | Parameters 99 | ---------- 100 | inputs : torch.Tensor 101 | Inputs to call the layer's logic on 102 | 103 | Returns 104 | ------- 105 | results : torch.Tensor 106 | The results of the layer's logic 107 | """ 108 | return torch.nn.functional.conv2d( 109 | inputs, 110 | self.w * self.w_mask, 111 | self.b * self.b_mask, 112 | stride=self.strides, 113 | padding=self.padding 114 | ) 115 | 116 | def prune(self, percentile): 117 | """ 118 | Prune the layer by updating the layer's mask 119 | 120 | Parameters 121 | ---------- 122 | percentile : int 123 | Integer between 0 and 99 which represents the proportion of weights to be inactive 124 | 125 | Notes 126 | ----- 127 | Acts on the layer in place 128 | """ 129 | w_copy = np.abs(self.w.detach().cpu().numpy()) 130 | b_copy = np.abs(self.b.detach().cpu().numpy()) 131 | w_percentile = np.percentile(w_copy, percentile) 132 | b_percentile = np.percentile(b_copy, percentile) 133 | 134 | new_w_mask = torch.Tensor( 135 | (w_copy >= w_percentile).astype(int)) 136 | new_b_mask = torch.Tensor( 137 | (b_copy >= b_percentile).astype(int)) 138 | self.w_mask[:] = new_w_mask 139 | self.b_mask[:] = new_b_mask 140 | 141 | self.w = torch.nn.Parameter(self.w.detach() * self.w_mask) 142 | self.b = torch.nn.Parameter(self.b.detach() * self.b_mask) 143 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MaskedDense.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | 5 | class MaskedDense(torch.nn.Module): 6 | """ 7 | Masked fully-connected layer 8 | """ 9 | 10 | def __init__( 11 | self, 12 | in_features, 13 | out_features, 14 | device=None, 15 | dtype=None 16 | ): 17 | """ 18 | Parameters 19 | ---------- 20 | in_features : int 21 | The number of features input to the layer 22 | out_features : int 23
| The number of features to be output by the layer. 24 | Also considered the number of artificial neurons 25 | """ 26 | 27 | super().__init__() 28 | factory_kwargs = {'device': device, 'dtype': dtype} 29 | self.in_features = in_features 30 | self.out_features = out_features 31 | 32 | weight = torch.Tensor( 33 | in_features, 34 | out_features, 35 | ).to(**factory_kwargs) 36 | weight = torch.nn.init.kaiming_normal_(weight, a=np.sqrt(5)) 37 | self.w = torch.nn.Parameter(weight) 38 | self.register_buffer( 39 | 'w_mask', torch.ones_like(self.w, **factory_kwargs)) 40 | 41 | bias = torch.zeros(out_features, **factory_kwargs) 42 | self.b = torch.nn.Parameter(bias) 43 | self.register_buffer('b_mask', torch.ones_like(bias, **factory_kwargs)) 44 | 45 | def forward(self, inputs): 46 | """ 47 | Call the layer on input data 48 | 49 | Parameters 50 | ---------- 51 | inputs : torch.Tensor 52 | Inputs to call the layer's logic on 53 | 54 | Returns 55 | ------- 56 | results : torch.Tensor 57 | The results of the layer's logic 58 | """ 59 | weight = self.w * self.w_mask 60 | bias = self.b * self.b_mask 61 | out = torch.matmul(inputs, weight) 62 | out = torch.add(out, bias) 63 | return out 64 | 65 | def prune(self, percentile): 66 | """ 67 | Prune the layer by updating the layer's mask 68 | 69 | Parameters 70 | ---------- 71 | percentile : int 72 | Integer between 0 and 99 which represents the proportion of weights to be inactive 73 | 74 | Notes 75 | ----- 76 | Acts on the layer in place 77 | """ 78 | w_copy = np.abs(self.w.detach().cpu().numpy()) 79 | b_copy = np.abs(self.b.detach().cpu().numpy()) 80 | w_percentile = np.percentile(w_copy, percentile) 81 | b_percentile = np.percentile(b_copy, percentile) 82 | 83 | new_w_mask = torch.Tensor( 84 | (w_copy >= w_percentile).astype(int)) 85 | new_b_mask = torch.Tensor( 86 | (b_copy >= b_percentile).astype(int)) 87 | self.w_mask[:] = new_w_mask 88 | self.b_mask[:] = new_b_mask 89 | 90 | self.w = torch.nn.Parameter( 91 | self.w.detach() * self.w_mask 92 | ) 93 | self.b = torch.nn.Parameter( 94 | self.b.detach() * self.b_mask 95 | ) 96 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MaskedTransformerEncoderLayer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from typing import Optional, Any, Union, Callable 3 | 4 | from torch import Tensor 5 | from torch.nn import Dropout, LayerNorm 6 | from beyondml.pt.layers import MaskedDense 7 | from torch.nn import functional as F 8 | from .MaskedMultiHeadAttention import MaskedMultiHeadAttention 9 | 10 | 11 | class MaskedTransformerEncoderLayer(torch.nn.Module): 12 | """TransformerEncoderLayer is made up of self-attn and feedforward network. 13 | Args: 14 | d_model: the number of expected features in the input (required). 15 | nhead: the number of heads in the multiheadattention models (required). 16 | dim_feedforward: the dimension of the feedforward network model (default=2048). 17 | dropout: the dropout value (default=0.1). 18 | activation: the activation function of the intermediate layer, can be a string 19 | ("relu" or "gelu") or a unary callable. Default: relu 20 | layer_norm_eps: the eps value in layer normalization components (default=1e-5). 21 | batch_first: If ``True``, then the input and output tensors are provided 22 | as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
23 | norm_first: if ``True``, layer norm is done prior to attention and feedforward 24 | operations, respectively. Otherwise it's done after. Default: ``False`` (after). 25 | """ 26 | __constants__ = ['batch_first', 'norm_first'] 27 | 28 | def __init__(self, 29 | d_model: int, 30 | nhead: int, 31 | dim_feedforward: int = 2048, 32 | dropout: float = 0.1, 33 | activation: Union[str, Callable[[Tensor], 34 | Tensor]] = torch.nn.functional.relu, 35 | layer_norm_eps: float = 1e-5, 36 | batch_first: bool = False, 37 | norm_first: bool = False, 38 | device=None, 39 | dtype=None 40 | ) -> None: 41 | factory_kwargs = {'device': device, 'dtype': dtype} 42 | super(MaskedTransformerEncoderLayer, self).__init__() 43 | self.self_attn = MaskedMultiHeadAttention( 44 | d_model, 45 | nhead, 46 | dropout=dropout, 47 | batch_first=batch_first, 48 | **factory_kwargs 49 | ) 50 | 51 | # Implementation of Feedforward model 52 | self.linear1 = MaskedDense(d_model, dim_feedforward, **factory_kwargs) 53 | self.dropout = Dropout(dropout) 54 | self.linear2 = MaskedDense(dim_feedforward, d_model, **factory_kwargs) 55 | self.activation = F.relu if activation == 'relu' else (F.gelu if activation == 'gelu' else activation) 56 | self.norm_first = norm_first 57 | self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs) 58 | self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs) 59 | self.dropout1 = Dropout(dropout) 60 | self.dropout2 = Dropout(dropout) 61 | 62 | def __setstate__(self, state): 63 | super(MaskedTransformerEncoderLayer, self).__setstate__(state) 64 | if not hasattr(self, 'activation'): 65 | self.activation = F.relu 66 | 67 | def forward(self, src: Tensor): 68 | """Pass the input through the encoder layer. 69 | Args: 70 | src: the sequence to the encoder layer (required). 71 | """ 72 | 73 | x = src 74 | # apply the standard residual connections and layer norms, honoring norm_first 75 | if self.norm_first: 76 | x = x + self._sa_block(self.norm1(x)) 77 | x = x + self._ff_block(self.norm2(x)) 78 | else: 79 | x = self.norm1(x + self._sa_block(x)) 80 | x = self.norm2(x + self._ff_block(x)) 81 | 82 | return x 83 | 84 | # self-attention block 85 | def _sa_block(self, x: Tensor, 86 | attn_mask: Optional[Tensor] = None, key_padding_mask: Optional[Tensor] = None) -> Tensor: 87 | x = self.self_attn(x, x, x, 88 | attn_mask=attn_mask, 89 | key_padding_mask=key_padding_mask, 90 | need_weights=False)[0] 91 | return self.dropout1(x) 92 | 93 | # feed forward block 94 | def _ff_block(self, x: Tensor) -> Tensor: 95 | x = self.linear2(self.dropout(self.activation(self.linear1(x)))) 96 | return self.dropout2(x) 97 | 98 | def prune(self, percentile): 99 | self.self_attn.prune(percentile) 100 | self.linear1.prune(percentile) 101 | self.linear2.prune(percentile) 102 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MultiConv2D.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class MultiConv2D(torch.nn.Module): 5 | """ 6 | Multitask 2D Convolutional layer initialized with weights rather than hyperparameters 7 | """ 8 | 9 | def __init__( 10 | self, 11 | kernel, 12 | bias, 13 | padding='same', 14 | strides=1, 15 | device=None, 16 | dtype=None 17 | ): 18 | """ 19 | Parameters 20 | ---------- 21 | kernel : torch.Tensor or Tensor-like 22 | The kernel tensor to use 23 | bias : torch.Tensor or Tensor-like 24 | The bias matrix to use 25 | padding : str or int (default 'same') 26 | The padding to use 27 | strides : int or tuple (default 1) 28 | The strides to use 29 | """ 30 | 31 | factory_kwargs = {'device': device, 'dtype': dtype} 32 | super().__init__() 33 | self.w = torch.nn.Parameter(torch.Tensor(kernel).to(**factory_kwargs)) 34 | self.b = torch.nn.Parameter(torch.Tensor(bias).to(**factory_kwargs)) 35 | self.padding = padding 36 | self.strides =
strides 37 | 38 | def forward( 39 | self, 40 | inputs 41 | ): 42 | """ 43 | Call the layer on input data 44 | 45 | Parameters 46 | ---------- 47 | inputs : torch.Tensor 48 | Inputs to call the layer's logic on 49 | 50 | Returns 51 | ------- 52 | results : torch.Tensor 53 | The results of the layer's logic 54 | """ 55 | outputs = [] 56 | kernel = self.w 57 | bias = self.b 58 | 59 | for i in range(len(inputs)): 60 | outputs.append( 61 | torch.nn.functional.conv2d( 62 | inputs[i], 63 | kernel[i], 64 | bias[i], 65 | stride=self.strides, 66 | padding=self.padding 67 | ) 68 | ) 69 | return outputs 70 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MultiConv3D.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class MultiConv3D(torch.nn.Module): 5 | """ 6 | Multitask 3D Convolutional layer initialized with weights rather than with hyperparameters 7 | """ 8 | 9 | def __init__( 10 | self, 11 | kernel, 12 | bias, 13 | padding='same', 14 | strides=1, 15 | device=None, 16 | dtype=None 17 | ): 18 | """ 19 | Parameters 20 | ---------- 21 | kernel : torch.Tensor or Tensor-like 22 | The kernel tensor to use 23 | bias : torch.Tensor or Tensor-like 24 | The bias tensor to use 25 | padding : str or int (default 'same') 26 | The padding to use 27 | strides : int or tuple (default 1) 28 | The strides to use 29 | """ 30 | 31 | factory_kwargs = {'device': device, 'dtype': dtype} 32 | super().__init__() 33 | self.w = torch.nn.Parameter( 34 | torch.Tensor(kernel).to(**factory_kwargs) 35 | ) 36 | self.b = torch.nn.Parameter( 37 | torch.Tensor(bias).to(**factory_kwargs) 38 | ) 39 | 40 | self.padding = padding 41 | self.strides = strides 42 | 43 | def forward( 44 | self, 45 | inputs 46 | ): 47 | """ 48 | Call the layer on input data 49 | 50 | Parameters 51 | ---------- 52 | inputs : torch.Tensor 53 | Inputs to call the layer's logic on 54 | 55 | Returns 56 | ------- 57 | results : torch.Tensor 58 | The results of the layer's logic 59 | """ 60 | 61 | outputs = [] 62 | for i in range(len(inputs)): 63 | outputs.append( 64 | torch.nn.functional.conv3d( 65 | inputs[i], 66 | self.w[i], 67 | self.b[i], 68 | stride=self.strides, 69 | padding=self.padding 70 | ) 71 | ) 72 | return outputs 73 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MultiDense.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class MultiDense(torch.nn.Module): 5 | """ 6 | Multi-Fully-Connected layer initialized with weights rather than hyperparameters 7 | """ 8 | 9 | def __init__( 10 | self, 11 | weight, 12 | bias, 13 | device=None, 14 | dtype=None 15 | ): 16 | """ 17 | Parameters 18 | ---------- 19 | weight : torch.Tensor or Tensor-like 20 | The weight tensor to use 21 | bias : torch.Tensor or Tensor-like 22 | The bias tensor to use 23 | """ 24 | 25 | factory_kwargs = {'device': device, 'dtype': dtype} 26 | super().__init__() 27 | self.w = torch.nn.Parameter(torch.Tensor(weight).to(**factory_kwargs)) 28 | self.b = torch.nn.Parameter(torch.Tensor(bias).to(**factory_kwargs)) 29 | 30 | def forward(self, inputs): 31 | """ 32 | Call the layer on input data 33 | 34 | Parameters 35 | ---------- 36 | inputs : torch.Tensor 37 | Inputs to call the layer's logic on 38 | 39 | Returns 40 | ------- 41 | results : torch.Tensor 42 | The results of the layer's logic 43 | """ 44 | outputs = [] 45 | for i in range(len(inputs)): 46 | out = 
torch.mm(inputs[i], self.w[i]) 47 | out = torch.add(out, self.b[i]) 48 | outputs.append(out) 49 | return outputs 50 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MultiMaskedDense.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | 5 | class MultiMaskedDense(torch.nn.Module): 6 | """ 7 | Multi-Fully-Connected layer which supports masking and pruning 8 | """ 9 | 10 | def __init__( 11 | self, 12 | in_features, 13 | out_features, 14 | num_tasks, 15 | device=None, 16 | dtype=None 17 | ): 18 | """ 19 | Parameters 20 | ---------- 21 | in_features : int 22 | The number of input features 23 | out_features : int 24 | The number of output features. 25 | Also known as the number of artificial neurons 26 | num_tasks : int 27 | The number of tasks to initialize for 28 | """ 29 | 30 | super().__init__() 31 | factory_kwargs = {'device': device, 'dtype': dtype} 32 | self.in_features = in_features 33 | self.out_features = out_features 34 | self.num_tasks = num_tasks 35 | 36 | weight = torch.Tensor( 37 | num_tasks, 38 | in_features, 39 | out_features 40 | ).to(**factory_kwargs) 41 | weight = torch.nn.init.kaiming_normal_(weight, a=np.sqrt(5)) 42 | self.w = torch.nn.Parameter(weight) 43 | self.register_buffer( 44 | 'w_mask', torch.ones_like(self.w, **factory_kwargs)) 45 | 46 | bias = torch.zeros(num_tasks, out_features, **factory_kwargs) 47 | self.b = torch.nn.Parameter(bias) 48 | self.register_buffer('b_mask', torch.ones_like(bias, **factory_kwargs)) 49 | 50 | def forward(self, inputs): 51 | """ 52 | Call the layer on input data 53 | 54 | Parameters 55 | ---------- 56 | inputs : torch.Tensor 57 | Inputs to call the layer's logic on 58 | 59 | Returns 60 | ------- 61 | results : torch.Tensor 62 | The results of the layer's logic 63 | """ 64 | outputs = [] 65 | for i in range(len(inputs)): 66 | out = torch.mm(inputs[i], self.w[i] * self.w_mask[i]) 67 | out = torch.add(out, self.b[i] * self.b_mask[i]) 68 | outputs.append(out) 69 | return outputs 70 | 71 | def prune(self, percentile): 72 | """ 73 | Prune the layer by updating the layer's mask 74 | 75 | Parameters 76 | ---------- 77 | percentile : int 78 | Integer between 0 and 99 which represents the proportion of weights to be inactive 79 | 80 | Notes 81 | ----- 82 | Acts on the layer in place 83 | """ 84 | w_copy = np.abs(self.w.detach().cpu().numpy()) 85 | b_copy = np.abs(self.b.detach().cpu().numpy()) 86 | new_w_mask = np.zeros_like(w_copy) 87 | new_b_mask = np.zeros_like(b_copy) 88 | 89 | for task_num in range(self.num_tasks): 90 | if task_num != 0: 91 | for prev_idx in range(task_num): 92 | w_copy[task_num][new_w_mask[prev_idx] == 1] = 0 93 | b_copy[task_num][new_b_mask[prev_idx] == 1] = 0 94 | 95 | w_percentile = np.percentile(w_copy[task_num], percentile) 96 | b_percentile = np.percentile(b_copy[task_num], percentile) 97 | 98 | new_w_mask[task_num] = ( 99 | w_copy[task_num] >= w_percentile).astype(int) 100 | new_b_mask[task_num] = ( 101 | b_copy[task_num] >= b_percentile).astype(int) 102 | 103 | self.w_mask[:] = torch.Tensor(new_w_mask) 104 | self.b_mask[:] = torch.Tensor(new_b_mask) 105 | 106 | self.w = torch.nn.Parameter( 107 | self.w.detach() * self.w_mask 108 | ) 109 | self.b = torch.nn.Parameter( 110 | self.b.detach() * self.b_mask 111 | ) 112 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MultiMaxPool2D.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class MultiMaxPool2D(torch.nn.Module): 5 | """ 6 | Multitask implementation of 2-dimensional Max Pooling layer 7 | """ 8 | 9 | def __init__( 10 | self, 11 | kernel_size, 12 | stride=None, 13 | padding=0, 14 | dilation=1 15 | ): 16 | """ 17 | Parameters 18 | ---------- 19 | kernel_size : int or tuple 20 | The kernel size to use 21 | stride : int, tuple, or None (default None) 22 | The stride to use. If None, defaults to kernel_size 23 | padding : int (default 0) 24 | The padding to use 25 | dilation : int (default 1) 26 | The dilation to use 27 | """ 28 | 29 | super().__init__() 30 | self.kernel_size = kernel_size 31 | self.stride = stride if stride else self.kernel_size 32 | self.padding = padding 33 | self.dilation = dilation 34 | 35 | def forward(self, inputs): 36 | """ 37 | Call the layer on input data 38 | 39 | Parameters 40 | ---------- 41 | inputs : torch.Tensor 42 | Inputs to call the layer's logic on 43 | 44 | Returns 45 | ------- 46 | results : torch.Tensor 47 | The results of the layer's logic 48 | """ 49 | outputs = [] 50 | for i in range(len(inputs)): 51 | outputs.append( 52 | torch.nn.functional.max_pool2d( 53 | input=inputs[i], 54 | kernel_size=self.kernel_size, 55 | stride=self.stride, 56 | padding=self.padding, 57 | dilation=self.dilation 58 | ) 59 | ) 60 | return outputs 61 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MultiMaxPool3D.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class MultiMaxPool3D(torch.nn.Module): 5 | """ 6 | Multitask implementation of 2-dimensional Max Pooling layer 7 | """ 8 | 9 | def __init__( 10 | self, 11 | kernel_size, 12 | stride=None, 13 | padding=0, 14 | dilation=1 15 | ): 16 | """ 17 | Parameters 18 | ---------- 19 | kernel_size : int or tuple 20 | The kernel size to use 21 | stride : int, tuple, or None (default None) 22 | The stride to use. 
If None, defaults to kernel_size 23 | padding : int (default 0) 24 | The padding to use 25 | dilation : int (default 1) 26 | The dilation to use 27 | """ 28 | self.kernel_size = kernel_size 29 | self.stride = stride if stride else self.kernel_size 30 | self.padding = padding 31 | self.dilation = dilation 32 | 33 | def forward( 34 | self, 35 | inputs 36 | ): 37 | """ 38 | Call the layer on input data 39 | 40 | Parameters 41 | ---------- 42 | inputs : torch.Tensor 43 | Inputs to call the layer's logic on 44 | 45 | Returns 46 | ------- 47 | results : torch.Tensor 48 | The results of the layer's logic 49 | """ 50 | outputs = [] 51 | for i in range(len(inputs)): 52 | outputs.append( 53 | torch.nn.functional.max_pool3d( 54 | input=inputs[i], 55 | kernel_size=self.kernel_size, 56 | stride=self.stride, 57 | padding=self.padding, 58 | dilation=self.dilation 59 | ) 60 | ) 61 | return outputs 62 | -------------------------------------------------------------------------------- /beyondml/pt/layers/MultitaskNormalization.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class MultitaskNormalization(torch.nn.Module): 5 | """ 6 | Layer which normalizes a set of inputs to sum to 1 7 | """ 8 | 9 | def __init__( 10 | self, 11 | device=None, 12 | dtype=None 13 | ): 14 | 15 | super().__init__() 16 | self.factory_kwargs = {'device': device, 'dtype': dtype} 17 | 18 | def forward(self, inputs): 19 | """ 20 | Call the layer on input data 21 | 22 | Parameters 23 | ---------- 24 | inputs : torch.Tensor or list of Tensors 25 | Inputs to call the layer's logic on 26 | 27 | Returns 28 | ------- 29 | results : torch.Tensor or list of Tensors 30 | The results of the layer's logic 31 | """ 32 | s = 0 33 | for i in inputs: 34 | s += 1 35 | return [i / s for i in inputs] 36 | -------------------------------------------------------------------------------- /beyondml/pt/layers/SelectorLayer.py: -------------------------------------------------------------------------------- 1 | from torch.nn import Module 2 | 3 | 4 | class SelectorLayer(Module): 5 | """ 6 | Layer which selects an individual input based on index and only returns that one 7 | """ 8 | 9 | def __init__( 10 | self, 11 | sel_index 12 | ): 13 | """ 14 | Parameters 15 | ---------- 16 | sel_index : int 17 | The index of inputs to select 18 | """ 19 | super().__init__() 20 | self.sel_index = sel_index 21 | 22 | @property 23 | def sel_index(self): 24 | return self._sel_index 25 | 26 | @sel_index.setter 27 | def sel_index(self, value): 28 | if not isinstance(value, int): 29 | raise TypeError('sel_index must be integer-valued') 30 | self._sel_index = value 31 | 32 | def forward(self, inputs): 33 | """ 34 | Call the layer on input data 35 | 36 | Parameters 37 | ---------- 38 | inputs : torch.Tensor 39 | Inputs to call the layer's logic on 40 | 41 | Returns 42 | ------- 43 | results : torch.Tensor 44 | The results of the layer's logic 45 | """ 46 | return inputs[self.sel_index] 47 | -------------------------------------------------------------------------------- /beyondml/pt/layers/SparseConv2D.py: -------------------------------------------------------------------------------- 1 | from numpy import dtype 2 | import torch 3 | 4 | 5 | class SparseConv2D(torch.nn.Module): 6 | """ 7 | Sparse implementation of a 2D Convolutional layer, expected to be converted from a 8 | trained, pruned layer 9 | """ 10 | 11 | def __init__( 12 | self, 13 | kernel, 14 | bias, 15 | padding='same', 16 | strides=1, 17 | device=None, 
--------------------------------------------------------------------------------
/beyondml/pt/layers/SelectorLayer.py:
--------------------------------------------------------------------------------
from torch.nn import Module


class SelectorLayer(Module):
    """
    Layer which selects an individual input based on index and only returns that one
    """

    def __init__(
        self,
        sel_index
    ):
        """
        Parameters
        ----------
        sel_index : int
            The index of inputs to select
        """
        super().__init__()
        self.sel_index = sel_index

    @property
    def sel_index(self):
        return self._sel_index

    @sel_index.setter
    def sel_index(self, value):
        if not isinstance(value, int):
            raise TypeError('sel_index must be integer-valued')
        self._sel_index = value

    def forward(self, inputs):
        """
        Call the layer on input data

        Parameters
        ----------
        inputs : list of torch.Tensor
            Inputs to call the layer's logic on

        Returns
        -------
        results : torch.Tensor
            The results of the layer's logic
        """
        return inputs[self.sel_index]
--------------------------------------------------------------------------------
/beyondml/pt/layers/SparseConv2D.py:
--------------------------------------------------------------------------------
import torch


class SparseConv2D(torch.nn.Module):
    """
    Sparse implementation of a 2D Convolutional layer, expected to be converted from a
    trained, pruned layer
    """

    def __init__(
        self,
        kernel,
        bias,
        padding='same',
        strides=1,
        device=None,
        dtype=None
    ):
        """
        Parameters
        ----------
        kernel : torch.Tensor or Tensor-like
            The kernel to use
        bias : torch.Tensor or Tensor-like
            The bias to use
        padding : str or int (default 'same')
            The padding to use
        strides : int or tuple (default 1)
            The strides to use
        """

        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.register_buffer('w', torch.Tensor(
            kernel).to(**factory_kwargs).to_sparse())
        self.register_buffer('b', torch.Tensor(
            bias).to(**factory_kwargs).to_sparse())

        self.padding = padding
        self.strides = strides

    def forward(self, inputs):
        """
        Call the layer on input data

        Parameters
        ----------
        inputs : torch.Tensor
            Inputs to call the layer's logic on

        Returns
        -------
        results : torch.Tensor
            The results of the layer's logic
        """
        kernel = self.w.to_dense()
        bias = self.b.to_dense()

        return torch.nn.functional.conv2d(
            inputs,
            kernel,
            bias,
            stride=self.strides,
            padding=self.padding
        )
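
# Usage sketch (illustrative only; the kernel and bias here are zero-valued
# stand-ins for the weights of a trained, pruned layer):
#
# >>> kernel = torch.zeros(8, 3, 3, 3)  # (out_channels, in_channels, kH, kW)
# >>> bias = torch.zeros(8)
# >>> layer = SparseConv2D(kernel, bias)
# >>> layer(torch.rand(1, 3, 16, 16)).shape
# torch.Size([1, 8, 16, 16])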
--------------------------------------------------------------------------------
/beyondml/pt/layers/SparseConv3D.py:
--------------------------------------------------------------------------------
import torch


class SparseConv3D(torch.nn.Module):
    """
    Sparse 3D Convolutional layer, expected to be converted from a
    trained, pruned layer
    """

    def __init__(
        self,
        kernel,
        bias,
        padding='same',
        strides=1,
        device=None,
        dtype=None
    ):
        """
        Parameters
        ----------
        kernel : torch.Tensor or Tensor-like
            The kernel to use
        bias : torch.Tensor or Tensor-like
            The bias to use
        padding : str or int (default 'same')
            The padding to use
        strides : int or tuple (default 1)
            The strides to use
        """

        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.register_buffer('w', torch.Tensor(
            kernel).to(**factory_kwargs).to_sparse())
        self.register_buffer('b', torch.Tensor(
            bias).to(**factory_kwargs).to_sparse())

        self.padding = padding
        self.strides = strides

    def forward(self, inputs):
        """
        Call the layer on input data

        Parameters
        ----------
        inputs : torch.Tensor
            Inputs to call the layer's logic on

        Returns
        -------
        results : torch.Tensor
            The results of the layer's logic
        """
        kernel = self.w.to_dense()
        bias = self.b.to_dense()

        return torch.nn.functional.conv3d(
            inputs,
            kernel,
            bias,
            stride=self.strides,
            padding=self.padding
        )
--------------------------------------------------------------------------------
/beyondml/pt/layers/SparseDense.py:
--------------------------------------------------------------------------------
import torch


class SparseDense(torch.nn.Module):
    """
    Sparse implementation of a fully-connected layer
    """

    def __init__(
        self,
        weight,
        bias,
        device=None,
        dtype=None
    ):
        """
        Parameters
        ----------
        weight : torch.Tensor or Tensor-like
            The weight to use
        bias : torch.Tensor or Tensor-like
            The bias to use
        """

        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.register_buffer('w', torch.Tensor(
            weight).to(**factory_kwargs).to_sparse())
        self.register_buffer('b', torch.Tensor(
            bias).to(**factory_kwargs).to_sparse())

    def forward(self, inputs):
        """
        Call the layer on input data

        Parameters
        ----------
        inputs : torch.Tensor
            Inputs to call the layer's logic on

        Returns
        -------
        results : torch.Tensor
            The results of the layer's logic
        """
        out = torch.sparse.mm(self.w.t(), inputs.t()).t()
        out = torch.add(out, self.b.to_dense())
        return out
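
# Usage sketch (illustrative only; the identity weight is a stand-in for a
# trained, pruned layer's parameters, laid out as (in_features, out_features)):
#
# >>> layer = SparseDense(torch.eye(4), torch.zeros(4))
# >>> layer(torch.ones(2, 4)).shape
# torch.Size([2, 4])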
--------------------------------------------------------------------------------
/beyondml/pt/layers/SparseMultiConv2D.py:
--------------------------------------------------------------------------------
import torch


class SparseMultiConv2D(torch.nn.Module):
    """
    Sparse implementation of a Multi 2D Convolutional layer
    """

    def __init__(
        self,
        kernel,
        bias,
        padding='same',
        strides=1,
        device=None,
        dtype=None
    ):
        """
        Parameters
        ----------
        kernel : torch.Tensor or Tensor-like
            The kernel to use
        bias : torch.Tensor or Tensor-like
            The bias to use
        padding : str or int (default 'same')
            The padding to use
        strides : int or tuple (default 1)
            The strides to use
        """

        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        for i in range(kernel.shape[0]):
            self.register_buffer(
                f'w_{i}',
                torch.Tensor(kernel[i]).to(**factory_kwargs).to_sparse()
            )
            self.register_buffer(
                f'b_{i}',
                torch.Tensor(bias[i]).to(**factory_kwargs).to_sparse()
            )

        self.padding = padding
        self.strides = strides

    def forward(self, inputs):
        """
        Call the layer on input data

        Parameters
        ----------
        inputs : list of torch.Tensor
            Inputs to call the layer's logic on

        Returns
        -------
        results : list of torch.Tensor
            The results of the layer's logic
        """
        outputs = []

        for i in range(len(inputs)):
            outputs.append(
                torch.nn.functional.conv2d(
                    inputs[i],
                    self.get_buffer(f'w_{i}').to_dense(),
                    self.get_buffer(f'b_{i}').to_dense(),
                    stride=self.strides,
                    padding=self.padding
                )
            )
        return outputs
--------------------------------------------------------------------------------
/beyondml/pt/layers/SparseMultiConv3D.py:
--------------------------------------------------------------------------------
import torch


class SparseMultiConv3D(torch.nn.Module):
    """
    Sparse implementation of a Multitask 3D Convolutional layer, expected to be converted from a
    trained, pruned layer
    """

    def __init__(
        self,
        kernel,
        bias,
        padding='same',
        strides=1,
        device=None,
        dtype=None
    ):
        """
        Parameters
        ----------
        kernel : torch.Tensor or Tensor-like
            The kernel to use
        bias : torch.Tensor or Tensor-like
            The bias to use
        padding : str or int (default 'same')
            The padding to use
        strides : int or tuple (default 1)
            The strides to use
        """

        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        for i in range(kernel.shape[0]):
            self.register_buffer(
                f'w_{i}',
                torch.Tensor(kernel[i]).to(**factory_kwargs).to_sparse()
            )
            self.register_buffer(
                f'b_{i}',
                torch.Tensor(bias[i]).to(**factory_kwargs).to_sparse()
            )

        self.padding = padding
        self.strides = strides

    def forward(self, inputs):
        """
        Call the layer on input data

        Parameters
        ----------
        inputs : list of torch.Tensor
            Inputs to call the layer's logic on

        Returns
        -------
        results : list of torch.Tensor
            The results of the layer's logic
        """

        outputs = []
        for i in range(len(inputs)):
            outputs.append(
                torch.nn.functional.conv3d(
                    inputs[i],
                    self.get_buffer(f'w_{i}').to_dense(),
                    self.get_buffer(f'b_{i}').to_dense(),
                    stride=self.strides,
                    padding=self.padding
                )
            )
        return outputs
--------------------------------------------------------------------------------
/beyondml/pt/layers/SparseMultiDense.py:
--------------------------------------------------------------------------------
import torch


class SparseMultiDense(torch.nn.Module):
    """
    Sparse implementation of the Multi-Fully-Connected layer
    """

    def __init__(
        self,
        weight,
        bias,
        device=None,
        dtype=None
    ):
        """
        Parameters
        ----------
        weight : torch.Tensor or Tensor-like
            The weight to use
        bias : torch.Tensor or Tensor-like
            The bias to use
        """

        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        for i in range(weight.shape[0]):
            self.register_buffer(
                f'w_{i}',
                torch.Tensor(weight[i]).to(**factory_kwargs).to_sparse()
            )
            self.register_buffer(
                f'b_{i}',
                torch.Tensor(bias[i]).to(**factory_kwargs).to_sparse()
            )

    def forward(self, inputs):
        """
        Call the layer on input data

        Parameters
        ----------
        inputs : list of torch.Tensor
            Inputs to call the layer's logic on

        Returns
        -------
        results : list of torch.Tensor
            The results of the layer's logic
        """
        outputs = []
        for i in range(len(inputs)):
            out = torch.sparse.mm(
                self.get_buffer(f'w_{i}').t(),
                inputs[i].t()
            ).t()
            out = torch.add(
                out,
                self.get_buffer(f'b_{i}').to_dense()
            )
            outputs.append(out)
        return outputs
--------------------------------------------------------------------------------
/beyondml/pt/layers/__init__.py:
--------------------------------------------------------------------------------
"""Layers compatible with PyTorch models"""

from .FilterLayer import FilterLayer
from .SelectorLayer import SelectorLayer
from .Dense import Dense
from .Conv2D import Conv2D
from .Conv3D import Conv3D
from .MultiDense import MultiDense
from .MultiConv2D import MultiConv2D
from .MultiConv3D import MultiConv3D
from .MaskedDense import MaskedDense
from .MaskedConv2D import MaskedConv2D
from .MaskedConv3D import MaskedConv3D
from .MultiMaskedDense import MultiMaskedDense
from .MultiMaskedConv2D import MultiMaskedConv2D
from .MultiMaskedConv3D import MultiMaskedConv3D
from .SparseMultiDense import SparseMultiDense
from .SparseMultiConv2D import SparseMultiConv2D
from .SparseMultiConv3D import SparseMultiConv3D
from .SparseDense import SparseDense
from .SparseConv2D import SparseConv2D
from .SparseConv3D import SparseConv3D
from .MultiMaxPool2D import MultiMaxPool2D
from .MultiMaxPool3D import MultiMaxPool3D
from .MaskedTransformerEncoderLayer import MaskedTransformerEncoderLayer
from .MaskedTransformerDecoderLayer import MaskedTransformerDecoderLayer
from .MaskedMultiHeadAttention import MaskedMultiHeadAttention
from .MultitaskNormalization import MultitaskNormalization
--------------------------------------------------------------------------------
/beyondml/pt/utils/__init__.py:
--------------------------------------------------------------------------------
"""
Some additional utilities for building MANN models in PyTorch.
"""

from .utils import prune_model
--------------------------------------------------------------------------------
/beyondml/pt/utils/utils.py:
--------------------------------------------------------------------------------
from beyondml.pt.layers import MaskedConv2D, MaskedConv3D, MaskedDense, MultiMaskedConv2D, MultiMaskedConv3D, MultiMaskedDense, MultitaskNormalization


def prune_model(model, percentile):
    """
    Prune a compatible model

    Parameters
    ----------
    model : PyTorch model
        A model that has been developed to have a `.layers` property containing layers to be pruned
    percentile : int
        An integer between 0 and 99 which corresponds to how much to prune the model

    Returns
    -------
    pruned_model : PyTorch model
        The pruned model

    Notes
    -----
    - The model input **must** have a `.layers` property to be able to function. Only layers within the
      `.layers` property which are recognized as prunable are pruned, via their own `.prune()` method
    - Also acts on the model in place, but returns the model for ease of use
    """

    compatible_layers = (MaskedConv2D, MaskedConv3D, MaskedDense,
                         MultiMaskedConv2D, MultiMaskedConv3D, MultiMaskedDense)

    try:
        for layer in model.layers:
            if isinstance(layer, compatible_layers):
                layer.prune(percentile)
    except AttributeError:
        raise AttributeError(
            'Input model does not have a `.layers` attribute. Please make sure to add '
            'that attribute to the model class in order to use this function'
        )

    return model
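
# Usage sketch (illustrative only): `PrunableModel` and `make_prunable_layers`
# are hypothetical names invented for this example. The only requirement
# `prune_model` imposes is a `.layers` attribute collecting the prunable layers.
#
# >>> import torch
# >>> class PrunableModel(torch.nn.Module):
# ...     def __init__(self, layers):
# ...         super().__init__()
# ...         self.layers = torch.nn.ModuleList(layers)
# ...     def forward(self, x):
# ...         for layer in self.layers:
# ...             x = layer(x)
# ...         return x
# >>> model = PrunableModel(make_prunable_layers())  # e.g. MaskedDense instances
# >>> model = prune_model(model, percentile=80)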

Within the `layers` subpackage, there is currently functionality for the following layers:
- `beyondml.tflow.layers.FilterLayer`
- `beyondml.tflow.layers.MaskedConv2D`
- `beyondml.tflow.layers.MaskedConv3D`
- `beyondml.tflow.layers.MaskedDense`
- `beyondml.tflow.layers.MultiConv2D`
- `beyondml.tflow.layers.MultiConv3D`
- `beyondml.tflow.layers.MultiDense`
- `beyondml.tflow.layers.MultiMaskedConv2D`
- `beyondml.tflow.layers.MultiMaskedConv3D`
- `beyondml.tflow.layers.MultiMaskedDense`
- `beyondml.tflow.layers.MultiMaxPool2D`
- `beyondml.tflow.layers.MultiMaxPool3D`
- `beyondml.tflow.layers.MultitaskNormalization`
- `beyondml.tflow.layers.SelectorLayer`
- `beyondml.tflow.layers.SumLayer`
- `beyondml.tflow.layers.SparseDense`
- `beyondml.tflow.layers.SparseConv2D`
- `beyondml.tflow.layers.SparseConv3D`
- `beyondml.tflow.layers.SparseMultiDense`
- `beyondml.tflow.layers.SparseMultiConv2D`
- `beyondml.tflow.layers.SparseMultiConv3D`

**Note that with any of the sparse layers (such as the `SparseDense` layer), any model which
utilizes these layers will not be loadable using the traditional `load_model` functions available
in TensorFlow. Instead, the model should be saved using either joblib or pickle.**

Within the `utils` subpackage, there are currently the following functions and classes:
- `ActiveSparsification`
- `build_transformer_block`
- `build_token_position_embedding_block`
- `get_custom_objects`
- `mask_model`
- `replace_weights`
- `replace_config`
- `add_layer_masks`
- `quantize_model`
- `get_task_masking_gradients`
- `mask_task_weights`
- `train_model`
- `train_model_iteratively`
"""

import beyondml.tflow.layers
import beyondml.tflow.utils
--------------------------------------------------------------------------------
/beyondml/tflow/layers/FilterLayer.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer


class FilterLayer(Layer):
    """
    Layer which filters inputs based on status of `on` or `off`

    Example:

    >>> # Create a model with just a FilterLayer
    >>> input_layer = tf.keras.layers.Input(10)
    >>> filter_layer = mann.layers.FilterLayer()(input_layer)
    >>> model = tf.keras.models.Model(input_layer, filter_layer)
    >>> model.compile()
    >>> # Call the model with the layer turned on
    >>> data = np.arange(10).reshape((1, 10))
    >>> model.predict(data)
    array([[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]], dtype=float32)
    >>> # Turn off the FilterLayer and call it again
    >>> model.layers[-1].turn_off()
    >>> # Model must be recompiled after turning the layer on or off
    >>> model.compile()
    >>> model.predict(data)
    array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], dtype=float32)

    """

    def __init__(
        self,
        is_on=True,
        **kwargs
    ):
        super(FilterLayer, self).__init__(**kwargs)
        self.is_on = is_on

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        if self.is_on:
            return inputs
        else:
            return tf.zeros_like(inputs)

    def get_config(self):
        config = super().get_config().copy()
        config.update({'is_on': self.is_on})
        return config

    def turn_on(self):
        """Turn the layer `on` so inputs are returned unchanged as outputs"""
        self.is_on = True

    def turn_off(self):
        """Turn the layer `off` so inputs are suppressed and all-zero tensors are output"""
        self.is_on = False

    @classmethod
    def from_config(cls, config):
        return cls(**config)
--------------------------------------------------------------------------------
/beyondml/tflow/layers/MultiDense.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer


class MultiDense(Layer):
    """
    Multitask fully connected layer

    This layer implements multiple stacks of fully connected weights to account for different
    ways neurons can activate for various tasks. To train using the RSN2 algorithm, it is expected
    that MultiMaskedDense layers be used during training and then converted to this layer type.

    """

    def __init__(
        self,
        units,
        use_bias=True,
        activation=None,
        kernel_initializer='random_normal',
        bias_initializer='zeros',
        **kwargs
    ):
        """
        Parameters
        ----------
        units : int
            The number of artificial neurons to use
        use_bias : bool (default True)
            Whether to use a bias calculation in the outputs
        activation : None, str, or function (default None)
            The activation function to use on the outputs
        kernel_initializer : str or keras initialization function (default 'random_normal')
            The weight initialization function to use
        bias_initializer : str or keras initialization function (default 'zeros')
            The bias initialization function to use

        """
        super(MultiDense, self).__init__(**kwargs)
        self.units = int(units) if not isinstance(units, int) else units
        self.use_bias = use_bias
        self.activation = tf.keras.activations.get(activation)
        self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
        self.bias_initializer = tf.keras.initializers.get(bias_initializer)

    def build(self, input_shape):
        """
        Build the layer in preparation to be trained or called. Should not be called directly,
        but rather is called when the layer is added to a model
        """
        try:
            input_shape = [
                tuple(shape.as_list()) for shape in input_shape
            ]
        except AttributeError:
            # Sometimes, input shapes come as tuples already
            pass

        simplified_shape = input_shape[0]

        self.w = self.add_weight(
            shape=(len(input_shape), simplified_shape[-1], self.units),
            initializer=self.kernel_initializer,
            trainable=True,
            name='weights'
        )

        if self.use_bias:
            self.b = self.add_weight(
                shape=(len(input_shape), self.units),
                initializer=self.bias_initializer,
                trainable=True,
                name='bias'
            )

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        output_tensor = [
            tf.matmul(inputs[i], self.w[i]) for i in range(len(inputs))
        ]
        if self.use_bias:
            output_tensor = [
                output_tensor[i] + (self.b[i]) for i in range(len(output_tensor))
            ]
        return [self.activation(tensor) for tensor in output_tensor]

    def get_config(self):
        config = super().get_config().copy()
        config.update(
            {
                'units': self.units,
                'use_bias': self.use_bias,
                'activation': tf.keras.activations.serialize(self.activation),
                'kernel_initializer': tf.keras.initializers.serialize(self.kernel_initializer),
                'bias_initializer': tf.keras.initializers.serialize(self.bias_initializer)
            }
        )
        return config

    @classmethod
    def from_config(cls, config):
        return cls(
            units=config['units'],
            use_bias=config['use_bias'],
            activation=config['activation'],
            kernel_initializer=config['kernel_initializer'],
            bias_initializer=config['bias_initializer']
        )
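
# Usage sketch (illustrative only; shapes and unit counts are arbitrary): the
# layer consumes a list of per-task tensors and returns a list of per-task
# outputs, one weight stack per task.
#
# >>> input_1 = tf.keras.layers.Input(10)
# >>> input_2 = tf.keras.layers.Input(10)
# >>> outputs = MultiDense(4)([input_1, input_2])
# >>> [output.shape.as_list() for output in outputs]
# [[None, 4], [None, 4]]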
--------------------------------------------------------------------------------
/beyondml/tflow/layers/MultiMaxPool2D.py:
--------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow.keras.layers import Layer


class MultiMaxPool2D(Layer):
    """
    Multitask Max Pooling Layer. This layer implements the Max Pooling algorithm
    across multiple inputs for developing multitask models

    """

    def __init__(
        self,
        pool_size=(2, 2),
        strides=(1, 1),
        padding='same',
        **kwargs
    ):
        """
        Parameters
        ----------
        pool_size : integer or tuple of 2 integers (default (2, 2))
            Window size over which to take the maximum
        strides : integer or tuple of 2 integers (default (1, 1))
            Stride values to move the pooling window after each step
        padding : str (default 'same')
            One of either 'same' or 'valid', case-insensitive. The
            padding to apply to the inputs

        """
        super(MultiMaxPool2D, self).__init__(**kwargs)
        self.pool_size = pool_size
        self.strides = strides
        self.padding = padding

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        return [
            tf.nn.max_pool2d(
                inputs[i],
                self.pool_size,
                self.strides,
                self.padding.upper(),
                'NHWC'
            ) for i in range(len(inputs))
        ]

    def get_config(self):
        config = super().get_config().copy()
        config.update(
            {
                'pool_size': self.pool_size,
                'strides': self.strides,
                'padding': self.padding
            }
        )
        return config

    @classmethod
    def from_config(cls, config):
        return cls(
            pool_size=config['pool_size'],
            strides=config['strides'],
            padding=config['padding']
        )
--------------------------------------------------------------------------------
/beyondml/tflow/layers/MultiMaxPool3D.py:
--------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow.keras.layers import Layer


class MultiMaxPool3D(Layer):
    """
    Multitask 3D Max Pooling Layer. This layer implements the Max Pooling
    algorithm across multiple inputs for developing multitask models
    """

    def __init__(
        self,
        pool_size=(3, 3, 3),
        strides=(1, 1, 1),
        padding='same',
        **kwargs
    ):
        """
        Parameters
        ----------
        pool_size : integer or tuple of 3 integers (default (3, 3, 3))
            Window size over which to take the maximum
        strides : integer or tuple of 3 integers (default (1, 1, 1))
            Stride values to move the pooling window after each step
        padding : str (default 'same')
            One of either 'same' or 'valid', case-insensitive. The
            padding to apply to the inputs

        """
        super().__init__(**kwargs)
        self.pool_size = pool_size
        self.strides = strides
        self.padding = padding

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        return [
            tf.nn.max_pool3d(
                input=inputs[i],
                ksize=self.pool_size,
                strides=self.strides,
                padding=self.padding.upper(),
                data_format='NDHWC'
            ) for i in range(len(inputs))
        ]

    def get_config(self):
        config = super().get_config().copy()
        config.update(
            {
                'pool_size': self.pool_size,
                'strides': self.strides,
                'padding': self.padding
            }
        )
        return config

    @classmethod
    def from_config(cls, config):
        return cls(
            pool_size=config['pool_size'],
            strides=config['strides'],
            padding=config['padding']
        )
--------------------------------------------------------------------------------
/beyondml/tflow/layers/MultitaskNormalization.py:
--------------------------------------------------------------------------------
from tensorflow.keras.layers import Layer


class MultitaskNormalization(Layer):
    """
    Multitask layer which normalizes all inputs to sum to 1
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def build(self, input_shape):
        pass

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        s = 0
        for i in inputs:
            s += i
        return [i / s for i in inputs]

    def get_config(self):
        return super().get_config()

    @classmethod
    def from_config(cls, config):
        return cls(**config)
--------------------------------------------------------------------------------
/beyondml/tflow/layers/SelectorLayer.py:
--------------------------------------------------------------------------------
from tensorflow.keras.layers import Layer


class SelectorLayer(Layer):
    """
    Layer which selects individual inputs

    Example:

    >>> # Create a model with two inputs and one SelectorLayer
    >>> input_1 = tf.keras.layers.Input(10)
    >>> input_2 = tf.keras.layers.Input(10)
    >>> selector = mann.layers.SelectorLayer(1)([input_1, input_2]) # 1 here indicates to select the second input and return it
    >>> model = tf.keras.models.Model([input_1, input_2], selector)
    >>> model.compile()
    >>> # Call the model
    >>> data1 = np.arange(10).reshape((1, 10))
    >>> data2 = 2*np.arange(10).reshape((1, 10))
    >>> model.predict([data1, data2])
    array([[ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18.]], dtype=float32)

    """

    def __init__(
        self,
        sel_index,
        **kwargs
    ):
        """
        Parameters
        ----------
        sel_index : int
            The index of the inputs to be selected
        """
        super(SelectorLayer, self).__init__(**kwargs)
        self.sel_index = sel_index

    @property
    def sel_index(self):
        return self._sel_index
    @sel_index.setter
    def sel_index(self, value):
        if not isinstance(value, int):
            raise TypeError(
                f'sel_index must be int, got {value}, type {type(value)}')
        self._sel_index = value

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        return inputs[self.sel_index]

    def get_config(self):
        config = super().get_config().copy()
        config.update(
            {
                'sel_index': self.sel_index
            }
        )
        return config

    @classmethod
    def from_config(cls, config):
        return cls(
            sel_index=config['sel_index']
        )
--------------------------------------------------------------------------------
/beyondml/tflow/layers/SparseConv2D.py:
--------------------------------------------------------------------------------
from tensorflow.keras.layers import Layer
import tensorflow as tf


class SparseConv2D(Layer):
    """
    Sparse implementation of the Convolutional layer. If used in a model,
    must be saved and loaded via pickle
    """

    def __init__(
        self,
        filters,
        bias,
        padding='same',
        strides=1,
        activation=None,
        **kwargs
    ):
        """
        Parameters
        ----------
        filters : tf.Tensor
            The convolutional filters
        bias : tf.Tensor
            The bias tensor
        padding : str, int, or tuple of int (default 'same')
            The padding to use
        strides : int or tuple of int (default 1)
            The strides to use
        activation : None, str, or keras activation function (default None)
            The activation function to use
        """
        super().__init__(**kwargs)
        self.w = tf.sparse.from_dense(filters)
        self.b = tf.sparse.from_dense(bias)
        self.padding = padding
        self.strides = strides
        self.activation = tf.keras.activations.get(activation)

    def build(self, input_shape):
        """
        Build the layer in preparation to be trained or called. Should not be called directly,
        but rather is called when the layer is added to a model
        """
        pass

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        conv_output = tf.nn.convolution(
            inputs,
            tf.sparse.to_dense(self.w),
            padding=self.padding.upper() if isinstance(
                self.padding, str) else self.padding,
            strides=self.strides,
            data_format='NHWC'
        )
        conv_output = conv_output + tf.sparse.to_dense(self.b)
        return self.activation(conv_output)

    def get_config(self):
        config = super().get_config().copy()
        config.update(
            {
                'padding': self.padding,
                'strides': self.strides,
                'activation': tf.keras.activations.serialize(self.activation)
            }
        )
        return config

    @classmethod
    def from_layer(cls, layer):
        """
        Create a layer from an instance of another layer
        """
        weights = layer.get_weights()
        w = weights[0]
        b = weights[1]
        padding = layer.padding
        strides = layer.strides
        activation = layer.activation
        return cls(
            w,
            b,
            padding,
            strides,
            activation
        )

    @classmethod
    def from_config(cls, config):
        return cls(**config)
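
# Usage sketch (illustrative only; layer sizes are arbitrary): converting a
# trained (typically pruned) Keras Conv2D layer into its sparse counterpart.
#
# >>> dense_layer = tf.keras.layers.Conv2D(8, 3, padding='same', activation='relu')
# >>> dense_layer.build((None, 16, 16, 3))
# >>> sparse_layer = SparseConv2D.from_layer(dense_layer)
# >>> sparse_layer(tf.random.normal((1, 16, 16, 3))).shape
# TensorShape([1, 16, 16, 8])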
--------------------------------------------------------------------------------
/beyondml/tflow/layers/SparseConv3D.py:
--------------------------------------------------------------------------------
from tensorflow.keras.layers import Layer
import tensorflow as tf


class SparseConv3D(Layer):
    """
    Sparse implementation of the Convolutional layer. If used in a model,
    must be saved and loaded via pickle
    """

    def __init__(
        self,
        filters,
        bias,
        padding='same',
        strides=1,
        activation=None,
        **kwargs
    ):
        """
        Parameters
        ----------
        filters : tf.Tensor
            The convolutional filters
        bias : tf.Tensor
            The bias tensor
        padding : str, int, or tuple of int (default 'same')
            The padding to use
        strides : int or tuple of int (default 1)
            The strides to use
        activation : None, str, or keras activation function (default None)
            The activation function to use
        """
        super().__init__(**kwargs)
        self.w = tf.sparse.from_dense(filters)
        self.b = tf.sparse.from_dense(bias)
        self.padding = padding
        self.strides = strides
        self.activation = tf.keras.activations.get(activation)

    def build(self, input_shape):
        """
        Build the layer in preparation to be trained or called. Should not be called directly,
        but rather is called when the layer is added to a model
        """
        pass

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        conv_output = tf.nn.convolution(
            inputs,
            tf.sparse.to_dense(self.w),
            padding=self.padding.upper() if isinstance(
                self.padding, str) else self.padding,
            strides=self.strides,
            data_format='NDHWC'
        )
        conv_output = conv_output + tf.sparse.to_dense(self.b)
        return self.activation(conv_output)

    def get_config(self):
        config = super().get_config().copy()
        config.update(
            {
                'padding': self.padding,
                'strides': self.strides,
                'activation': tf.keras.activations.serialize(self.activation)
            }
        )
        return config

    @classmethod
    def from_layer(cls, layer):
        """
        Create a layer from an instance of another layer
        """
        weights = layer.get_weights()
        w = weights[0]
        b = weights[1]
        padding = layer.padding
        strides = layer.strides
        activation = layer.activation
        return cls(
            w,
            b,
            padding,
            strides,
            activation
        )

    @classmethod
    def from_config(cls, config):
        return cls(**config)
--------------------------------------------------------------------------------
/beyondml/tflow/layers/SparseDense.py:
--------------------------------------------------------------------------------
from tensorflow.keras.layers import Layer
import tensorflow as tf


class SparseDense(Layer):
    """
    Sparse implementation of the Dense layer. If used in a model, must be saved and loaded via pickle
    """

    def __init__(
        self,
        weight,
        bias,
        activation=None,
        **kwargs
    ):
        """
        Parameters
        ----------
        weight : tf.Tensor
            The kernel tensor
        bias : tf.Tensor
            The bias tensor
        activation : None, str, or keras activation function (default None)
            The activation function to use
        """
        super().__init__(**kwargs)
        self.w = tf.sparse.from_dense(weight)
        self.b = tf.sparse.from_dense(bias)
        self.activation = tf.keras.activations.get(activation)

    def build(self, input_shape):
        """
        Build the layer in preparation to be trained or called. Should not be called directly,
        but rather is called when the layer is added to a model
        """
        pass

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        return self.activation(
            tf.sparse.sparse_dense_matmul(
                inputs,
                self.w
            ) + tf.sparse.to_dense(self.b)
        )

    def get_config(self):
        config = super().get_config().copy()
        config['activation'] = tf.keras.activations.serialize(self.activation)
        return config

    @classmethod
    def from_layer(cls, layer):
        """
        Create a layer from an instance of another layer
        """
        weights = layer.get_weights()
        w = weights[0]
        b = weights[1]
        activation = layer.activation
        return cls(
            w,
            b,
            activation
        )

    @classmethod
    def from_config(cls, config):
        return cls(**config)
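
# Usage sketch (illustrative only): converting a trained Dense layer and, since
# sparse layers cannot be reloaded through `load_model`, persisting the result
# with pickle.
#
# >>> import pickle
# >>> dense_layer = tf.keras.layers.Dense(4, activation='relu')
# >>> dense_layer.build((None, 10))
# >>> sparse_layer = SparseDense.from_layer(dense_layer)
# >>> with open('sparse_layer.pkl', 'wb') as f:
# ...     pickle.dump(sparse_layer, f)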
--------------------------------------------------------------------------------
/beyondml/tflow/layers/SparseMultiConv2D.py:
--------------------------------------------------------------------------------
from tensorflow.keras.layers import Layer
import tensorflow as tf


class SparseMultiConv2D(Layer):
    """
    Sparse implementation of the MultiConv layer. If used in a model, must be saved and loaded via pickle
    """

    def __init__(
        self,
        filters,
        bias,
        padding='same',
        strides=1,
        activation=None,
        **kwargs
    ):
        """
        Parameters
        ----------
        filters : tf.Tensor
            The convolutional filters
        bias : tf.Tensor
            The bias tensor
        padding : str, int, or tuple of int (default 'same')
            The padding to use
        strides : int or tuple of int (default 1)
            The strides to use
        activation : None, str, or keras activation function (default None)
            The activation function to use
        """
        super().__init__(**kwargs)
        self.w = {
            i: tf.sparse.from_dense(filters[i]) for i in range(filters.shape[0])
        }
        self.b = {
            i: tf.sparse.from_dense(bias[i]) for i in range(bias.shape[0])
        }
        self.padding = padding
        self.strides = strides
        self.activation = tf.keras.activations.get(activation)

    def build(self, input_shapes):
        """
        Build the layer in preparation to be trained or called. Should not be called directly,
        but rather is called when the layer is added to a model
        """
        pass

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """

        conv_outputs = [
            tf.nn.convolution(
                inputs[i],
                tf.sparse.to_dense(self.w[i]),
                padding=self.padding.upper() if isinstance(
                    self.padding, str) else self.padding,
                strides=self.strides,
                data_format='NHWC'
            ) for i in range(len(inputs))
        ]
        conv_outputs = [
            conv_outputs[i] + tf.sparse.to_dense(self.b[i]) for i in range(len(conv_outputs))
        ]
        return [
            self.activation(output) for output in conv_outputs
        ]

    def get_config(self):
        config = super().get_config().copy()
        config.update(
            {
                'padding': self.padding,
                'strides': self.strides,
                'activation': tf.keras.activations.serialize(self.activation)
            }
        )
        return config

    @classmethod
    def from_layer(cls, layer):
        """
        Create a layer from an instance of another layer
        """
        weights = layer.get_weights()
        w = weights[0]
        b = weights[1]
        padding = layer.padding
        strides = layer.strides
        activation = layer.activation
        return cls(
            w,
            b,
            padding,
            strides,
            activation
        )

    @classmethod
    def from_config(cls, config):
        return cls(**config)
--------------------------------------------------------------------------------
/beyondml/tflow/layers/SparseMultiConv3D.py:
--------------------------------------------------------------------------------
from tensorflow.keras.layers import Layer
import tensorflow as tf


class SparseMultiConv3D(Layer):
    """
    Sparse implementation of the MultiConv layer. If used in a model, must be saved and loaded via pickle
    """

    def __init__(
        self,
        filters,
        bias,
        padding='same',
        strides=1,
        activation=None,
        **kwargs
    ):
        """
        Parameters
        ----------
        filters : tf.Tensor
            The convolutional filters
        bias : tf.Tensor
            The bias tensor
        padding : str, int, or tuple of int (default 'same')
            The padding to use
        strides : int or tuple of int (default 1)
            The strides to use
        activation : None, str, or keras activation function (default None)
            The activation function to use
        """
        super().__init__(**kwargs)
        self.w = {
            i: tf.sparse.from_dense(filters[i]) for i in range(filters.shape[0])
        }
        self.b = {
            i: tf.sparse.from_dense(bias[i]) for i in range(bias.shape[0])
        }
        self.padding = padding
        self.strides = strides
        self.activation = tf.keras.activations.get(activation)

    def build(self, input_shape):
        """
        Build the layer in preparation to be trained or called. Should not be called directly,
        but rather is called when the layer is added to a model
        """
        pass

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """

        conv_outputs = [
            tf.nn.convolution(
                inputs[i],
                tf.sparse.to_dense(self.w[i]),
                padding=self.padding.upper() if isinstance(
                    self.padding, str) else self.padding,
                strides=self.strides,
                data_format='NDHWC'
            ) for i in range(len(inputs))
        ]
        conv_outputs = [
            conv_outputs[i] + tf.sparse.to_dense(self.b[i]) for i in range(len(conv_outputs))
        ]
        return [
            self.activation(output) for output in conv_outputs
        ]

    def get_config(self):
        config = super().get_config().copy()
        config.update(
            {
                'padding': self.padding,
                'strides': self.strides,
                'activation': tf.keras.activations.serialize(self.activation)
            }
        )
        return config

    @classmethod
    def from_layer(cls, layer):
        """
        Create a layer from an instance of another layer
        """
        weights = layer.get_weights()
        w = weights[0]
        b = weights[1]
        padding = layer.padding
        strides = layer.strides
        activation = layer.activation
        return cls(
            w,
            b,
            padding,
            strides,
            activation
        )

    @classmethod
    def from_config(cls, config):
        return cls(**config)
--------------------------------------------------------------------------------
/beyondml/tflow/layers/SparseMultiDense.py:
--------------------------------------------------------------------------------
from tensorflow.keras.layers import Layer
import tensorflow as tf


class SparseMultiDense(Layer):
    """
    Sparse implementation of the MultiDense layer. If used in a model, must be saved and loaded via pickle
    """

    def __init__(
        self,
        weight,
        bias,
        activation=None,
        **kwargs
    ):
        """
        Parameters
        ----------
        weight : tf.Tensor
            The kernel tensor
        bias : tf.Tensor
            The bias tensor
        activation : None, str or keras activation function (default None)
            The activation function to use

        """
        super().__init__(**kwargs)
        self.w = {
            i: tf.sparse.from_dense(weight[i]) for i in range(weight.shape[0])
        }
        self.b = {
            i: tf.sparse.from_dense(bias[i]) for i in range(bias.shape[0])
        }
        self.activation = tf.keras.activations.get(activation)

    def build(self, input_shape):
        """
        Build the layer in preparation to be trained or called. Should not be called directly,
        but rather is called when the layer is added to a model
        """
        pass

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """

        output_tensor = [
            tf.matmul(inputs[i], tf.sparse.to_dense(self.w[i])) + tf.sparse.to_dense(self.b[i]) for i in range(len(inputs))
        ]
        return [
            self.activation(tensor) for tensor in output_tensor
        ]

    def get_config(self):
        config = super().get_config().copy()
        config['activation'] = tf.keras.activations.serialize(self.activation)
        return config

    @classmethod
    def from_layer(cls, layer):
        """
        Create a layer from an instance of another layer
        """
        weights = layer.get_weights()
        w = weights[0]
        b = weights[1]
        activation = layer.activation
        return cls(
            w,
            b,
            activation
        )

    @classmethod
    def from_config(cls, config):
        return cls(**config)
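
# Usage sketch (illustrative only): the random weights stand in for a trained
# MultiDense layer's stacked (task, features, units) kernel and (task, units)
# bias; one sparse kernel is stored per task.
#
# >>> weight = tf.random.normal((2, 10, 4))  # two tasks
# >>> bias = tf.zeros((2, 4))
# >>> layer = SparseMultiDense(weight, bias)
# >>> outs = layer([tf.random.normal((1, 10)), tf.random.normal((1, 10))])
# >>> [out.shape for out in outs]
# [TensorShape([1, 4]), TensorShape([1, 4])]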
--------------------------------------------------------------------------------
/beyondml/tflow/layers/SumLayer.py:
--------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow.keras.layers import Layer


class SumLayer(Layer):
    """
    Layer which adds all inputs together. All inputs must have compatible shapes

    Example:

    >>> # Create a model with just a SumLayer and two inputs
    >>> input_1 = tf.keras.layers.Input(10)
    >>> input_2 = tf.keras.layers.Input(10)
    >>> sum_layer = mann.layers.SumLayer()([input_1, input_2])
    >>> model = tf.keras.models.Model([input_1, input_2], sum_layer)
    >>> model.compile()
    >>> # Call the model
    >>> data = np.arange(10).reshape((1, 10))
    >>> model.predict([data, data])
    array([[ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18.]], dtype=float32)

    """

    def __init__(self, **kwargs):
        super(SumLayer, self).__init__(**kwargs)

    def call(self, inputs):
        """
        This is where the layer's logic lives and is called upon inputs

        Parameters
        ----------
        inputs : TensorFlow Tensor or Tensor-like
            The inputs to the layer

        Returns
        -------
        outputs : TensorFlow Tensor
            The outputs of the layer's logic
        """
        return tf.add_n(inputs)

    def get_config(self):
        return super().get_config().copy()

    @classmethod
    def from_config(cls, config):
        return cls(**config)
--------------------------------------------------------------------------------
/beyondml/tflow/layers/__init__.py:
--------------------------------------------------------------------------------
"""Custom layers to use when building MANN models"""

from .MultitaskNormalization import MultitaskNormalization
from .FilterLayer import FilterLayer
from .SumLayer import SumLayer
from .MaskedDense import MaskedDense
from .MaskedConv2D import MaskedConv2D
from .MaskedConv3D import MaskedConv3D
from .SelectorLayer import SelectorLayer
from .MultiMaskedDense import MultiMaskedDense
from .MultiMaskedConv2D import MultiMaskedConv2D
from .MultiMaskedConv3D import MultiMaskedConv3D
from .MultiDense import MultiDense
from .MultiConv2D import MultiConv2D
from .MultiConv3D import MultiConv3D
from .MultiMaxPool2D import MultiMaxPool2D
from .MultiMaxPool3D import MultiMaxPool3D
from .SparseDense import SparseDense
from .SparseConv2D import SparseConv2D
from .SparseConv3D import SparseConv3D
from .SparseMultiDense import SparseMultiDense
from .SparseMultiConv2D import SparseMultiConv2D
from .SparseMultiConv3D import SparseMultiConv3D
--------------------------------------------------------------------------------
/beyondml/tflow/utils/__init__.py:
--------------------------------------------------------------------------------
"""Some utilities to use when building, loading, and training MANN models"""

from .utils import get_custom_objects, mask_model, replace_weights, replace_config, add_layer_masks, quantize_model, get_task_masking_gradients, mask_task_weights, train_model_iteratively, train_model, ActiveSparsification
from .transformer import build_token_position_embedding_block, build_transformer_block
--------------------------------------------------------------------------------
/beyondml/tflow/utils/transformer.py:
--------------------------------------------------------------------------------
import tensorflow as tf
from beyondml.tflow.layers import MultiDense, SelectorLayer


def build_transformer_block(
    input_shape,
    embed_dim,
    num_heads,
    neurons,
    dropout_rate=0.1,
):
    """
    Build a Transformer Block

    Parameters
    ----------
    input_shape : int or tuple of int
        The input shape for the model to use
    embed_dim : int
        The dimension of the embedding
    num_heads : int
        The number of attention heads to use
    neurons : int
        The number of hidden neurons to use in the hidden layer
    dropout_rate : float (default 0.1)
        Rate at which dropout is applied

    Returns
    -------
    transformer_block : TensorFlow keras Functional model
        The transformer block, which can then be used alone or as
        a layer in another model
    """
    input_layer = tf.keras.layers.Input(input_shape)
    query = MultiDense(embed_dim)([input_layer] * num_heads)
    key = MultiDense(embed_dim)([input_layer] * num_heads)
    value = MultiDense(embed_dim)([input_layer] * num_heads)

    query_selectors = [
        SelectorLayer(i)(query) for i in range(num_heads)
    ]
    key_selectors = [
        SelectorLayer(i)(key) for i in range(num_heads)
    ]
    value_selectors = [
        SelectorLayer(i)(value) for i in range(num_heads)
    ]
    attention_layers = [
        tf.keras.layers.Attention()([query_selectors[i], key_selectors[i], value_selectors[i]]) for i in range(num_heads)
    ]
    concat = tf.keras.layers.Concatenate()(attention_layers)
    merge = tf.keras.layers.Reshape((input_shape[0], -1))(concat)

    x = tf.keras.layers.Dropout(dropout_rate)(merge)
    out1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)(x)
    x = tf.keras.layers.Dense(neurons, activation='relu')(out1)
    x = tf.keras.layers.Dense(embed_dim * num_heads)(x)
    x = tf.keras.layers.Dropout(dropout_rate)(x)
    x = tf.keras.layers.Add()([out1, x])
    output_layer = tf.keras.layers.LayerNormalization(epsilon=1e-6)(x)

    return tf.keras.models.Model(input_layer, output_layer)


def build_token_position_embedding_block(
    sequence_length,
    vocab_size,
    embed_dim
):
    """
    Builds a token and position embedding block

    Parameters
    ----------
    sequence_length : int
        The length of each sequence
    vocab_size : int
        The size of the vocabulary used
    embed_dim : int
        The desired embedding dimension

    Returns
    -------
    embedding_block : TensorFlow keras Functional model
        The embedding block, which can be used alone or
        as a layer in another model
    """
    tok_input = tf.keras.layers.Input(sequence_length)
    pos_input = tf.keras.layers.Input(sequence_length)

    tok_embed = tf.keras.layers.Embedding(
        vocab_size, output_dim=embed_dim)(tok_input)
    pos_embed = tf.keras.layers.Embedding(
        sequence_length, output_dim=embed_dim)(pos_input)
    output_layer = tf.keras.layers.Add()([tok_embed, pos_embed])

    return tf.keras.models.Model([tok_input, pos_input], output_layer)
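
# Usage sketch (illustrative only; every hyperparameter below is an arbitrary
# choice for the example): composing the embedding block and a transformer
# block into a small two-class sequence classifier. Note that the block's
# output dimension is embed_dim * num_heads, so the embedding dimension is
# chosen to match it here.
#
# >>> tokens = tf.keras.layers.Input(32)
# >>> positions = tf.keras.layers.Input(32)
# >>> embeddings = build_token_position_embedding_block(32, 1000, 32)([tokens, positions])
# >>> features = build_transformer_block((32, 32), embed_dim=16, num_heads=2, neurons=64)(embeddings)
# >>> pooled = tf.keras.layers.GlobalAveragePooling1D()(features)
# >>> outputs = tf.keras.layers.Dense(2, activation='softmax')(pooled)
# >>> model = tf.keras.models.Model([tokens, positions], outputs)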
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/_downloads/24a622e4623b41fdb0fe92a5595144b2/beyondml.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_downloads/24a622e4623b41fdb0fe92a5595144b2/beyondml.pdf
--------------------------------------------------------------------------------
/docs/_images/BeyondML_horizontal-color.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_images/BeyondML_horizontal-color.png
--------------------------------------------------------------------------------
/docs/_sources/beyondml.pt.rst.txt:
--------------------------------------------------------------------------------
beyondml.pt package
===================

Subpackages
-----------

.. toctree::
   :maxdepth: 4

   beyondml.pt.layers
   beyondml.pt.utils

Module contents
---------------

.. automodule:: beyondml.pt
   :members:
   :undoc-members:
   :show-inheritance:
--------------------------------------------------------------------------------
/docs/_sources/beyondml.pt.utils.rst.txt:
--------------------------------------------------------------------------------
beyondml.pt.utils package
=========================

Submodules
----------

beyondml.pt.utils.utils module
------------------------------

.. automodule:: beyondml.pt.utils.utils
   :members:
   :undoc-members:
   :show-inheritance:

Module contents
---------------

.. automodule:: beyondml.pt.utils
   :members:
   :undoc-members:
   :show-inheritance:
--------------------------------------------------------------------------------
/docs/_sources/beyondml.rst.txt:
--------------------------------------------------------------------------------
beyondml package
================

Subpackages
-----------

.. toctree::
   :maxdepth: 4

   beyondml.pt
   beyondml.tflow

Module contents
---------------

.. automodule:: beyondml
   :members:
   :undoc-members:
   :show-inheritance:
--------------------------------------------------------------------------------
/docs/_sources/beyondml.tflow.rst.txt:
--------------------------------------------------------------------------------
beyondml.tflow package
======================

Subpackages
-----------

.. toctree::
   :maxdepth: 4

   beyondml.tflow.layers
   beyondml.tflow.utils

Module contents
---------------

.. automodule:: beyondml.tflow
   :members:
   :undoc-members:
   :show-inheritance:
--------------------------------------------------------------------------------
/docs/_sources/beyondml.tflow.utils.rst.txt:
--------------------------------------------------------------------------------
beyondml.tflow.utils package
============================

Submodules
----------

beyondml.tflow.utils.transformer module
---------------------------------------

.. automodule:: beyondml.tflow.utils.transformer
   :members:
   :undoc-members:
   :show-inheritance:

beyondml.tflow.utils.utils module
---------------------------------

.. automodule:: beyondml.tflow.utils.utils
   :members:
   :undoc-members:
   :show-inheritance:

Module contents
---------------

.. automodule:: beyondml.tflow.utils
   :members:
   :undoc-members:
   :show-inheritance:
--------------------------------------------------------------------------------
/docs/_sources/index.rst.txt:
--------------------------------------------------------------------------------
.. BeyondML documentation master file, created by
   sphinx-quickstart on Fri Jan 6 12:23:41 2023.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

.. image:: images/BeyondML_horizontal-color.png
   :align: center
   :width: 400

|

Welcome to BeyondML's documentation!
====================================

BeyondML is a Python package which enables creating sparse multitask artificial neural networks (MANNs)
compatible with `TensorFlow `_ and `PyTorch `_.
This package contains custom layers and utilities to facilitate the training and optimization of models
using the Reduction of Sub-Network Neuroplasticity (RSN2) training procedure developed by `AI Squared, Inc `_.

:download:`View this Documentation in PDF Format <./_build/latex/beyondml.pdf>`

Installation
************

This package is available through `PyPI `_ and can be installed by running the following command:

.. code-block::

   pip install beyondml

Alternatively, the latest version of the software can be installed directly from GitHub using the following command:

.. code-block::

   pip install git+https://github.com/beyond-ml-labs/beyondml

.. toctree::
   :maxdepth: 2
   :caption: Documentation:

   modules

Changelog
*********

- Version 0.1.0

  - Refactored existing MANN repository to rename to BeyondML

- Version 0.1.1

  - Added the `SparseDense`, `SparseConv`, `SparseMultiDense`, and `SparseMultiConv` layers to
    `beyondml.tflow.layers`, giving users the functionality to utilize sparse tensors during
    inference

- Version 0.1.2

  - Added the `MaskedMultiHeadAttention`, `MaskedTransformerEncoderLayer`, and `MaskedTransformerDecoderLayer` layers to `beyondml.pt.layers` to add pruning to the transformer architecture
  - Added `MaskedConv3D`, `MultiMaskedConv3D`, `MultiConv3D`, `MultiMaxPool3D`, `SparseConv3D`, and `SparseMultiConv3D` layers to `beyondml.tflow.layers`
  - Added `MaskedConv3D`, `MultiMaskedConv3D`, `MultiConv3D`, `MultiMaxPool3D`, `SparseConv3D`, `SparseMultiConv3D`, and `MultiMaxPool2D` layers to `beyondml.pt.layers`

- Version 0.1.3

  - Added `beyondml.pt` compatibility with more native PyTorch functionality for using models on different devices and datatypes
  - Added `train_model` function to `beyondml.tflow.utils`
  - Added `MultitaskNormalization` layer to `beyondml.tflow.layers` and `beyondml.pt.layers`

- Version 0.1.4

  - Updated documentation to use Sphinx

- Version 0.1.5

  - Updated requirements to use newer version of TensorFlow
  - Fixed errors with changes to types of `input_shape` in TensorFlow Keras layers
  - Fixed errors resulting from model/configuration changes with TensorFlow

- Version 0.1.6

  - Fixed issues with converting between masked and unmasked models in TensorFlow

- Version 0.1.7

  - Updated PyTorch implementation of Transformer-based architectures
toctree:: 38 | :maxdepth: 2 39 | :caption: Documentation: 40 | 41 | modules 42 | 43 | Changelog 44 | ********* 45 | 46 | - Version 0.1.0 47 | - Refactored the existing MANN repository, renaming it to BeyondML 48 | - Version 0.1.1 49 | - Added the `SparseDense`, `SparseConv`, `SparseMultiDense`, and `SparseMultiConv` layers to 50 | `beyondml.tflow.layers`, giving users the ability to utilize sparse tensors during 51 | inference 52 | - Version 0.1.2 53 | - Added the `MaskedMultiHeadAttention`, `MaskedTransformerEncoderLayer`, and `MaskedTransformerDecoderLayer` layers to `beyondml.pt.layers` to add pruning to the transformer architecture 54 | - Added `MaskedConv3D`, `MultiMaskedConv3D`, `MultiConv3D`, `MultiMaxPool3D`, `SparseConv3D`, and `SparseMultiConv3D` layers to `beyondml.tflow.layers` 55 | - Added `MaskedConv3D`, `MultiMaskedConv3D`, `MultiConv3D`, `MultiMaxPool3D`, `SparseConv3D`, `SparseMultiConv3D`, and `MultiMaxPool2D` layers to `beyondml.pt.layers` 56 | - Version 0.1.3 57 | - Added `beyondml.pt` compatibility with more native PyTorch functionality for using models on different devices and datatypes 58 | - Added the `train_model` function to `beyondml.tflow.utils` 59 | - Added the `MultitaskNormalization` layer to `beyondml.tflow.layers` and `beyondml.pt.layers` 60 | - Version 0.1.4 61 | - Updated documentation to use Sphinx 62 | - Version 0.1.5 63 | - Updated requirements to use a newer version of TensorFlow 64 | - Fixed errors with changes to types of `input_shape` in TensorFlow Keras layers 65 | - Fixed errors resulting from model/configuration changes with TensorFlow 66 | - Version 0.1.6 67 | - Fixed issues with converting between masked and unmasked models in TensorFlow 68 | - Version 0.1.7 69 | - Updated the PyTorch implementation of Transformer-based architectures -------------------------------------------------------------------------------- /docs/_sources/modules.rst.txt: -------------------------------------------------------------------------------- 1 | beyondml 2 | ======== 3 | 4 | ..
toctree:: 5 | :maxdepth: 4 6 | 7 | beyondml 8 | -------------------------------------------------------------------------------- /docs/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} 
-------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/Roboto-Slab-Regular.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/lato-bold-italic.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/lato-bold.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/lato-normal.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/css/fonts/lato-normal.woff2 -------------------------------------------------------------------------------- /docs/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '', 4 | LANGUAGE: 'en', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | LINK_SUFFIX: '.html', 9 | HAS_SOURCE: true, 10 | SOURCELINK_SUFFIX: '.txt', 11 | NAVIGATION_WITH_KEYS: false, 12 | SHOW_SEARCH_SUMMARY: true, 13 | ENABLE_SEARCH_SHORTCUTS: true, 14 | }; -------------------------------------------------------------------------------- /docs/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/docs/_static/file.png -------------------------------------------------------------------------------- /docs/_static/js/badge_only.js: -------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return 
e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /docs/_static/js/html5shiv-printshiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3-pre | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var 
d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /docs/_static/js/html5shiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function 
j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /docs/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | !function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var 
t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t 2 | 3 | 4 | 5 | 6 | Search — BeyondML documentation 7 | 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
© Copyright 2023, BeyondML Labs. Built with Sphinx using a theme provided by Read the Docs.
-------------------------------------------------------------------------------- /logo/BeyondML_horizontal-color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/logo/BeyondML_horizontal-color.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow<=2.13 2 | torch 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from beyondml import __version__ 3 | 4 | with open('requirements.txt', 'r') as f: 5 | requirements = [ 6 | line for line in f.read().splitlines() if line != '' 7 | ] 8 | 9 | setup( 10 | name='beyondml', 11 | version=__version__, 12 | url='https://github.com/Beyond-ML-Labs/BeyondML', 13 | packages=['beyondml', 'beyondml.tflow', 'beyondml.tflow.layers', 14 | 'beyondml.tflow.utils', 'beyondml.pt', 'beyondml.pt.layers', 15 | 'beyondml.pt.utils'], 16 | author='The AI Squared Team', 17 | author_email='mann@squared.ai', 18 | description='Package containing utilities for implementing RSN2/MANN', 19 | long_description=open('README.md').read(), 20 | long_description_content_type='text/markdown', 21 | license='Apache 2.0', 22 | install_requires=requirements 23 | ) 24 | -------------------------------------------------------------------------------- /sphinx/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /sphinx/_build/doctrees/beyondml.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/beyondml.doctree -------------------------------------------------------------------------------- /sphinx/_build/doctrees/beyondml.pt.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/beyondml.pt.doctree -------------------------------------------------------------------------------- /sphinx/_build/doctrees/beyondml.pt.layers.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/beyondml.pt.layers.doctree -------------------------------------------------------------------------------- /sphinx/_build/doctrees/beyondml.pt.utils.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/beyondml.pt.utils.doctree -------------------------------------------------------------------------------- /sphinx/_build/doctrees/beyondml.tflow.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/beyondml.tflow.doctree -------------------------------------------------------------------------------- /sphinx/_build/doctrees/beyondml.tflow.layers.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/beyondml.tflow.layers.doctree -------------------------------------------------------------------------------- /sphinx/_build/doctrees/beyondml.tflow.utils.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/beyondml.tflow.utils.doctree -------------------------------------------------------------------------------- /sphinx/_build/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/environment.pickle -------------------------------------------------------------------------------- /sphinx/_build/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/index.doctree -------------------------------------------------------------------------------- /sphinx/_build/doctrees/modules.doctree: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/doctrees/modules.doctree -------------------------------------------------------------------------------- /sphinx/_build/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: e28002f24b4d38f6cefaa374be88a1d2 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /sphinx/_build/html/_downloads/24a622e4623b41fdb0fe92a5595144b2/beyondml.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_downloads/24a622e4623b41fdb0fe92a5595144b2/beyondml.pdf -------------------------------------------------------------------------------- /sphinx/_build/html/_images/BeyondML_horizontal-color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_images/BeyondML_horizontal-color.png -------------------------------------------------------------------------------- /sphinx/_build/html/_sources/beyondml.pt.rst.txt: -------------------------------------------------------------------------------- 1 | beyondml.pt package 2 | =================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | beyondml.pt.layers 11 | beyondml.pt.utils 12 | 13 | Module contents 14 | --------------- 15 | 16 | .. automodule:: beyondml.pt 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | -------------------------------------------------------------------------------- /sphinx/_build/html/_sources/beyondml.pt.utils.rst.txt: -------------------------------------------------------------------------------- 1 | beyondml.pt.utils package 2 | ========================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | beyondml.pt.utils.utils module 8 | ------------------------------ 9 | 10 | .. automodule:: beyondml.pt.utils.utils 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. automodule:: beyondml.pt.utils 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /sphinx/_build/html/_sources/beyondml.rst.txt: -------------------------------------------------------------------------------- 1 | beyondml package 2 | ================ 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | beyondml.pt 11 | beyondml.tflow 12 | 13 | Module contents 14 | --------------- 15 | 16 | .. automodule:: beyondml 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | -------------------------------------------------------------------------------- /sphinx/_build/html/_sources/beyondml.tflow.rst.txt: -------------------------------------------------------------------------------- 1 | beyondml.tflow package 2 | ====================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | beyondml.tflow.layers 11 | beyondml.tflow.utils 12 | 13 | Module contents 14 | --------------- 15 | 16 | .. 
automodule:: beyondml.tflow 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | -------------------------------------------------------------------------------- /sphinx/_build/html/_sources/beyondml.tflow.utils.rst.txt: -------------------------------------------------------------------------------- 1 | beyondml.tflow.utils package 2 | ============================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | beyondml.tflow.utils.transformer module 8 | --------------------------------------- 9 | 10 | .. automodule:: beyondml.tflow.utils.transformer 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | beyondml.tflow.utils.utils module 16 | --------------------------------- 17 | 18 | .. automodule:: beyondml.tflow.utils.utils 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: beyondml.tflow.utils 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /sphinx/_build/html/_sources/index.rst.txt: -------------------------------------------------------------------------------- 1 | .. BeyondML documentation master file, created by 2 | sphinx-quickstart on Fri Jan 6 12:23:41 2023. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. image:: images/BeyondML_horizontal-color.png 7 | :align: center 8 | :width: 400 9 | 10 | | 11 | 12 | Welcome to BeyondML's documentation! 13 | ==================================== 14 | 15 | BeyondML is a Python package which enables creating sparse multitask artificial neural networks (MANNs) 16 | compatible with `TensorFlow `_ and `PyTorch `_. 17 | This package contains custom layers and utilities to facilitate the training and optimization of models 18 | using the Reduction of Sub-Network Neuroplasticity (RSN2) training procedure developed by `AI Squared, Inc `_. 19 | 20 | :download:`View this Documentation in PDF Format <./_build/latex/beyondml.pdf>` 21 | 22 | Installation 23 | ************ 24 | 25 | This package is available through `Pypi `_ and can be installed by running the following command: 26 | 27 | .. code-block:: 28 | 29 | pip install beyondml 30 | 31 | Alternatively, the latest version of the software can be installed directly from GitHub using the following command: 32 | 33 | .. code-block:: 34 | 35 | pip install git+https://github.com/beyond-ml-labs/beyondml 36 | 37 | .. 
toctree:: 38 | :maxdepth: 2 39 | :caption: Documentation: 40 | 41 | modules 42 | 43 | Changelog 44 | ********* 45 | 46 | - Version 0.1.0 47 | - Refactored the existing MANN repository, renaming it to BeyondML 48 | - Version 0.1.1 49 | - Added the `SparseDense`, `SparseConv`, `SparseMultiDense`, and `SparseMultiConv` layers to 50 | `beyondml.tflow.layers`, giving users the ability to utilize sparse tensors during 51 | inference 52 | - Version 0.1.2 53 | - Added the `MaskedMultiHeadAttention`, `MaskedTransformerEncoderLayer`, and `MaskedTransformerDecoderLayer` layers to `beyondml.pt.layers` to add pruning to the transformer architecture 54 | - Added `MaskedConv3D`, `MultiMaskedConv3D`, `MultiConv3D`, `MultiMaxPool3D`, `SparseConv3D`, and `SparseMultiConv3D` layers to `beyondml.tflow.layers` 55 | - Added `MaskedConv3D`, `MultiMaskedConv3D`, `MultiConv3D`, `MultiMaxPool3D`, `SparseConv3D`, `SparseMultiConv3D`, and `MultiMaxPool2D` layers to `beyondml.pt.layers` 56 | - Version 0.1.3 57 | - Added `beyondml.pt` compatibility with more native PyTorch functionality for using models on different devices and datatypes 58 | - Added the `train_model` function to `beyondml.tflow.utils` 59 | - Added the `MultitaskNormalization` layer to `beyondml.tflow.layers` and `beyondml.pt.layers` 60 | - Version 0.1.4 61 | - Updated documentation to use Sphinx 62 | - Version 0.1.5 63 | - Updated requirements to use a newer version of TensorFlow 64 | - Fixed errors with changes to types of `input_shape` in TensorFlow Keras layers 65 | - Fixed errors resulting from model/configuration changes with TensorFlow 66 | - Version 0.1.6 67 | - Fixed issues with converting between masked and unmasked models in TensorFlow 68 | - Version 0.1.7 69 | - Updated the PyTorch implementation of Transformer-based architectures -------------------------------------------------------------------------------- /sphinx/_build/html/_sources/modules.rst.txt: -------------------------------------------------------------------------------- 1 | beyondml 2 | ======== 3 | 4 | ..
toctree:: 5 | :maxdepth: 4 6 | 7 | beyondml 8 | -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} 
-------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff -------------------------------------------------------------------------------- 
/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/lato-bold.woff -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/lato-normal.woff -------------------------------------------------------------------------------- /sphinx/_build/html/_static/css/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/css/fonts/lato-normal.woff2 -------------------------------------------------------------------------------- /sphinx/_build/html/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '', 4 | LANGUAGE: 'en', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | LINK_SUFFIX: '.html', 9 | HAS_SOURCE: true, 10 | SOURCELINK_SUFFIX: '.txt', 11 | NAVIGATION_WITH_KEYS: false, 12 | SHOW_SEARCH_SUMMARY: true, 13 | ENABLE_SEARCH_SHORTCUTS: true, 14 | }; -------------------------------------------------------------------------------- /sphinx/_build/html/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/file.png 
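One detail worth calling out from the setup.py shown earlier: the package version is imported at build time from beyondml itself, and install_requires is produced by reading requirements.txt and dropping blank lines. A minimal, self-contained sketch of that filtering step — the file contents are inlined here as an assumption so the snippet runs without the repo on disk:

    # Inlined copy of this repo's requirements.txt; the real setup.py reads the file.
    requirements_txt = 'tensorflow<=2.13\ntorch\n'

    # Same list comprehension as in setup.py: keep every non-empty line
    requirements = [
        line for line in requirements_txt.splitlines() if line != ''
    ]

    assert requirements == ['tensorflow<=2.13', 'torch']
    print(requirements)

The blank-line filter matters because a trailing newline (or spacing between groups of pins) would otherwise inject empty strings into install_requires and break the build.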
-------------------------------------------------------------------------------- /sphinx/_build/html/_static/js/badge_only.js: -------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /sphinx/_build/html/_static/js/html5shiv-printshiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3-pre | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var 
b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /sphinx/_build/html/_static/js/html5shiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function 
i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /sphinx/_build/html/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/minus.png -------------------------------------------------------------------------------- /sphinx/_build/html/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/_static/plus.png -------------------------------------------------------------------------------- /sphinx/_build/html/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/html/objects.inv -------------------------------------------------------------------------------- /sphinx/_build/html/search.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Search — BeyondML documentation 7 | 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
[tag-stripped HTML omitted: the remainder of search.html is the boilerplate Sphinx search page ("Search — BeyondML documentation", Read the Docs theme) with the footer "© Copyright 2023, BeyondML Labs." and "Built with Sphinx using a theme provided by Read the Docs."] -------------------------------------------------------------------------------- /sphinx/_build/latex/BeyondML_horizontal-color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/latex/BeyondML_horizontal-color.png -------------------------------------------------------------------------------- /sphinx/_build/latex/LICRcyr2utf8.xdy: -------------------------------------------------------------------------------- 1 | ;; -*- coding: utf-8; mode: Lisp; -*- 2 | ;; style file for xindy 3 | ;; filename: LICRcyr2utf8.xdy 4 | ;; description: style file for xindy which maps back LaTeX Internal 5 | ;; Character Representation of Cyrillic to utf-8 6 | ;; usage: for use with pdflatex produced .idx files. 7 | ;; Contributed by the Sphinx team, July 2018. 8 | (merge-rule "\IeC {\'\CYRG }" "Ѓ" :string) 9 | (merge-rule "\IeC {\'\CYRK }" "Ќ" :string) 10 | (merge-rule "\IeC {\'\cyrg }" "ѓ" :string) 11 | (merge-rule "\IeC {\'\cyrk }" "ќ" :string) 12 | (merge-rule "\IeC {\CYRA }" "А" :string) 13 | (merge-rule "\IeC {\CYRB }" "Б" :string) 14 | (merge-rule "\IeC {\CYRC }" "Ц" :string) 15 | (merge-rule "\IeC {\CYRCH }" "Ч" :string) 16 | (merge-rule "\IeC {\CYRD }" "Д" :string) 17 | (merge-rule "\IeC {\CYRDJE }" "Ђ" :string) 18 | (merge-rule "\IeC {\CYRDZE }" "Ѕ" :string) 19 | (merge-rule "\IeC {\CYRDZHE }" "Џ" :string) 20 | (merge-rule "\IeC {\CYRE }" "Е" :string) 21 | (merge-rule "\IeC {\CYREREV }" "Э" :string) 22 | (merge-rule "\IeC {\CYRERY }" "Ы" :string) 23 | (merge-rule "\IeC {\CYRF }" "Ф" :string) 24 | (merge-rule "\IeC {\CYRG }" "Г" :string) 25 | (merge-rule "\IeC {\CYRGUP }" "Ґ" :string) 26 | (merge-rule "\IeC {\CYRH }" "Х" :string) 27 | (merge-rule "\IeC {\CYRHRDSN }" "Ъ" :string) 28 | (merge-rule "\IeC {\CYRI }" "И" :string) 29 | (merge-rule "\IeC {\CYRIE }" "Є" :string) 30 | (merge-rule "\IeC {\CYRII }" "І" :string) 31 | (merge-rule "\IeC {\CYRISHRT }" "Й" :string) 32 | (merge-rule "\IeC {\CYRJE }" "Ј" :string) 33 | (merge-rule "\IeC {\CYRK }" "К" :string) 34 | (merge-rule "\IeC {\CYRL }" "Л" :string) 35 | (merge-rule "\IeC {\CYRLJE }" "Љ" :string) 36 | (merge-rule "\IeC {\CYRM }" "М" :string) 37 | (merge-rule "\IeC {\CYRN }" "Н" :string) 38 | (merge-rule "\IeC {\CYRNJE }" "Њ" :string) 39 | (merge-rule "\IeC {\CYRO }" "О" :string) 40 | (merge-rule "\IeC {\CYRP }" "П" :string) 41 | (merge-rule "\IeC {\CYRR }" "Р" :string) 42 | (merge-rule "\IeC {\CYRS }" "С" :string) 43 | (merge-rule "\IeC {\CYRSFTSN }" "Ь" :string) 44 | (merge-rule "\IeC {\CYRSH }" "Ш" :string) 45 | (merge-rule "\IeC {\CYRSHCH }" "Щ" :string) 46 | (merge-rule "\IeC {\CYRT }" "Т" :string) 47 | (merge-rule "\IeC {\CYRTSHE }" "Ћ" :string) 48 | (merge-rule "\IeC {\CYRU }" "У" :string) 49 | (merge-rule "\IeC {\CYRUSHRT }" "Ў" :string) 50 | (merge-rule "\IeC {\CYRV }" "В" :string) 51 | (merge-rule "\IeC {\CYRYA }" "Я" :string) 52 | (merge-rule "\IeC {\CYRYI }" "Ї" :string) 53 | (merge-rule "\IeC {\CYRYO }" "Ё" :string) 54 | (merge-rule "\IeC {\CYRYU }" "Ю" :string) 55 | (merge-rule "\IeC {\CYRZ }" "З" :string) 56 | (merge-rule "\IeC {\CYRZH }" "Ж" :string) 57 | (merge-rule "\IeC {\cyra }" "а" :string) 58 | (merge-rule "\IeC {\cyrb }" "б" :string) 59 | (merge-rule "\IeC {\cyrc }" "ц" :string) 60 | (merge-rule "\IeC {\cyrch }" "ч" :string) 61 | (merge-rule "\IeC {\cyrd }" "д" :string) 62 | (merge-rule "\IeC {\cyrdje }" "ђ" :string) 63 |
(merge-rule "\IeC {\cyrdze }" "ѕ" :string) 64 | (merge-rule "\IeC {\cyrdzhe }" "џ" :string) 65 | (merge-rule "\IeC {\cyre }" "е" :string) 66 | (merge-rule "\IeC {\cyrerev }" "э" :string) 67 | (merge-rule "\IeC {\cyrery }" "ы" :string) 68 | (merge-rule "\IeC {\cyrf }" "ф" :string) 69 | (merge-rule "\IeC {\cyrg }" "г" :string) 70 | (merge-rule "\IeC {\cyrgup }" "ґ" :string) 71 | (merge-rule "\IeC {\cyrh }" "х" :string) 72 | (merge-rule "\IeC {\cyrhrdsn }" "ъ" :string) 73 | (merge-rule "\IeC {\cyri }" "и" :string) 74 | (merge-rule "\IeC {\cyrie }" "є" :string) 75 | (merge-rule "\IeC {\cyrii }" "і" :string) 76 | (merge-rule "\IeC {\cyrishrt }" "й" :string) 77 | (merge-rule "\IeC {\cyrje }" "ј" :string) 78 | (merge-rule "\IeC {\cyrk }" "к" :string) 79 | (merge-rule "\IeC {\cyrl }" "л" :string) 80 | (merge-rule "\IeC {\cyrlje }" "љ" :string) 81 | (merge-rule "\IeC {\cyrm }" "м" :string) 82 | (merge-rule "\IeC {\cyrn }" "н" :string) 83 | (merge-rule "\IeC {\cyrnje }" "њ" :string) 84 | (merge-rule "\IeC {\cyro }" "о" :string) 85 | (merge-rule "\IeC {\cyrp }" "п" :string) 86 | (merge-rule "\IeC {\cyrr }" "р" :string) 87 | (merge-rule "\IeC {\cyrs }" "с" :string) 88 | (merge-rule "\IeC {\cyrsftsn }" "ь" :string) 89 | (merge-rule "\IeC {\cyrsh }" "ш" :string) 90 | (merge-rule "\IeC {\cyrshch }" "щ" :string) 91 | (merge-rule "\IeC {\cyrt }" "т" :string) 92 | (merge-rule "\IeC {\cyrtshe }" "ћ" :string) 93 | (merge-rule "\IeC {\cyru }" "у" :string) 94 | (merge-rule "\IeC {\cyrushrt }" "ў" :string) 95 | (merge-rule "\IeC {\cyrv }" "в" :string) 96 | (merge-rule "\IeC {\cyrya }" "я" :string) 97 | (merge-rule "\IeC {\cyryi }" "ї" :string) 98 | (merge-rule "\IeC {\cyryo }" "ё" :string) 99 | (merge-rule "\IeC {\cyryu }" "ю" :string) 100 | (merge-rule "\IeC {\cyrz }" "з" :string) 101 | (merge-rule "\IeC {\cyrzh }" "ж" :string) 102 | -------------------------------------------------------------------------------- /sphinx/_build/latex/LatinRules.xdy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/latex/LatinRules.xdy -------------------------------------------------------------------------------- /sphinx/_build/latex/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx LaTeX output 2 | 3 | ALLDOCS = $(basename $(wildcard *.tex)) 4 | ALLPDF = $(addsuffix .pdf,$(ALLDOCS)) 5 | ALLDVI = $(addsuffix .dvi,$(ALLDOCS)) 6 | ALLXDV = 7 | ALLPS = $(addsuffix .ps,$(ALLDOCS)) 8 | 9 | # Prefix for archive names 10 | ARCHIVEPREFIX = 11 | # Additional LaTeX options (passed via variables in latexmkrc/latexmkjarc file) 12 | export LATEXOPTS ?= 13 | # Additional latexmk options 14 | LATEXMKOPTS ?= 15 | # format: pdf or dvi (used only by archive targets) 16 | FMT = pdf 17 | 18 | LATEX = latexmk -dvi 19 | PDFLATEX = latexmk -pdf -dvi- -ps- 20 | 21 | 22 | %.dvi: %.tex FORCE_MAKE 23 | $(LATEX) $(LATEXMKOPTS) '$<' 24 | 25 | %.ps: %.dvi 26 | dvips '$<' 27 | 28 | %.pdf: %.tex FORCE_MAKE 29 | $(PDFLATEX) $(LATEXMKOPTS) '$<' 30 | 31 | all: $(ALLPDF) 32 | 33 | all-dvi: $(ALLDVI) 34 | 35 | all-ps: $(ALLPS) 36 | 37 | all-pdf: $(ALLPDF) 38 | 39 | zip: all-$(FMT) 40 | mkdir $(ARCHIVEPREFIX)docs-$(FMT) 41 | cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) 42 | zip -q -r -9 $(ARCHIVEPREFIX)docs-$(FMT).zip $(ARCHIVEPREFIX)docs-$(FMT) 43 | rm -r $(ARCHIVEPREFIX)docs-$(FMT) 44 | 45 | tar: all-$(FMT) 46 | mkdir $(ARCHIVEPREFIX)docs-$(FMT) 47 | cp 
$(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) 48 | tar cf $(ARCHIVEPREFIX)docs-$(FMT).tar $(ARCHIVEPREFIX)docs-$(FMT) 49 | rm -r $(ARCHIVEPREFIX)docs-$(FMT) 50 | 51 | gz: tar 52 | gzip -9 < $(ARCHIVEPREFIX)docs-$(FMT).tar > $(ARCHIVEPREFIX)docs-$(FMT).tar.gz 53 | 54 | bz2: tar 55 | bzip2 -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar 56 | 57 | xz: tar 58 | xz -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar 59 | 60 | clean: 61 | rm -f *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ps *.tar *.tar.gz *.tar.bz2 *.tar.xz $(ALLPDF) $(ALLDVI) $(ALLXDV) *.fls *.fdb_latexmk 62 | 63 | .PHONY: all all-pdf all-dvi all-ps clean zip tar gz bz2 xz 64 | .PHONY: FORCE_MAKE -------------------------------------------------------------------------------- /sphinx/_build/latex/beyondml.ilg: -------------------------------------------------------------------------------- 1 | This is makeindex, version 2.17 [TeX Live 2023] (kpathsea + Thai support). 2 | Scanning style file ./python.ist.......done (7 attributes redefined, 0 ignored). 3 | Scanning input file beyondml.idx....done (328 entries accepted, 0 rejected). 4 | Sorting entries.....done (2844 comparisons). 5 | Generating output file beyondml.ind....done (622 lines written, 0 warnings). 6 | Output written in beyondml.ind. 7 | Transcript written in beyondml.ilg. 8 | -------------------------------------------------------------------------------- /sphinx/_build/latex/beyondml.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/_build/latex/beyondml.pdf -------------------------------------------------------------------------------- /sphinx/_build/latex/latexmkjarc: -------------------------------------------------------------------------------- 1 | $latex = 'pdflatex ' . $ENV{'LATEXOPTS'} . ' -kanji=utf8 %O %S'; 2 | $dvipdf = 'dvipdfmx %O -o %D %S'; 3 | $makeindex = 'internal mendex %S %B %D'; 4 | sub mendex { 5 | my ($source, $basename, $destination) = @_; 6 | my $dictfile = $basename . ".dic"; 7 | unlink($destination); 8 | system("mendex", "-U", "-f", "-d", $dictfile, "-s", "python.ist", $source); 9 | if ($? > 0) { 10 | print("mendex exited with error code $? (ignored)\n"); 11 | } 12 | if (!-e $destination) { 13 | # create an empty .ind file if nothing 14 | open(FH, ">" . $destination); 15 | close(FH); 16 | } 17 | return 0; 18 | } 19 | add_cus_dep( "glo", "gls", 0, "makeglo" ); 20 | sub makeglo { 21 | return system( "mendex -J -f -s gglo.ist -o '$_[0].gls' '$_[0].glo'" ); 22 | } -------------------------------------------------------------------------------- /sphinx/_build/latex/latexmkrc: -------------------------------------------------------------------------------- 1 | $latex = 'latex ' . $ENV{'LATEXOPTS'} . ' %O %S'; 2 | $pdflatex = 'pdflatex ' . $ENV{'LATEXOPTS'} . ' %O %S'; 3 | $lualatex = 'lualatex ' . $ENV{'LATEXOPTS'} . ' %O %S'; 4 | $xelatex = 'xelatex --no-pdf ' . $ENV{'LATEXOPTS'} . 
' %O %S'; 5 | $makeindex = 'makeindex -s python.ist %O -o %D %S'; 6 | add_cus_dep( "glo", "gls", 0, "makeglo" ); 7 | sub makeglo { 8 | return system( "makeindex -s gglo.ist -o '$_[0].gls' '$_[0].glo'" ); 9 | } -------------------------------------------------------------------------------- /sphinx/_build/latex/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | pushd %~dp0 6 | 7 | set PDFLATEX=latexmk -pdf -dvi- -ps- 8 | 9 | set "LATEXOPTS= " 10 | 11 | if "%1" == "" goto all-pdf 12 | 13 | if "%1" == "all-pdf" ( 14 | :all-pdf 15 | for %%i in (*.tex) do ( 16 | %PDFLATEX% %LATEXMKOPTS% %%i 17 | ) 18 | goto end 19 | ) 20 | 21 | if "%1" == "all-pdf-ja" ( 22 | goto all-pdf 23 | ) 24 | 25 | if "%1" == "clean" ( 26 | del /q /s *.dvi *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ps *.tar *.tar.gz *.tar.bz2 *.tar.xz *.fls *.fdb_latexmk 27 | goto end 28 | ) 29 | 30 | :end 31 | popd -------------------------------------------------------------------------------- /sphinx/_build/latex/python.ist: -------------------------------------------------------------------------------- 1 | line_max 100 2 | headings_flag 1 3 | heading_prefix " \\bigletter " 4 | 5 | preamble "\\begin{sphinxtheindex} 6 | \\let\\bigletter\\sphinxstyleindexlettergroup 7 | \\let\\spxpagem \\sphinxstyleindexpagemain 8 | \\let\\spxentry \\sphinxstyleindexentry 9 | \\let\\spxextra \\sphinxstyleindexextra 10 | 11 | " 12 | 13 | postamble "\n\n\\end{sphinxtheindex}\n" 14 | 15 | symhead_positive "{\\sphinxsymbolsname}" 16 | numhead_positive "{\\sphinxnumbersname}" 17 | -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxhowto.cls: -------------------------------------------------------------------------------- 1 | % 2 | % sphinxhowto.cls for Sphinx (https://www.sphinx-doc.org/) 3 | % 4 | 5 | \NeedsTeXFormat{LaTeX2e}[1995/12/01] 6 | \ProvidesClass{sphinxhowto}[2019/12/01 v2.3.0 Document class (Sphinx howto)] 7 | 8 | % 'oneside' option overriding the 'twoside' default 9 | \newif\if@oneside 10 | \DeclareOption{oneside}{\@onesidetrue} 11 | % Pass remaining document options to the parent class. 12 | \DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}} 13 | \ProcessOptions\relax 14 | 15 | % Default to two-side document 16 | \if@oneside 17 | % nothing to do (oneside is the default) 18 | \else 19 | \PassOptionsToClass{twoside}{\sphinxdocclass} 20 | \fi 21 | 22 | \LoadClass{\sphinxdocclass} 23 | 24 | % Set some sane defaults for section numbering depth and TOC depth. You can 25 | % reset these counters in your preamble. 26 | % 27 | \setcounter{secnumdepth}{2} 28 | \setcounter{tocdepth}{2}% i.e. section and subsection 29 | 30 | % Adapt \and command to the flushright context of \sphinxmaketitle, to 31 | % avoid ragged line endings if author names do not fit all on one single line 32 | \DeclareRobustCommand{\and}{% 33 | \end{tabular}\kern-\tabcolsep 34 | \allowbreak 35 | \hskip\dimexpr1em+\tabcolsep\@plus.17fil\begin{tabular}[t]{c}% 36 | }% 37 | % If it is desired that each author name be on its own line, use in preamble: 38 | %\DeclareRobustCommand{\and}{% 39 | % \end{tabular}\kern-\tabcolsep\\\begin{tabular}[t]{c}% 40 | %}% 41 | % Change the title page to look a bit better, and fit in with the fncychap 42 | % ``Bjarne'' style a bit better. 
43 | % 44 | \newcommand{\sphinxmaketitle}{% 45 | \noindent\rule{\linewidth}{1pt}\par 46 | \begingroup % for PDF information dictionary 47 | \def\endgraf{ }\def\and{\& }% 48 | \pdfstringdefDisableCommands{\def\\{, }}% overwrite hyperref setup 49 | \hypersetup{pdfauthor={\@author}, pdftitle={\@title}}% 50 | \endgroup 51 | \begin{flushright} 52 | \sphinxlogo 53 | \py@HeaderFamily 54 | {\Huge \@title }\par 55 | {\itshape\large \py@release \releaseinfo}\par 56 | \vspace{25pt} 57 | {\Large 58 | \begin{tabular}[t]{c} 59 | \@author 60 | \end{tabular}\kern-\tabcolsep}\par 61 | \vspace{25pt} 62 | \@date \par 63 | \py@authoraddress \par 64 | \end{flushright} 65 | \@thanks 66 | \setcounter{footnote}{0} 67 | \let\thanks\relax\let\maketitle\relax 68 | %\gdef\@thanks{}\gdef\@author{}\gdef\@title{} 69 | } 70 | 71 | \newcommand{\sphinxtableofcontents}{% 72 | \begingroup 73 | \parskip \z@skip 74 | \sphinxtableofcontentshook 75 | \tableofcontents 76 | \endgroup 77 | \noindent\rule{\linewidth}{1pt}\par 78 | \vspace{12pt}% 79 | } 80 | \newcommand\sphinxtableofcontentshook{} 81 | \pagenumbering{arabic} 82 | 83 | % Fix the bibliography environment to add an entry to the Table of 84 | % Contents. 85 | % For an article document class this environment is a section, 86 | % so no page break before it. 87 | % 88 | \newenvironment{sphinxthebibliography}[1]{% 89 | % \phantomsection % not needed here since TeXLive 2010's hyperref 90 | \begin{thebibliography}{#1}% 91 | \addcontentsline{toc}{section}{\ifdefined\refname\refname\else\ifdefined\bibname\bibname\fi\fi}}{\end{thebibliography}} 92 | 93 | 94 | % Same for the indices. 95 | % The memoir class already does this, so we don't duplicate it in that case. 96 | % 97 | \@ifclassloaded{memoir} 98 | {\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}} 99 | {\newenvironment{sphinxtheindex}{% 100 | \phantomsection % needed because no chapter, section, ... is created by theindex 101 | \begin{theindex}% 102 | \addcontentsline{toc}{section}{\indexname}}{\end{theindex}}} 103 | -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxlatexcontainers.sty: -------------------------------------------------------------------------------- 1 | %% CONTAINER DIRECTIVES 2 | % 3 | % change this info string if making any custom modification 4 | \ProvidesFile{sphinxlatexcontainers.sty}[2021/05/03 containers] 5 | 6 | % The purpose of this file is to provide a dummy environment sphinxclass which 7 | % will be inserted for each class in each container directive. The class name 8 | % will be passed as the argument to the environment. 9 | % 10 | % For a class foo, the user can define customised handling of that class by 11 | % defining the sphinxclassfoo LaTeX environment. 
12 | 13 | \newenvironment{sphinxuseclass}[1]{% 14 | \def\sphinxClassFunctionName{sphinxclass#1}% 15 | \ltx@ifundefined{\sphinxClassFunctionName}% 16 | {}% undefined so do nothing 17 | {\expandafter\begin\expandafter{\sphinxClassFunctionName}}% 18 | }{% 19 | \ltx@ifundefined{\sphinxClassFunctionName}% 20 | {}% we did nothing so we keep doing nothing 21 | {\expandafter\end\expandafter{\sphinxClassFunctionName}}% 22 | }% 23 | -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxlatexindbibtoc.sty: -------------------------------------------------------------------------------- 1 | %% INDEX, BIBLIOGRAPHY, APPENDIX, TABLE OF CONTENTS 2 | % 3 | % change this info string if making any custom modification 4 | \ProvidesFile{sphinxlatexindbibtoc.sty}[2021/01/27 index, bib., toc] 5 | 6 | % Provides support for this output mark-up from Sphinx latex writer: 7 | % 8 | % - environments: (backup defaults or get redefined) 9 | % 10 | % - sphinxtheindex (direct mark-up or via python.ist or sphinx.xdy) 11 | % - sphinxthebibliography 12 | % 13 | % - macros: (defines defaults) 14 | % 15 | % - \sphinxmaketitle 16 | % - \sphinxtableofcontents 17 | % - \sphinxnonalphabeticalgroupname 18 | % - \sphinxsymbolsname 19 | % - \sphinxnumbersname 20 | % - \sphinxcite 21 | % 22 | % Requires: 23 | \RequirePackage{makeidx} 24 | 25 | % fix the double index and bibliography on the table of contents 26 | % in jsclasses (Japanese standard document classes) 27 | \ifx\@jsc@uplatextrue\@undefined\else 28 | \renewenvironment{sphinxtheindex} 29 | {\cleardoublepage\phantomsection 30 | \begin{theindex}} 31 | {\end{theindex}} 32 | 33 | \renewenvironment{sphinxthebibliography}[1] 34 | {\cleardoublepage% \phantomsection % not needed here since TeXLive 2010's hyperref 35 | \begin{thebibliography}{#1}} 36 | {\end{thebibliography}} 37 | \fi 38 | 39 | % disable \@chappos in Appendix in pTeX 40 | \ifx\kanjiskip\@undefined\else 41 | \let\py@OldAppendix=\appendix 42 | \renewcommand{\appendix}{ 43 | \py@OldAppendix 44 | \gdef\@chappos{} 45 | } 46 | \fi 47 | 48 | % make commands known to non-Sphinx document classes 49 | \providecommand*{\sphinxmaketitle}{\maketitle} 50 | \providecommand*{\sphinxtableofcontents}{\tableofcontents} 51 | \ltx@ifundefined{sphinxthebibliography} 52 | {\newenvironment 53 | {sphinxthebibliography}{\begin{thebibliography}}{\end{thebibliography}}% 54 | } 55 | {}% else clause of \ltx@ifundefined 56 | \ltx@ifundefined{sphinxtheindex} 57 | {\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}}% 58 | {}% else clause of \ltx@ifundefined 59 | 60 | % for usage with xindy: this string gets internationalized in preamble 61 | \newcommand*{\sphinxnonalphabeticalgroupname}{} 62 | % redefined in preamble, headings for makeindex produced index 63 | \newcommand*{\sphinxsymbolsname}{} 64 | \newcommand*{\sphinxnumbersname}{} 65 | 66 | \protected\def\sphinxcite{\cite} 67 | 68 | 69 | \endinput 70 | -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxlatexstyleheadings.sty: -------------------------------------------------------------------------------- 1 | %% TITLES 2 | % 3 | % change this info string if making any custom modification 4 | \ProvidesFile{sphinxlatexstyleheadings.sty}[2023/02/11 headings] 5 | 6 | \RequirePackage[nobottomtitles*]{titlesec} 7 | \@ifpackagelater{titlesec}{2016/03/15}% 8 | {\@ifpackagelater{titlesec}{2016/03/21}% 9 | {}% 10 | {\newif\ifsphinx@ttlpatch@ok 11 | \IfFileExists{etoolbox.sty}{% 12 | 
\RequirePackage{etoolbox}% 13 | \patchcmd{\ttlh@hang}{\parindent\z@}{\parindent\z@\leavevmode}% 14 | {\sphinx@ttlpatch@oktrue}{}% 15 | \ifsphinx@ttlpatch@ok 16 | \patchcmd{\ttlh@hang}{\noindent}{}{}{\sphinx@ttlpatch@okfalse}% 17 | \fi 18 | }{}% 19 | \ifsphinx@ttlpatch@ok 20 | \typeout{^^J Package Sphinx Info: ^^J 21 | **** titlesec 2.10.1 successfully patched for bugfix ****^^J}% 22 | \else 23 | \AtEndDocument{\PackageWarningNoLine{sphinx}{^^J% 24 | ******** titlesec 2.10.1 has a bug, (section numbers disappear) ......|^^J% 25 | ******** and Sphinx could not patch it, perhaps because your local ...|^^J% 26 | ******** copy is already fixed without a changed release date. .......|^^J% 27 | ******** If not, you must update titlesec! ...........................|}}% 28 | \sphinxbuildwarning{badtitlesec}% 29 | \fi 30 | }% 31 | }{} 32 | 33 | % Augment the sectioning commands used to get our own font family in place, 34 | % and reset some internal data items (\titleformat from titlesec package) 35 | \titleformat{\section}{\Large\py@HeaderFamily}% 36 | {\py@TitleColor\thesection}{0.5em}{\py@TitleColor} 37 | \titleformat{\subsection}{\large\py@HeaderFamily}% 38 | {\py@TitleColor\thesubsection}{0.5em}{\py@TitleColor} 39 | % \normalsize added as work-around to a lualatex-ja upstream problem 40 | % https://osdn.net/projects/luatex-ja/ticket/47321 41 | \titleformat{\subsubsection}{\normalsize\py@HeaderFamily}% 42 | {\py@TitleColor\thesubsubsection}{0.5em}{\py@TitleColor} 43 | % By default paragraphs (and subsubsections) will not be numbered because 44 | % sphinxmanual.cls and sphinxhowto.cls set secnumdepth to 2 45 | \titleformat{\paragraph}{\normalsize\py@HeaderFamily}% 46 | {\py@TitleColor\theparagraph}{0.5em}{\py@TitleColor} 47 | \titleformat{\subparagraph}{\normalsize\py@HeaderFamily}% 48 | {\py@TitleColor\thesubparagraph}{0.5em}{\py@TitleColor} 49 | 50 | 51 | % Since Sphinx 1.5, users should use HeaderFamily key to 'sphinxsetup' rather 52 | % than defining their own \py@HeaderFamily command (which is still possible). 53 | % Memo: \py@HeaderFamily is also used by \maketitle as defined in 54 | % sphinxmanual.cls/sphinxhowto.cls 55 | \newcommand{\py@HeaderFamily}{\spx@opt@HeaderFamily} 56 | 57 | % This sets up the fancy chapter headings that make the documents look 58 | % at least a little better than the usual LaTeX output. 
59 | \@ifpackagewith{fncychap}{Bjarne}{ 60 | \ChNameVar {\raggedleft\normalsize \py@HeaderFamily} 61 | \ChNumVar {\raggedleft\Large \py@HeaderFamily} 62 | \ChTitleVar{\raggedleft\Large \py@HeaderFamily} 63 | % This creates (numbered) chapter heads without the leading \vspace*{}: 64 | \def\@makechapterhead#1{% 65 | {\parindent \z@ \raggedright \normalfont 66 | \ifnum \c@secnumdepth >\m@ne 67 | \if@mainmatter 68 | \DOCH 69 | \fi 70 | \fi 71 | \interlinepenalty\@M 72 | \if@mainmatter 73 | \DOTI{#1}% 74 | \else% 75 | \DOTIS{#1}% 76 | \fi 77 | }} 78 | }{}% <-- "false" clause of \@ifpackagewith 79 | 80 | % fix fncychap's bug which uses prematurely the \textwidth value 81 | \@ifpackagewith{fncychap}{Bjornstrup} 82 | {\AtBeginDocument{\mylen\textwidth\advance\mylen-2\myhi}}% 83 | {}% <-- "false" clause of \@ifpackagewith 84 | 85 | 86 | \endinput 87 | -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxlatexstylepage.sty: -------------------------------------------------------------------------------- 1 | %% PAGE STYLING 2 | % 3 | % change this info string if making any custom modification 4 | \ProvidesFile{sphinxlatexstylepage.sty}[2021/01/27 page styling] 5 | 6 | % Separate paragraphs by space by default. 7 | \IfFileExists{parskip-2001-04-09.sty}% since September 2018 TeXLive update 8 | % new parskip.sty, but let it rollback to old one. 9 | % hopefully TeX installation not broken and LaTeX kernel not too old 10 | {\RequirePackage{parskip}[=v1]} 11 | % standard one from 1989. Admittedly \section of article/book gives possibly 12 | % anomalous spacing, but we can't require September 2018 release for some time. 13 | {\RequirePackage{parskip}} 14 | 15 | % Style parameters and macros used by most documents here 16 | \raggedbottom 17 | \sloppy 18 | \hbadness = 5000 % don't print trivial gripes 19 | 20 | % Require package fancyhdr except under memoir class 21 | \@ifclassloaded{memoir}{}{\RequirePackage{fancyhdr}} 22 | % Use \pagestyle{normal} as the primary pagestyle for text. 23 | % Redefine the 'normal' header/footer style when using "fancyhdr" package: 24 | \@ifpackageloaded{fancyhdr}{% 25 | \ltx@ifundefined{c@chapter} 26 | {% no \chapter, "howto" (non-Japanese) docclass 27 | \fancypagestyle{plain}{ 28 | \fancyhf{} 29 | \fancyfoot[C]{{\py@HeaderFamily\thepage}} 30 | \renewcommand{\headrulewidth}{0pt} 31 | \renewcommand{\footrulewidth}{0pt} 32 | } 33 | % Same as 'plain', this way we can use it in template 34 | % FIXME: shouldn't this have a running header with Name and Release like 'manual'? 
35 | \fancypagestyle{normal}{ 36 | \fancyhf{} 37 | \fancyfoot[C]{{\py@HeaderFamily\thepage}} 38 | \renewcommand{\headrulewidth}{0pt} 39 | \renewcommand{\footrulewidth}{0pt} 40 | } 41 | }% 42 | {% classes with \chapter command 43 | \fancypagestyle{normal}{ 44 | \fancyhf{} 45 | \fancyfoot[RO]{{\py@HeaderFamily\thepage}} 46 | \fancyfoot[LO]{{\py@HeaderFamily\nouppercase{\rightmark}}} 47 | \fancyhead[RO]{{\py@HeaderFamily \@title\sphinxheadercomma\py@release}} 48 | \if@twoside 49 | \fancyfoot[LE]{{\py@HeaderFamily\thepage}} 50 | \fancyfoot[RE]{{\py@HeaderFamily\nouppercase{\leftmark}}} 51 | \fancyhead[LE]{{\py@HeaderFamily \@title\sphinxheadercomma\py@release}} 52 | \fi 53 | \renewcommand{\headrulewidth}{0.4pt} 54 | \renewcommand{\footrulewidth}{0.4pt} 55 | % define chaptermark with \@chappos when \@chappos is available for Japanese 56 | \ltx@ifundefined{@chappos}{} 57 | {\def\chaptermark##1{\markboth{\@chapapp\space\thechapter\space\@chappos\space ##1}{}}} 58 | } 59 | % Update the plain style so we get the page number & footer line, 60 | % but not a chapter or section title. This is to keep the first 61 | % page of a chapter `clean.' 62 | \fancypagestyle{plain}{ 63 | \fancyhf{} 64 | \fancyfoot[RO]{{\py@HeaderFamily\thepage}} 65 | \if@twoside\fancyfoot[LE]{{\py@HeaderFamily\thepage}}\fi 66 | \renewcommand{\headrulewidth}{0pt} 67 | \renewcommand{\footrulewidth}{0.4pt} 68 | } 69 | } 70 | } 71 | {% no fancyhdr: memoir class 72 | % Provide default for 'normal' style simply as an alias of 'plain' style 73 | % This way we can use \pagestyle{normal} in LaTeX template 74 | \def\ps@normal{\ps@plain} 75 | % Users of memoir class are invited to redefine 'normal' style in preamble 76 | } 77 | 78 | 79 | \endinput 80 | -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxmanual.cls: -------------------------------------------------------------------------------- 1 | % 2 | % sphinxmanual.cls for Sphinx (https://www.sphinx-doc.org/) 3 | % 4 | 5 | \NeedsTeXFormat{LaTeX2e}[1995/12/01] 6 | \ProvidesClass{sphinxmanual}[2019/12/01 v2.3.0 Document class (Sphinx manual)] 7 | 8 | % chapters starting at odd pages (overridden by 'openany' document option) 9 | \PassOptionsToClass{openright}{\sphinxdocclass} 10 | 11 | % 'oneside' option overriding the 'twoside' default 12 | \newif\if@oneside 13 | \DeclareOption{oneside}{\@onesidetrue} 14 | % Pass remaining document options to the parent class. 15 | \DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}} 16 | \ProcessOptions\relax 17 | 18 | % Defaults two-side document 19 | \if@oneside 20 | % nothing to do (oneside is the default) 21 | \else 22 | \PassOptionsToClass{twoside}{\sphinxdocclass} 23 | \fi 24 | 25 | \LoadClass{\sphinxdocclass} 26 | 27 | % Set some sane defaults for section numbering depth and TOC depth. You can 28 | % reset these counters in your preamble. 
29 | % 30 | \setcounter{secnumdepth}{2} 31 | \setcounter{tocdepth}{1} 32 | 33 | % Adapt \and command to the flushright context of \sphinxmaketitle, to 34 | % avoid ragged line endings if author names do not fit all on one single line 35 | \DeclareRobustCommand{\and}{% 36 | \end{tabular}\kern-\tabcolsep 37 | \allowbreak 38 | \hskip\dimexpr1em+\tabcolsep\@plus.17fil\begin{tabular}[t]{c}% 39 | }% 40 | % If it is desired that each author name be on its own line, use in preamble: 41 | %\DeclareRobustCommand{\and}{% 42 | % \end{tabular}\kern-\tabcolsep\\\begin{tabular}[t]{c}% 43 | %}% 44 | % Change the title page to look a bit better, and fit in with the fncychap 45 | % ``Bjarne'' style a bit better. 46 | % 47 | \newcommand{\sphinxmaketitle}{% 48 | \let\sphinxrestorepageanchorsetting\relax 49 | \ifHy@pageanchor\def\sphinxrestorepageanchorsetting{\Hy@pageanchortrue}\fi 50 | \hypersetup{pageanchor=false}% avoid duplicate destination warnings 51 | \begin{titlepage}% 52 | \let\footnotesize\small 53 | \let\footnoterule\relax 54 | \noindent\rule{\textwidth}{1pt}\par 55 | \begingroup % for PDF information dictionary 56 | \def\endgraf{ }\def\and{\& }% 57 | \pdfstringdefDisableCommands{\def\\{, }}% overwrite hyperref setup 58 | \hypersetup{pdfauthor={\@author}, pdftitle={\@title}}% 59 | \endgroup 60 | \begin{flushright}% 61 | \sphinxlogo 62 | \py@HeaderFamily 63 | {\Huge \@title \par} 64 | {\itshape\LARGE \py@release\releaseinfo \par} 65 | \vfill 66 | {\LARGE 67 | \begin{tabular}[t]{c} 68 | \@author 69 | \end{tabular}\kern-\tabcolsep 70 | \par} 71 | \vfill\vfill 72 | {\large 73 | \@date \par 74 | \vfill 75 | \py@authoraddress \par 76 | }% 77 | \end{flushright}%\par 78 | \@thanks 79 | \end{titlepage}% 80 | \setcounter{footnote}{0}% 81 | \let\thanks\relax\let\maketitle\relax 82 | %\gdef\@thanks{}\gdef\@author{}\gdef\@title{} 83 | \clearpage 84 | \ifdefined\sphinxbackoftitlepage\sphinxbackoftitlepage\fi 85 | \if@openright\cleardoublepage\else\clearpage\fi 86 | \sphinxrestorepageanchorsetting 87 | } 88 | 89 | \newcommand{\sphinxtableofcontents}{% 90 | \pagenumbering{roman}% 91 | \begingroup 92 | \parskip \z@skip 93 | \sphinxtableofcontentshook 94 | \tableofcontents 95 | \endgroup 96 | % before resetting page counter, let's do the right thing. 97 | \if@openright\cleardoublepage\else\clearpage\fi 98 | \pagenumbering{arabic}% 99 | } 100 | 101 | % This is needed to get the width of the section # area wide enough in the 102 | % library reference. Doing it here keeps it the same for all the manuals. 103 | % 104 | \newcommand{\sphinxtableofcontentshook}{% 105 | \renewcommand*\l@section{\@dottedtocline{1}{1.5em}{2.6em}}% 106 | \renewcommand*\l@subsection{\@dottedtocline{2}{4.1em}{3.5em}}% 107 | } 108 | 109 | % Fix the bibliography environment to add an entry to the Table of 110 | % Contents. 111 | % For a report document class this environment is a chapter. 112 | % 113 | \newenvironment{sphinxthebibliography}[1]{% 114 | \if@openright\cleardoublepage\else\clearpage\fi 115 | % \phantomsection % not needed here since TeXLive 2010's hyperref 116 | \begin{thebibliography}{#1}% 117 | \addcontentsline{toc}{chapter}{\bibname}}{\end{thebibliography}} 118 | 119 | % Same for the indices. 120 | % The memoir class already does this, so we don't duplicate it in that case. 
121 | % 122 | \@ifclassloaded{memoir} 123 | {\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}} 124 | {\newenvironment{sphinxtheindex}{% 125 | \if@openright\cleardoublepage\else\clearpage\fi 126 | \phantomsection % needed as no chapter, section, ... created 127 | \begin{theindex}% 128 | \addcontentsline{toc}{chapter}{\indexname}}{\end{theindex}}} 129 | -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxmessages.sty: -------------------------------------------------------------------------------- 1 | % 2 | % sphinxmessages.sty 3 | % 4 | % message resources for Sphinx 5 | % 6 | \ProvidesPackage{sphinxmessages}[2019/01/04 v2.0 Localized LaTeX macros (Sphinx team)] 7 | 8 | \renewcommand{\literalblockcontinuedname}{continued from previous page} 9 | \renewcommand{\literalblockcontinuesname}{continues on next page} 10 | \renewcommand{\sphinxnonalphabeticalgroupname}{Non\sphinxhyphen{}alphabetical} 11 | \renewcommand{\sphinxsymbolsname}{Symbols} 12 | \renewcommand{\sphinxnumbersname}{Numbers} 13 | \def\pageautorefname{page} 14 | 15 | \addto\captionsenglish{\renewcommand{\figurename}{Fig.\@{} }} 16 | \def\fnum@figure{\figurename\thefigure{}} 17 | 18 | \addto\captionsenglish{\renewcommand{\tablename}{Table }} 19 | \def\fnum@table{\tablename\thetable{}} 20 | 21 | \addto\captionsenglish{\renewcommand{\literalblockname}{Listing}} -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxoptionsgeometry.sty: -------------------------------------------------------------------------------- 1 | %% OPTIONS FOR GEOMETRY 2 | % 3 | % change this info string if making any custom modification 4 | \ProvidesFile{sphinxoptionsgeometry.sty}[2021/01/27 geometry] 5 | 6 | % geometry 7 | \ifx\kanjiskip\@undefined 8 | \PassOptionsToPackage{% 9 | hmargin={\unexpanded{\spx@opt@hmargin}},% 10 | vmargin={\unexpanded{\spx@opt@vmargin}},% 11 | marginpar=\unexpanded{\spx@opt@marginpar}} 12 | {geometry} 13 | \else 14 | % set text width for Japanese documents to be integer multiple of 1zw 15 | % and text height to be integer multiple of \baselineskip 16 | % the execution is delayed to \sphinxsetup then geometry.sty 17 | \normalsize\normalfont 18 | \newcommand*\sphinxtextwidthja[1]{% 19 | \if@twocolumn\tw@\fi 20 | \dimexpr 21 | \numexpr\dimexpr\paperwidth-\tw@\dimexpr#1\relax\relax/ 22 | \dimexpr\if@twocolumn\tw@\else\@ne\fi zw\relax 23 | zw\relax}% 24 | \newcommand*\sphinxmarginparwidthja[1]{% 25 | \dimexpr\numexpr\dimexpr#1\relax/\dimexpr1zw\relax zw\relax}% 26 | \newcommand*\sphinxtextlinesja[1]{% 27 | \numexpr\@ne+\dimexpr\paperheight-\topskip-\tw@\dimexpr#1\relax\relax/ 28 | \baselineskip\relax}% 29 | \ifx\@jsc@uplatextrue\@undefined\else 30 | % the way we found in order for the papersize special written by 31 | % geometry in the dvi file to be correct in case of jsbook class 32 | \ifnum\mag=\@m\else % do nothing special if nomag class option or 10pt 33 | \PassOptionsToPackage{truedimen}{geometry}% 34 | \fi 35 | \fi 36 | \PassOptionsToPackage{% 37 | hmarginratio={1:1},% 38 | textwidth=\unexpanded{\sphinxtextwidthja{\spx@opt@hmargin}},% 39 | vmarginratio={1:1},% 40 | lines=\unexpanded{\sphinxtextlinesja{\spx@opt@vmargin}},% 41 | marginpar=\unexpanded{\sphinxmarginparwidthja{\spx@opt@marginpar}},% 42 | footskip=2\baselineskip,% 43 | }{geometry}% 44 | \AtBeginDocument 45 | {% update a dimension used by the jsclasses 46 | \ifx\@jsc@uplatextrue\@undefined\else\fullwidth\textwidth\fi 47 | % for some reason, 
jreport normalizes all dimensions with \@settopoint 48 | \@ifclassloaded{jreport} 49 | {\@settopoint\textwidth\@settopoint\textheight\@settopoint\marginparwidth} 50 | {}% <-- "false" clause of \@ifclassloaded 51 | }% 52 | \fi 53 | 54 | \endinput 55 | -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxoptionshyperref.sty: -------------------------------------------------------------------------------- 1 | %% Bookmarks and hyperlinks 2 | % 3 | % change this info string if making any custom modification 4 | \ProvidesFile{sphinxoptionshyperref.sty}[2021/01/27 hyperref] 5 | 6 | % to make pdf with correct encoded bookmarks in Japanese 7 | % this should precede the hyperref package 8 | \ifx\kanjiskip\@undefined 9 | % for non-Japanese: make sure bookmarks are ok also with lualatex 10 | \PassOptionsToPackage{pdfencoding=unicode}{hyperref} 11 | \else 12 | \RequirePackage{atbegshi} 13 | \ifx\ucs\@undefined 14 | \ifnum 42146=\euc"A4A2 15 | \AtBeginShipoutFirst{\special{pdf:tounicode EUC-UCS2}} 16 | \else 17 | \AtBeginShipoutFirst{\special{pdf:tounicode 90ms-RKSJ-UCS2}} 18 | \fi 19 | \else 20 | \AtBeginShipoutFirst{\special{pdf:tounicode UTF8-UCS2}} 21 | \fi 22 | \fi 23 | 24 | \ifx\@jsc@uplatextrue\@undefined\else 25 | \PassOptionsToPackage{setpagesize=false}{hyperref} 26 | \fi 27 | 28 | % These options can be overridden inside 'hyperref' key 29 | % or by later use of \hypersetup. 30 | \PassOptionsToPackage{colorlinks,breaklinks,% 31 | linkcolor=InnerLinkColor,filecolor=OuterLinkColor,% 32 | menucolor=OuterLinkColor,urlcolor=OuterLinkColor,% 33 | citecolor=InnerLinkColor}{hyperref} 34 | 35 | \endinput 36 | -------------------------------------------------------------------------------- /sphinx/_build/latex/sphinxpackagecyrillic.sty: -------------------------------------------------------------------------------- 1 | %% CYRILLIC IN NON-CYRILLIC DOCUMENTS (pdflatex only) 2 | % 3 | % refs: https://tex.stackexchange.com/q/460271/ 4 | \ProvidesPackage{sphinxpackagecyrillic}% 5 | [2018/11/21 v2.0 support for Cyrillic in non-Cyrillic documents] 6 | \RequirePackage{kvoptions} 7 | \SetupKeyvalOptions{prefix=spx@cyropt@} % use \spx@cyropt@ prefix 8 | \DeclareBoolOption[false]{Xtwo} 9 | \DeclareBoolOption[false]{TtwoA} 10 | \DeclareDefaultOption{\@unknownoptionerror} 11 | \ProcessLocalKeyvalOptions* % ignore class options 12 | 13 | \ifspx@cyropt@Xtwo 14 | % original code by tex.sx user egreg (updated 2019/10/28): 15 | % https://tex.stackexchange.com/a/460325/ 16 | % 159 Cyrillic glyphs as available in X2 TeX 8bit font encoding 17 | % This assumes inputenc loaded with utf8 option, or LaTeX release 18 | % as recent as 2018/04/01 which does it automatically. 
19 | \@tfor\next:=% 20 | {Ё}{Ђ}{Є}{Ѕ}{І}{Ј}{Љ}{Њ}{Ћ}{Ў}{Џ}{А}{Б}{В}{Г}{Д}{Е}{Ж}{З}{И}{Й}% 21 | {К}{Л}{М}{Н}{О}{П}{Р}{С}{Т}{У}{Ф}{Х}{Ц}{Ч}{Ш}{Щ}{Ъ}{Ы}{Ь}{Э}{Ю}% 22 | {Я}{а}{б}{в}{г}{д}{е}{ж}{з}{и}{й}{к}{л}{м}{н}{о}{п}{р}{с}{т}{у}% 23 | {ф}{х}{ц}{ч}{ш}{щ}{ъ}{ы}{ь}{э}{ю}{я}{ё}{ђ}{є}{ѕ}{і}{ј}{љ}{њ}{ћ}% 24 | {ў}{џ}{Ѣ}{ѣ}{Ѫ}{ѫ}{Ѵ}{ѵ}{Ґ}{ґ}{Ғ}{ғ}{Ҕ}{ҕ}{Җ}{җ}{Ҙ}{ҙ}{Қ}{қ}{Ҝ}{ҝ}% 25 | {Ҟ}{ҟ}{Ҡ}{ҡ}{Ң}{ң}{Ҥ}{ҥ}{Ҧ}{ҧ}{Ҩ}{ҩ}{Ҫ}{ҫ}{Ҭ}{ҭ}{Ү}{ү}{Ұ}{ұ}{Ҳ}{ҳ}% 26 | {Ҵ}{ҵ}{Ҷ}{ҷ}{Ҹ}{ҹ}{Һ}{һ}{Ҽ}{ҽ}{Ҿ}{ҿ}{Ӏ}{Ӄ}{ӄ}{Ӆ}{ӆ}{Ӈ}{ӈ}{Ӌ}{ӌ}% 27 | {Ӎ}{ӎ}{Ӕ}{ӕ}{Ә}{ә}{Ӡ}{ӡ}{Ө}{ө}\do 28 | {% 29 | \begingroup\def\IeC{\protect\DeclareTextSymbolDefault}% 30 | \protected@edef\@temp{\endgroup 31 | \@ifl@t@r{\fmtversion}{2019/10/01}{\csname u8:\next\endcsname}{\next}}% 32 | \@temp{X2}% 33 | }% 34 | \else 35 | \ifspx@cyropt@TtwoA 36 | % original code by tex.sx user jfbu: 37 | % https://tex.stackexchange.com/a/460305/ 38 | % 63*2+1=127 Cyrillic glyphs as found in T2A 8bit TeX font-encoding 39 | \@tfor\@tempa:=% 40 | {ae}{a}{b}{chrdsc}{chvcrs}{ch}{c}{dje}{dze}{dzhe}{d}{erev}{ery}{e}% 41 | {f}{ghcrs}{gup}{g}{hdsc}{hrdsn}{h}{ie}{ii}{ishrt}{i}{je}% 42 | {kbeak}{kdsc}{kvcrs}{k}{lje}{l}{m}{ndsc}{ng}{nje}{n}{otld}{o}{p}{r}% 43 | {schwa}{sdsc}{sftsn}{shch}{shha}{sh}{s}{tshe}{t}{ushrt}{u}{v}% 44 | {ya}{yhcrs}{yi}{yo}{yu}{y}{zdsc}{zhdsc}{zh}{z}\do 45 | {% 46 | \expandafter\DeclareTextSymbolDefault\expandafter 47 | {\csname cyr\@tempa\endcsname}{T2A}% 48 | \expandafter\uppercase\expandafter{\expandafter 49 | \def\expandafter\@tempa\expandafter{\@tempa}}% 50 | \expandafter\DeclareTextSymbolDefault\expandafter 51 | {\csname CYR\@tempa\endcsname}{T2A}% 52 | }% 53 | \DeclareTextSymbolDefault{\CYRpalochka}{T2A}% 54 | \fi\fi 55 | \endinput 56 | -------------------------------------------------------------------------------- /sphinx/beyondml.pt.rst: -------------------------------------------------------------------------------- 1 | beyondml.pt package 2 | =================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | beyondml.pt.layers 11 | beyondml.pt.utils 12 | 13 | Module contents 14 | --------------- 15 | 16 | .. automodule:: beyondml.pt 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | -------------------------------------------------------------------------------- /sphinx/beyondml.pt.utils.rst: -------------------------------------------------------------------------------- 1 | beyondml.pt.utils package 2 | ========================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | beyondml.pt.utils.utils module 8 | ------------------------------ 9 | 10 | .. automodule:: beyondml.pt.utils.utils 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. automodule:: beyondml.pt.utils 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /sphinx/beyondml.rst: -------------------------------------------------------------------------------- 1 | beyondml package 2 | ================ 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | beyondml.pt 11 | beyondml.tflow 12 | 13 | Module contents 14 | --------------- 15 | 16 | .. 
automodule:: beyondml 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | -------------------------------------------------------------------------------- /sphinx/beyondml.tflow.rst: -------------------------------------------------------------------------------- 1 | beyondml.tflow package 2 | ====================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | beyondml.tflow.layers 11 | beyondml.tflow.utils 12 | 13 | Module contents 14 | --------------- 15 | 16 | .. automodule:: beyondml.tflow 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | -------------------------------------------------------------------------------- /sphinx/beyondml.tflow.utils.rst: -------------------------------------------------------------------------------- 1 | beyondml.tflow.utils package 2 | ============================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | beyondml.tflow.utils.transformer module 8 | --------------------------------------- 9 | 10 | .. automodule:: beyondml.tflow.utils.transformer 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | beyondml.tflow.utils.utils module 16 | --------------------------------- 17 | 18 | .. automodule:: beyondml.tflow.utils.utils 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: beyondml.tflow.utils 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /sphinx/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | 9 | project = 'BeyondML' 10 | copyright = '2023, BeyondML Labs' 11 | author = 'BeyondML Labs' 12 | 13 | # -- General configuration --------------------------------------------------- 14 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 15 | 16 | extensions = [ 17 | 'sphinx.ext.autodoc', 18 | 'sphinx.ext.viewcode', 19 | 'sphinx.ext.napoleon' 20 | ] 21 | 22 | templates_path = ['_templates'] 23 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 24 | 25 | 26 | # -- Options for HTML output ------------------------------------------------- 27 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 28 | 29 | html_theme = 'sphinx_rtd_theme' 30 | html_static_path = ['_static'] 31 | -------------------------------------------------------------------------------- /sphinx/images/BeyondML_horizontal-color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Beyond-ML-Labs/BeyondML/ae7f7ebe44ea736d6e6daaf42936738f4d313bdc/sphinx/images/BeyondML_horizontal-color.png -------------------------------------------------------------------------------- /sphinx/index.rst: -------------------------------------------------------------------------------- 1 | .. BeyondML documentation master file, created by 2 | sphinx-quickstart on Fri Jan 6 12:23:41 2023. 
3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. image:: images/BeyondML_horizontal-color.png 7 | :align: center 8 | :width: 400 9 | 10 | | 11 | 12 | Welcome to BeyondML's documentation! 13 | ==================================== 14 | 15 | BeyondML is a Python package for creating sparse multitask artificial neural networks (MANNs) 16 | compatible with `TensorFlow `_ and `PyTorch `_. 17 | This package contains custom layers and utilities to facilitate the training and optimization of models 18 | using the Reduction of Sub-Network Neuroplasticity (RSN2) training procedure developed by `AI Squared, Inc `_. 19 | 20 | :download:`View this Documentation in PDF Format <./_build/latex/beyondml.pdf>` 21 | 22 | Installation 23 | ************ 24 | 25 | This package is available through `PyPI `_ and can be installed by running the following command: 26 | 27 | .. code-block:: 28 | 29 | pip install beyondml 30 | 31 | Alternatively, the latest version of the software can be installed directly from GitHub using the following command: 32 | 33 | .. code-block:: 34 | 35 | pip install git+https://github.com/beyond-ml-labs/beyondml 36 | 37 | .. toctree:: 38 | :maxdepth: 2 39 | :caption: Documentation: 40 | 41 | modules 42 | 43 | Changelog 44 | ********* 45 | 46 | - Version 0.1.0 47 | - Refactored the existing MANN repository and renamed it to BeyondML 48 | - Version 0.1.1 49 | - Added the `SparseDense`, `SparseConv`, `SparseMultiDense`, and `SparseMultiConv` layers to 50 | `beyondml.tflow.layers`, giving users the ability to utilize sparse tensors during 51 | inference 52 | - Version 0.1.2 53 | - Added the `MaskedMultiHeadAttention`, `MaskedTransformerEncoderLayer`, and `MaskedTransformerDecoderLayer` layers to `beyondml.pt.layers` to add pruning to the transformer architecture 54 | - Added `MaskedConv3D`, `MultiMaskedConv3D`, `MultiConv3D`, `MultiMaxPool3D`, `SparseConv3D`, and `SparseMultiConv3D` layers to `beyondml.tflow.layers` 55 | - Added `MaskedConv3D`, `MultiMaskedConv3D`, `MultiConv3D`, `MultiMaxPool3D`, `SparseConv3D`, `SparseMultiConv3D`, and `MultiMaxPool2D` layers to `beyondml.pt.layers` 56 | - Version 0.1.3 57 | - Added `beyondml.pt` compatibility with more native PyTorch functionality for using models on different devices and datatypes 58 | - Added the `train_model` function to `beyondml.tflow.utils` 59 | - Added the `MultitaskNormalization` layer to `beyondml.tflow.layers` and `beyondml.pt.layers` 60 | - Version 0.1.4 61 | - Updated documentation to use Sphinx 62 | - Version 0.1.5 63 | - Updated requirements to use a newer version of TensorFlow 64 | - Fixed errors with changes to types of `input_shape` in TensorFlow Keras layers 65 | - Fixed errors resulting from model/configuration changes with TensorFlow 66 | - Version 0.1.6 67 | - Fixed issues with converting between masked and unmasked models in TensorFlow 68 | - Version 0.1.7 69 | - Updated the PyTorch implementation of Transformer-based architectures -------------------------------------------------------------------------------- /sphinx/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found.
Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /sphinx/modules.rst: -------------------------------------------------------------------------------- 1 | beyondml 2 | ======== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | beyondml 8 | -------------------------------------------------------------------------------- /tests/test_simple.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | def test_pass(): 5 | assert True 6 | 7 | 8 | def test_import(): 9 | import beyondml 10 | --------------------------------------------------------------------------------
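--------------------------------------------------------------------------------

Editor's addendum (not a file in the repository): sphinx/index.rst above describes the package's masked layers and the RSN2 training procedure only in prose, so a minimal usage sketch follows. It assumes that `beyondml.tflow.layers.MaskedDense` mirrors the constructor of `tf.keras.layers.Dense` and that masked layers drop into an ordinary Keras model unchanged; neither signature is confirmed by the files in this listing, so treat this as a hedged illustration rather than the package's documented API.

import tensorflow as tf
from beyondml.tflow import layers

# Build a small model out of prunable (masked) layers.
# Assumption: MaskedDense(units, activation=...) behaves like keras Dense,
# carrying a weight mask used for pruning alongside the usual kernel and bias.
inputs = tf.keras.Input(shape=(32,))
hidden = layers.MaskedDense(64, activation='relu')(inputs)
outputs = layers.MaskedDense(10, activation='softmax')(hidden)
model = tf.keras.Model(inputs, outputs)

# Masked layers are intended to train like their keras counterparts; the
# RSN2 pruning utilities, including the train_model helper noted in the
# version 0.1.3 changelog entry, live in beyondml.tflow.utils.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.summary()

The sketch compiles a single-task model for brevity; the Multi* layers in the tree above suggest the same pattern extends to multitask models, with one input and output per task.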