├── .github ├── ISSUE_TEMPLATE │ ├── ask-a-question.md │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── publish.yml │ ├── tests-fast-pr.yml │ ├── tests-fast.yml │ └── tests-slow.yml ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── README-pypi.rst ├── README.rst ├── STYLE.md ├── bandit.yaml ├── bin ├── flambe └── flambe-site ├── docs ├── Makefile ├── README.md ├── _static │ └── css │ │ └── custom.css ├── conf.py ├── image │ ├── ASAPP_Flambe_Symbol_RGB_Black.png │ ├── ASAPP_Flambe_Symbol_RGB_Pink.png │ ├── Flambe_Logo_RGB_FullColor.png │ ├── aws-cluster │ │ ├── access-keys.png │ │ ├── attach-2.png │ │ ├── attach.png │ │ ├── auto-assign-2.png │ │ ├── auto-assign.png │ │ ├── cluster-run-reuse.png │ │ ├── cluster-run.png │ │ ├── console-home.png │ │ ├── create-ig.png │ │ ├── create-internet-gateway.png │ │ ├── create-key-pair.png │ │ ├── create-sg.png │ │ ├── create-subnet-2.png │ │ ├── create-subnet-3.png │ │ ├── create-subnet.png │ │ ├── create-vpc-2.png │ │ ├── create-vpc.png │ │ ├── download-key-pair.png │ │ ├── download-keys.png │ │ ├── ec2-home.png │ │ ├── edit-dns.png │ │ └── instances.png │ ├── remote-arch.png │ ├── report-site │ │ ├── console.png │ │ ├── final-variants.png │ │ ├── final.png │ │ ├── partial-variants.png │ │ ├── partial.png │ │ └── tensorboard.png │ └── ssh-cluster.png ├── index.rst ├── requirements.txt ├── source │ ├── flambe.cluster.aws.rst │ ├── flambe.cluster.cluster.rst │ ├── flambe.cluster.const.rst │ ├── flambe.cluster.errors.rst │ ├── flambe.cluster.instance.errors.rst │ ├── flambe.cluster.instance.instance.rst │ ├── flambe.cluster.instance.rst │ ├── flambe.cluster.rst │ ├── flambe.cluster.ssh.rst │ ├── flambe.cluster.utils.rst │ ├── flambe.compile.component.rst │ ├── flambe.compile.const.rst │ ├── flambe.compile.downloader.rst │ ├── flambe.compile.extensions.rst │ ├── flambe.compile.registrable.rst │ ├── flambe.compile.rst │ ├── flambe.compile.serialization.rst │ ├── flambe.compile.utils.rst │ ├── 
flambe.const.rst │ ├── flambe.dataset.dataset.rst │ ├── flambe.dataset.rst │ ├── flambe.dataset.tabular.rst │ ├── flambe.experiment.experiment.rst │ ├── flambe.experiment.options.rst │ ├── flambe.experiment.progress.rst │ ├── flambe.experiment.rst │ ├── flambe.experiment.tune_adapter.rst │ ├── flambe.experiment.utils.rst │ ├── flambe.experiment.webapp.app.rst │ ├── flambe.experiment.webapp.rst │ ├── flambe.experiment.wording.rst │ ├── flambe.export.builder.rst │ ├── flambe.export.exporter.rst │ ├── flambe.export.rst │ ├── flambe.field.bow.rst │ ├── flambe.field.field.rst │ ├── flambe.field.label.rst │ ├── flambe.field.rst │ ├── flambe.field.text.rst │ ├── flambe.learn.distillation.rst │ ├── flambe.learn.eval.rst │ ├── flambe.learn.rst │ ├── flambe.learn.script.rst │ ├── flambe.learn.train.rst │ ├── flambe.logging.datatypes.rst │ ├── flambe.logging.handler.contextual_file.rst │ ├── flambe.logging.handler.rst │ ├── flambe.logging.handler.tensorboard.rst │ ├── flambe.logging.logging.rst │ ├── flambe.logging.rst │ ├── flambe.logging.utils.rst │ ├── flambe.logo.rst │ ├── flambe.metric.dev.accuracy.rst │ ├── flambe.metric.dev.auc.rst │ ├── flambe.metric.dev.binary.rst │ ├── flambe.metric.dev.perplexity.rst │ ├── flambe.metric.dev.rst │ ├── flambe.metric.loss.cross_entropy.rst │ ├── flambe.metric.loss.nll_loss.rst │ ├── flambe.metric.loss.rst │ ├── flambe.metric.metric.rst │ ├── flambe.metric.rst │ ├── flambe.model.logistic_regression.rst │ ├── flambe.model.rst │ ├── flambe.nlp.classification.datasets.rst │ ├── flambe.nlp.classification.model.rst │ ├── flambe.nlp.classification.rst │ ├── flambe.nlp.fewshot.model.rst │ ├── flambe.nlp.fewshot.rst │ ├── flambe.nlp.language_modeling.datasets.rst │ ├── flambe.nlp.language_modeling.fields.rst │ ├── flambe.nlp.language_modeling.model.rst │ ├── flambe.nlp.language_modeling.rst │ ├── flambe.nlp.rst │ ├── flambe.nlp.transformers.bert.rst │ ├── flambe.nlp.transformers.openai.rst │ ├── flambe.nlp.transformers.optim.rst │ ├── 
flambe.nlp.transformers.rst │ ├── flambe.nn.cnn.rst │ ├── flambe.nn.distance.cosine.rst │ ├── flambe.nn.distance.distance.rst │ ├── flambe.nn.distance.euclidean.rst │ ├── flambe.nn.distance.hyperbolic.rst │ ├── flambe.nn.distance.rst │ ├── flambe.nn.embedder.rst │ ├── flambe.nn.mlp.rst │ ├── flambe.nn.module.rst │ ├── flambe.nn.mos.rst │ ├── flambe.nn.rnn.rst │ ├── flambe.nn.rst │ ├── flambe.nn.sequential.rst │ ├── flambe.nn.softmax.rst │ ├── flambe.rst │ ├── flambe.runnable.cluster_runnable.rst │ ├── flambe.runnable.context.rst │ ├── flambe.runnable.environment.rst │ ├── flambe.runnable.error.rst │ ├── flambe.runnable.rst │ ├── flambe.runnable.runnable.rst │ ├── flambe.runnable.utils.rst │ ├── flambe.runner.garbage_collector.rst │ ├── flambe.runner.report_site_run.rst │ ├── flambe.runner.rst │ ├── flambe.runner.run.rst │ ├── flambe.runner.utils.rst │ ├── flambe.sampler.base.rst │ ├── flambe.sampler.episodic.rst │ ├── flambe.sampler.rst │ ├── flambe.sampler.sampler.rst │ ├── flambe.tokenizer.char.rst │ ├── flambe.tokenizer.label.rst │ ├── flambe.tokenizer.rst │ ├── flambe.tokenizer.tokenizer.rst │ ├── flambe.tokenizer.word.rst │ ├── flambe.version.rst │ └── flambe.vision.rst ├── starting │ ├── contribute.rst │ ├── install.rst │ ├── motivation.rst │ └── usage.rst ├── tutorials │ ├── aws_cluster.rst │ ├── custom.rst │ ├── multistage.rst │ ├── script.rst │ └── ssh_cluster.rst └── understanding │ ├── advanced.rst │ ├── builder.rst │ ├── clusters.rst │ ├── component.rst │ ├── experiments.rst │ ├── extensions.rst │ ├── report_site.rst │ ├── runnables.rst │ └── security.rst ├── examples ├── basic_example.yaml ├── cluster.yaml └── full_example.yaml ├── flambe ├── __init__.py ├── cluster │ ├── __init__.py │ ├── aws.py │ ├── cluster.py │ ├── const.py │ ├── errors.py │ ├── instance │ │ ├── __init__.py │ │ ├── errors.py │ │ ├── instance.py │ │ └── scripts │ │ │ ├── install_cuda_ubuntu1804.sh │ │ │ ├── install_docker.sh │ │ │ └── install_nvidia_docker.sh │ ├── ssh.py │ └── 
utils.py ├── compile │ ├── __init__.py │ ├── component.py │ ├── const.py │ ├── downloader.py │ ├── extensions.py │ ├── registrable.py │ ├── serialization.py │ └── utils.py ├── const.py ├── dataset │ ├── __init__.py │ ├── dataset.py │ └── tabular.py ├── experiment │ ├── __init__.py │ ├── experiment.py │ ├── options.py │ ├── progress.py │ ├── tune_adapter.py │ ├── utils.py │ ├── webapp │ │ ├── __init__.py │ │ ├── app.py │ │ ├── static │ │ │ ├── ASAPP_Flambe_Logo_RGB_FullColor.svg │ │ │ └── main.js │ │ └── templates │ │ │ ├── console.html │ │ │ └── index.html │ └── wording.py ├── export │ ├── __init__.py │ ├── builder.py │ └── exporter.py ├── field │ ├── __init__.py │ ├── bow.py │ ├── field.py │ ├── label.py │ └── text.py ├── learn │ ├── __init__.py │ ├── distillation.py │ ├── eval.py │ ├── script.py │ ├── train.py │ └── utils.py ├── logging │ ├── __init__.py │ ├── datatypes.py │ ├── handler │ │ ├── __init__.py │ │ ├── contextual_file.py │ │ └── tensorboard.py │ ├── logging.py │ └── utils.py ├── logo.py ├── metric │ ├── __init__.py │ ├── dev │ │ ├── __init__.py │ │ ├── accuracy.py │ │ ├── auc.py │ │ ├── binary.py │ │ ├── bpc.py │ │ ├── perplexity.py │ │ └── recall.py │ ├── loss │ │ ├── __init__.py │ │ ├── cross_entropy.py │ │ └── nll_loss.py │ └── metric.py ├── model │ ├── __init__.py │ └── logistic_regression.py ├── nlp │ ├── __init__.py │ ├── classification │ │ ├── __init__.py │ │ ├── datasets.py │ │ └── model.py │ ├── fewshot │ │ ├── __init__.py │ │ └── model.py │ ├── language_modeling │ │ ├── __init__.py │ │ ├── datasets.py │ │ ├── fields.py │ │ ├── model.py │ │ └── sampler.py │ └── transformers │ │ ├── __init__.py │ │ ├── field.py │ │ └── model.py ├── nn │ ├── __init__.py │ ├── cnn.py │ ├── distance │ │ ├── __init__.py │ │ ├── cosine.py │ │ ├── distance.py │ │ ├── euclidean.py │ │ └── hyperbolic.py │ ├── embedding.py │ ├── mlp.py │ ├── module.py │ ├── mos.py │ ├── pooling.py │ ├── rnn.py │ ├── sequential.py │ ├── softmax.py │ ├── transformer.py │ └── 
transformer_sru.py ├── optim │ ├── __init__.py │ ├── linear.py │ ├── noam.py │ ├── radam.py │ └── scheduler.py ├── runnable │ ├── __init__.py │ ├── cluster_runnable.py │ ├── context.py │ ├── environment.py │ ├── error.py │ ├── runnable.py │ └── utils.py ├── runner │ ├── __init__.py │ ├── report_site_run.py │ ├── run.py │ └── utils.py ├── sampler │ ├── __init__.py │ ├── base.py │ ├── episodic.py │ └── sampler.py ├── tokenizer │ ├── __init__.py │ ├── char.py │ ├── label.py │ ├── tokenizer.py │ └── word.py ├── utils │ ├── __init__.py │ └── config.py ├── version.py └── vision │ ├── __init__.py │ └── classification │ ├── __init__.py │ ├── datasets.py │ └── model.py ├── imgs ├── Flambe_Logo_CMYK_Black.png ├── Flambe_Logo_CMYK_FullColor.png ├── Flambe_Logo_CMYK_Pink.png ├── Flambe_Logo_CMYK_White.png ├── Flambe_Symbol_CMYK_Black.png ├── Flambe_Symbol_CMYK_Pink.png └── Flambe_Symbol_CMYK_White.png ├── mypy.ini ├── readthedocs.yaml ├── requirements.txt ├── scripts ├── deploy_documentation.py ├── publish_documentation.sh └── publish_pypi.sh ├── setup.py ├── sonar-project.properties ├── tests ├── .coveragerc ├── README.md ├── __init__.py ├── conftest.py ├── data │ ├── .gitignore │ ├── dummy_configs │ │ ├── config.txt │ │ └── wrong_config.yaml │ ├── dummy_embeddings │ │ └── test.txt │ ├── dummy_extensions │ │ ├── inference │ │ │ ├── flambe_inference │ │ │ │ ├── __init__.py │ │ │ │ └── obj.py │ │ │ └── setup.py │ │ ├── runnable │ │ │ ├── flambe_runnable │ │ │ │ ├── __init__.py │ │ │ │ └── runnable.py │ │ │ └── setup.py │ │ └── script │ │ │ ├── flambe_script │ │ │ ├── __init__.py │ │ │ └── train.py │ │ │ └── setup.py │ ├── dummy_tabular │ │ ├── test.csv │ │ ├── train.csv │ │ └── val.csv │ ├── dummy_tabular_test │ │ └── test.csv │ └── no_header_dataset.csv ├── integration │ ├── end2end │ │ ├── chain_intrablock.yaml │ │ ├── image.yaml │ │ ├── lm.yaml │ │ ├── random_tag.yaml │ │ ├── script.yaml │ │ ├── tc.yaml │ │ └── transformer.yaml │ ├── test_builder.py │ ├── test_examples.py │ 
└── test_resources_experiment.py ├── requirements.txt └── unit │ ├── __init__.py │ ├── cluster │ ├── test_aws.py │ ├── test_cluster.py │ └── test_instance.py │ ├── compile │ ├── __init__.py │ ├── test_compilable.py │ ├── test_downloader.py │ ├── test_extensions.py │ ├── test_registrable.py │ ├── test_serialization.py │ └── test_utils.py │ ├── dataset │ ├── __init__.py │ └── test_tabular.py │ ├── experiment │ ├── __init__.py │ ├── test_experiment.py │ ├── test_experiment_preprocess.py │ ├── test_options.py │ └── test_utils.py │ ├── field │ ├── __init__.py │ ├── test_label_field.py │ └── test_text_field.py │ ├── learn │ └── test_trainer.py │ ├── metrics │ ├── __init__.py │ ├── test_dev.py │ └── test_loss.py │ ├── model │ └── test_logistic_regression.py │ ├── nlp │ ├── __init__.py │ ├── classification │ │ ├── __init__.py │ │ └── test_tc_datasets.py │ └── language_modeling │ │ ├── __init__.py │ │ └── test_lm_datasets.py │ ├── nn │ ├── test_cnn.py │ ├── test_mlp.py │ ├── test_pooling.py │ └── test_rnn.py │ ├── optim │ └── test_scheduler.py │ ├── remote │ └── __init__.py │ ├── runnable │ └── test_runnable.py │ ├── runner │ ├── test_args.py │ └── test_utils.py │ └── sampler │ ├── __init__.py │ └── test_base.py └── tox.ini /.github/ISSUE_TEMPLATE/ask-a-question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Ask a Question 3 | about: Ask a question about Flambé 4 | title: '' 5 | labels: '' 6 | assignees: nmatthews-asapp 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 
12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. Specify if you're running on a cluster or locally. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Software Versions (please complete the following information):** 27 | - OS: [e.g. macOS 10.14, Ubuntu 18.04, ...] 28 | - Python Version [>= 3.6.1] 29 | - PyTorch Version [>= 1.0] 30 | - Flambé Version [>= 0.2.4] 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | jobs: 8 | publish: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v2 12 | - name: Set up Python 3.6 13 | uses: actions/setup-python@v1 14 | with: 15 | python-version: '3.6' 16 | - name: Install dependencies package 17 | run: | 18 | python -m pip install --upgrade pip 19 | pip install setuptools wheel twine 20 | - name: Build package 21 | run: | 22 | python setup.py sdist bdist_wheel 23 | - name: Publish in test pypi 24 | env: 25 | TWINE_USERNAME: ${{ secrets.PYPI_TEST_USERNAME }} 26 | TWINE_PASSWORD: ${{ secrets.PYPI_TEST_PASSWORD }} 27 | run: | 28 | twine upload --repository-url https://test.pypi.org/legacy/ dist/* 29 | - name: Official publish 30 | env: 31 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 32 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 33 | run: | 34 | twine upload dist/* 35 | -------------------------------------------------------------------------------- /.github/workflows/tests-fast-pr.yml: -------------------------------------------------------------------------------- 1 | name: Run fast tests 2 | 3 | on: pull_request 4 | 5 | jobs: 6 | unit: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | python-version: [3.6, 3.7] 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Set up Python ${{ matrix.python-version }} 14 | uses: actions/setup-python@v1 15 | with: 16 | python-version: ${{ matrix.python-version }} 17 | - name: Cache pip 18 | uses: actions/cache@v1 19 | with: 20 | path: ~/.cache/pip 21 | key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} 22 | restore-keys: | 23 | ${{ runner.os }}-pip- 24 | ${{ runner.os }}- 25 | - name: Install dependencies 26 | run: | 27 | python -m pip install --upgrade pip 28 | pip install 
--upgrade -r requirements.txt 29 | pip install --upgrade -r tests/requirements.txt 30 | pip install tox awscli 31 | python -m awscli configure set region us-east-1 32 | - name: Run Unit Tests with tox 33 | run: | 34 | tox -- -v -m "not end2end and not examples" 35 | - name: Upload test results 36 | uses: actions/upload-artifact@v1 37 | with: 38 | name: artifacts 39 | path: test_results 40 | 41 | end2end: 42 | runs-on: ubuntu-latest 43 | strategy: 44 | matrix: 45 | python-version: [3.6, 3.7] 46 | steps: 47 | - uses: actions/checkout@v2 48 | - name: Set up Python ${{ matrix.python-version }} 49 | uses: actions/setup-python@v1 50 | with: 51 | python-version: ${{ matrix.python-version }} 52 | - name: Cache pip 53 | uses: actions/cache@v1 54 | with: 55 | path: ~/.cache/pip 56 | key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} 57 | restore-keys: | 58 | ${{ runner.os }}-pip- 59 | ${{ runner.os }}- 60 | - name: Install dependencies 61 | run: | 62 | python -m pip install --upgrade pip 63 | pip install --upgrade -r requirements.txt 64 | pip install --upgrade -r tests/requirements.txt 65 | pip install tox awscli 66 | python -m awscli configure set region us-east-1 67 | - name: Run End2End Tests with tox 68 | run: | 69 | tox -- -v -m "end2end" 70 | -------------------------------------------------------------------------------- /.github/workflows/tests-fast.yml: -------------------------------------------------------------------------------- 1 | name: Run fast tests 2 | 3 | on: push 4 | 5 | jobs: 6 | unit: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | python-version: [3.6, 3.7] 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Set up Python ${{ matrix.python-version }} 14 | uses: actions/setup-python@v1 15 | with: 16 | python-version: ${{ matrix.python-version }} 17 | - name: Cache pip 18 | uses: actions/cache@v1 19 | with: 20 | path: ~/.cache/pip 21 | key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} 22 | restore-keys: | 23 | ${{ 
runner.os }}-pip- 24 | ${{ runner.os }}- 25 | - name: Install dependencies 26 | run: | 27 | python -m pip install --upgrade pip 28 | pip install --upgrade -r requirements.txt 29 | pip install --upgrade -r tests/requirements.txt 30 | pip install tox awscli 31 | python -m awscli configure set region us-east-1 32 | - name: Run Unit Tests with tox 33 | run: | 34 | tox -- -v -m "not end2end and not examples" 35 | - name: Upload test results 36 | uses: actions/upload-artifact@v1 37 | with: 38 | name: artifacts 39 | path: test_results 40 | 41 | end2end: 42 | runs-on: ubuntu-latest 43 | strategy: 44 | matrix: 45 | python-version: [3.6, 3.7] 46 | steps: 47 | - uses: actions/checkout@v2 48 | - name: Set up Python ${{ matrix.python-version }} 49 | uses: actions/setup-python@v1 50 | with: 51 | python-version: ${{ matrix.python-version }} 52 | - name: Cache pip 53 | uses: actions/cache@v1 54 | with: 55 | path: ~/.cache/pip 56 | key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} 57 | restore-keys: | 58 | ${{ runner.os }}-pip- 59 | ${{ runner.os }}- 60 | - name: Install dependencies 61 | run: | 62 | python -m pip install --upgrade pip 63 | pip install --upgrade -r requirements.txt 64 | pip install --upgrade -r tests/requirements.txt 65 | pip install tox awscli 66 | python -m awscli configure set region us-east-1 67 | - name: Run End2End Tests with tox 68 | run: | 69 | tox -- -v -m "end2end" 70 | 71 | sonarCloudTrigger: 72 | name: SonarCloud Trigger 73 | needs: unit 74 | runs-on: ubuntu-latest 75 | steps: 76 | - uses: actions/checkout@v2 77 | - name: Download test results 78 | uses: actions/download-artifact@v1 79 | with: 80 | name: artifacts 81 | - name: SonarCloud Scan 82 | uses: SonarSource/sonarcloud-github-action@v1.1 83 | env: 84 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 85 | SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} 86 | -------------------------------------------------------------------------------- /.github/workflows/tests-slow.yml: 
-------------------------------------------------------------------------------- 1 | name: Run slow tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | examples: 10 | timeout-minutes: 60 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: [3.6, 3.7] 15 | steps: 16 | - uses: actions/checkout@v2 17 | - name: Set up Python ${{ matrix.python-version }} 18 | uses: actions/setup-python@v1 19 | with: 20 | python-version: ${{ matrix.python-version }} 21 | - name: Cache pip 22 | uses: actions/cache@v1 23 | with: 24 | path: ~/.cache/pip 25 | key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} 26 | restore-keys: | 27 | ${{ runner.os }}-pip- 28 | ${{ runner.os }}- 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install --upgrade -r requirements.txt 33 | pip install --upgrade -r tests/requirements.txt 34 | pip install tox awscli 35 | python -m awscli configure set region us-east-1 36 | - name: Text examples 37 | run: | 38 | python3 -m tox -- -v -m "examples" 39 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribute 2 | 3 | We aim to foster a healthy, inclusive, organized and efficient community around the Flambé project. We believe prioritizing responsiveness, thoroughness and helping others along with following best practices. Below we outline what’s involved in different aspects of contribution. 4 | 5 | ### Contributor License Agreement ("CLA") 6 | 7 | In order to accept your pull request, we need you to submit a CLA. Once you complete your CLA, then it is valid for all future contributions you make to this repository. 
8 | 9 | Complete your CLA here: [CLA Form](https://docs.google.com/forms/d/e/1FAIpQLScMjmYVAnjRDks-n925KKyWqvsMbn_NBEWXZ4LvyOBtq1QTDQ/viewform) 10 | 11 | ### Filing an issue (Bug or Enhancement) 12 | 13 | Please create an issue on [GitHub](https://github.com/asappresearch/flambe/issues). 14 | 15 | Please follow the templates shown when you click on “New Issue.” 16 | 17 | ### Fixing a bug (PR, etc.) 18 | 19 | Open an issue for the bug if one doesn’t exist already. To create a PR, you must first fork the repository and apply your changes to the fork. When addressing a particular issue, please follow the naming convention: issue-###_short-desc_github-handle, using the issue number for the relevant bug. Your PR will be reviewed, merged, and ultimately included in a future release (see next section). 20 | 21 | If your bug fix does not follow our style and contribution guidelines, you will have to make the necessary changes before we can accept the PR. Further, you can expect at least 1-2 reviews from the flambé team where we will check for code quality. 22 | 23 | ### Release Cycle 24 | 25 | We aim to release patches at least once a week, but currently follow no other regular cycles or sprint schedules. 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 ASAPP Inc. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | include README-pypi.rst 3 | include requirements.txt 4 | include tests/requirements.txt 5 | include flambe/experiment/webapp/templates/* 6 | include flambe/experiment/webapp/static/* 7 | include flambe/cluster/instance/scripts/* 8 | -------------------------------------------------------------------------------- /STYLE.md: -------------------------------------------------------------------------------- 1 | # Style 2 | 3 | Flambé is lightweight, pragmatic, and easy to understand; To achieve these qualities we follow Python’s PEP8 style guide, Google’s python style guide, and use NumPy docstring format. 
Because Machine Learning pipelines are prone to subtle bugs, we use Python’s type annotations to enforce safe type contracts across the codebase. In general, we recommend following best practices, particularly as outlined here, up until the point that stylistic concerns dramatically slow down development. 4 | 5 | When contributing to Flambé, please make sure to read the style guideline for code and numpy docstrings: 6 | 7 | - http://google.github.io/styleguide/pyguide.html 8 | - https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html 9 | 10 | We run Flake8 and mypy in our test suite, which will flag a lot of errors unless you respect the guidelines. 11 | 12 | > “A Foolish Consistency is the Hobgoblin of Little Minds” - Emerson 13 | -------------------------------------------------------------------------------- /bandit.yaml: -------------------------------------------------------------------------------- 1 | ### profile may optionally select or skip tests 2 | 3 | skips: ['B301','B311','B403','B404','B603','B605','B601','B607','B307', 'B110', 'B602'] 4 | 5 | # See bandit --help for details on each skip 6 | -------------------------------------------------------------------------------- /bin/flambe: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | python3 -Wignore -u -m flambe.runner.run "$@" 3 | -------------------------------------------------------------------------------- /bin/flambe-site: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | python3 -m flambe.runner.report_site_run "$@" 3 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 
5 | SPHINXOPTS = 6 | SPHINXBUILD = python -msphinx 7 | SPHINXPROJ = Flambe 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | ## How to compile documentation 2 | 3 | #### Install requirements 4 | 5 | ```bash 6 | $ pip install -r docs/requirements.txt 7 | ``` 8 | 9 | Run: 10 | 11 | ```bash 12 | $ make html 13 | ``` 14 | 15 | This will generate a `_build` folder with the `html` files in it 16 | 17 | 18 | #### Rebuilding RST's 19 | 20 | ```bash 21 | $ sphinx-apidoc -fMeT -o source ../flambe 22 | ``` 23 | 24 | Then: 25 | 26 | ```bash 27 | $ rm source/flambe.rst 28 | $ rm source/flambe.logo.rst 29 | $ rm source/flambe.version.rst 30 | ``` 31 | -------------------------------------------------------------------------------- /docs/image/ASAPP_Flambe_Symbol_RGB_Black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/ASAPP_Flambe_Symbol_RGB_Black.png -------------------------------------------------------------------------------- /docs/image/ASAPP_Flambe_Symbol_RGB_Pink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/ASAPP_Flambe_Symbol_RGB_Pink.png 
-------------------------------------------------------------------------------- /docs/image/Flambe_Logo_RGB_FullColor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/Flambe_Logo_RGB_FullColor.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/access-keys.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/access-keys.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/attach-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/attach-2.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/attach.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/attach.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/auto-assign-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/auto-assign-2.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/auto-assign.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/auto-assign.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/cluster-run-reuse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/cluster-run-reuse.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/cluster-run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/cluster-run.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/console-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/console-home.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/create-ig.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/create-ig.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/create-internet-gateway.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/create-internet-gateway.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/create-key-pair.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/create-key-pair.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/create-sg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/create-sg.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/create-subnet-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/create-subnet-2.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/create-subnet-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/create-subnet-3.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/create-subnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/create-subnet.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/create-vpc-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/create-vpc-2.png 
-------------------------------------------------------------------------------- /docs/image/aws-cluster/create-vpc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/create-vpc.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/download-key-pair.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/download-key-pair.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/download-keys.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/download-keys.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/ec2-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/ec2-home.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/edit-dns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/edit-dns.png -------------------------------------------------------------------------------- /docs/image/aws-cluster/instances.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/aws-cluster/instances.png -------------------------------------------------------------------------------- /docs/image/remote-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/remote-arch.png -------------------------------------------------------------------------------- /docs/image/report-site/console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/report-site/console.png -------------------------------------------------------------------------------- /docs/image/report-site/final-variants.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/report-site/final-variants.png -------------------------------------------------------------------------------- /docs/image/report-site/final.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/report-site/final.png -------------------------------------------------------------------------------- /docs/image/report-site/partial-variants.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/report-site/partial-variants.png -------------------------------------------------------------------------------- /docs/image/report-site/partial.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/report-site/partial.png -------------------------------------------------------------------------------- /docs/image/report-site/tensorboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/report-site/tensorboard.png -------------------------------------------------------------------------------- /docs/image/ssh-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/docs/image/ssh-cluster.png -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx-rtd-theme 3 | sphinx-autodoc-typehints 4 | sphinx-autoapi 5 | 6 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.aws.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster.aws module 2 | ========================= 3 | 4 | .. automodule:: flambe.cluster.aws 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.cluster.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster.cluster module 2 | ============================= 3 | 4 | .. 
automodule:: flambe.cluster.cluster 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.const.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster.const module 2 | =========================== 3 | 4 | .. automodule:: flambe.cluster.const 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.errors.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster.errors module 2 | ============================ 3 | 4 | .. automodule:: flambe.cluster.errors 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.instance.errors.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster.instance.errors module 2 | ===================================== 3 | 4 | .. automodule:: flambe.cluster.instance.errors 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.instance.instance.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster.instance.instance module 2 | ======================================= 3 | 4 | .. automodule:: flambe.cluster.instance.instance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.instance.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster.instance package 2 | =============================== 3 | 4 | .. 
automodule:: flambe.cluster.instance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.cluster.instance.errors 15 | flambe.cluster.instance.instance 16 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster package 2 | ====================== 3 | 4 | .. automodule:: flambe.cluster 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Subpackages 10 | ----------- 11 | 12 | .. toctree:: 13 | 14 | flambe.cluster.instance 15 | 16 | Submodules 17 | ---------- 18 | 19 | .. toctree:: 20 | 21 | flambe.cluster.aws 22 | flambe.cluster.cluster 23 | flambe.cluster.const 24 | flambe.cluster.errors 25 | flambe.cluster.ssh 26 | flambe.cluster.utils 27 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.ssh.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster.ssh module 2 | ========================= 3 | 4 | .. automodule:: flambe.cluster.ssh 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.cluster.utils.rst: -------------------------------------------------------------------------------- 1 | flambe.cluster.utils module 2 | =========================== 3 | 4 | .. automodule:: flambe.cluster.utils 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.compile.component.rst: -------------------------------------------------------------------------------- 1 | flambe.compile.component module 2 | =============================== 3 | 4 | .. 
automodule:: flambe.compile.component 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.compile.const.rst: -------------------------------------------------------------------------------- 1 | flambe.compile.const module 2 | =========================== 3 | 4 | .. automodule:: flambe.compile.const 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.compile.downloader.rst: -------------------------------------------------------------------------------- 1 | flambe.compile.downloader module 2 | ================================ 3 | 4 | .. automodule:: flambe.compile.downloader 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.compile.extensions.rst: -------------------------------------------------------------------------------- 1 | flambe.compile.extensions module 2 | ================================ 3 | 4 | .. automodule:: flambe.compile.extensions 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.compile.registrable.rst: -------------------------------------------------------------------------------- 1 | flambe.compile.registrable module 2 | ================================= 3 | 4 | .. automodule:: flambe.compile.registrable 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.compile.rst: -------------------------------------------------------------------------------- 1 | flambe.compile package 2 | ====================== 3 | 4 | .. 
automodule:: flambe.compile 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.compile.component 15 | flambe.compile.const 16 | flambe.compile.downloader 17 | flambe.compile.extensions 18 | flambe.compile.registrable 19 | flambe.compile.serialization 20 | flambe.compile.utils 21 | -------------------------------------------------------------------------------- /docs/source/flambe.compile.serialization.rst: -------------------------------------------------------------------------------- 1 | flambe.compile.serialization module 2 | =================================== 3 | 4 | .. automodule:: flambe.compile.serialization 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.compile.utils.rst: -------------------------------------------------------------------------------- 1 | flambe.compile.utils module 2 | =========================== 3 | 4 | .. automodule:: flambe.compile.utils 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.const.rst: -------------------------------------------------------------------------------- 1 | flambe.const module 2 | =================== 3 | 4 | .. automodule:: flambe.const 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.dataset.dataset.rst: -------------------------------------------------------------------------------- 1 | flambe.dataset.dataset module 2 | ============================= 3 | 4 | .. 
automodule:: flambe.dataset.dataset 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.dataset.rst: -------------------------------------------------------------------------------- 1 | flambe.dataset package 2 | ====================== 3 | 4 | .. automodule:: flambe.dataset 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.dataset.dataset 15 | flambe.dataset.tabular 16 | -------------------------------------------------------------------------------- /docs/source/flambe.dataset.tabular.rst: -------------------------------------------------------------------------------- 1 | flambe.dataset.tabular module 2 | ============================= 3 | 4 | .. automodule:: flambe.dataset.tabular 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.experiment.experiment.rst: -------------------------------------------------------------------------------- 1 | flambe.experiment.experiment module 2 | =================================== 3 | 4 | .. automodule:: flambe.experiment.experiment 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.experiment.options.rst: -------------------------------------------------------------------------------- 1 | flambe.experiment.options module 2 | ================================ 3 | 4 | .. 
automodule:: flambe.experiment.options 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.experiment.progress.rst: -------------------------------------------------------------------------------- 1 | flambe.experiment.progress module 2 | ================================= 3 | 4 | .. automodule:: flambe.experiment.progress 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.experiment.rst: -------------------------------------------------------------------------------- 1 | flambe.experiment package 2 | ========================= 3 | 4 | .. automodule:: flambe.experiment 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Subpackages 10 | ----------- 11 | 12 | .. toctree:: 13 | 14 | flambe.experiment.webapp 15 | 16 | Submodules 17 | ---------- 18 | 19 | .. toctree:: 20 | 21 | flambe.experiment.experiment 22 | flambe.experiment.options 23 | flambe.experiment.progress 24 | flambe.experiment.tune_adapter 25 | flambe.experiment.utils 26 | flambe.experiment.wording 27 | -------------------------------------------------------------------------------- /docs/source/flambe.experiment.tune_adapter.rst: -------------------------------------------------------------------------------- 1 | flambe.experiment.tune\_adapter module 2 | ====================================== 3 | 4 | .. automodule:: flambe.experiment.tune_adapter 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.experiment.utils.rst: -------------------------------------------------------------------------------- 1 | flambe.experiment.utils module 2 | ============================== 3 | 4 | .. 
automodule:: flambe.experiment.utils 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.experiment.webapp.app.rst: -------------------------------------------------------------------------------- 1 | flambe.experiment.webapp.app module 2 | =================================== 3 | 4 | .. automodule:: flambe.experiment.webapp.app 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.experiment.webapp.rst: -------------------------------------------------------------------------------- 1 | flambe.experiment.webapp package 2 | ================================ 3 | 4 | .. automodule:: flambe.experiment.webapp 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.experiment.webapp.app 15 | -------------------------------------------------------------------------------- /docs/source/flambe.experiment.wording.rst: -------------------------------------------------------------------------------- 1 | flambe.experiment.wording module 2 | ================================ 3 | 4 | .. automodule:: flambe.experiment.wording 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.export.builder.rst: -------------------------------------------------------------------------------- 1 | flambe.export.builder module 2 | ============================ 3 | 4 | .. 
automodule:: flambe.export.builder 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.export.exporter.rst: -------------------------------------------------------------------------------- 1 | flambe.export.exporter module 2 | ============================= 3 | 4 | .. automodule:: flambe.export.exporter 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.export.rst: -------------------------------------------------------------------------------- 1 | flambe.export package 2 | ===================== 3 | 4 | .. automodule:: flambe.export 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.export.builder 15 | flambe.export.exporter 16 | -------------------------------------------------------------------------------- /docs/source/flambe.field.bow.rst: -------------------------------------------------------------------------------- 1 | flambe.field.bow module 2 | ======================= 3 | 4 | .. automodule:: flambe.field.bow 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.field.field.rst: -------------------------------------------------------------------------------- 1 | flambe.field.field module 2 | ========================= 3 | 4 | .. automodule:: flambe.field.field 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.field.label.rst: -------------------------------------------------------------------------------- 1 | flambe.field.label module 2 | ========================= 3 | 4 | .. 
automodule:: flambe.field.label 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.field.rst: -------------------------------------------------------------------------------- 1 | flambe.field package 2 | ==================== 3 | 4 | .. automodule:: flambe.field 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.field.bow 15 | flambe.field.field 16 | flambe.field.label 17 | flambe.field.text 18 | -------------------------------------------------------------------------------- /docs/source/flambe.field.text.rst: -------------------------------------------------------------------------------- 1 | flambe.field.text module 2 | ======================== 3 | 4 | .. automodule:: flambe.field.text 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.learn.distillation.rst: -------------------------------------------------------------------------------- 1 | flambe.learn.distillation module 2 | ================================ 3 | 4 | .. automodule:: flambe.learn.distillation 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.learn.eval.rst: -------------------------------------------------------------------------------- 1 | flambe.learn.eval module 2 | ======================== 3 | 4 | .. automodule:: flambe.learn.eval 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.learn.rst: -------------------------------------------------------------------------------- 1 | flambe.learn package 2 | ==================== 3 | 4 | .. 
automodule:: flambe.learn 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.learn.distillation 15 | flambe.learn.eval 16 | flambe.learn.script 17 | flambe.learn.train 18 | -------------------------------------------------------------------------------- /docs/source/flambe.learn.script.rst: -------------------------------------------------------------------------------- 1 | flambe.learn.script module 2 | ========================== 3 | 4 | .. automodule:: flambe.learn.script 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.learn.train.rst: -------------------------------------------------------------------------------- 1 | flambe.learn.train module 2 | ========================= 3 | 4 | .. automodule:: flambe.learn.train 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.logging.datatypes.rst: -------------------------------------------------------------------------------- 1 | flambe.logging.datatypes module 2 | =============================== 3 | 4 | .. automodule:: flambe.logging.datatypes 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.logging.handler.contextual_file.rst: -------------------------------------------------------------------------------- 1 | flambe.logging.handler.contextual\_file module 2 | ============================================== 3 | 4 | .. 
automodule:: flambe.logging.handler.contextual_file 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.logging.handler.rst: -------------------------------------------------------------------------------- 1 | flambe.logging.handler package 2 | ============================== 3 | 4 | .. automodule:: flambe.logging.handler 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.logging.handler.contextual_file 15 | flambe.logging.handler.tensorboard 16 | -------------------------------------------------------------------------------- /docs/source/flambe.logging.handler.tensorboard.rst: -------------------------------------------------------------------------------- 1 | flambe.logging.handler.tensorboard module 2 | ========================================= 3 | 4 | .. automodule:: flambe.logging.handler.tensorboard 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.logging.logging.rst: -------------------------------------------------------------------------------- 1 | flambe.logging.logging module 2 | ============================= 3 | 4 | .. automodule:: flambe.logging.logging 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.logging.rst: -------------------------------------------------------------------------------- 1 | flambe.logging package 2 | ====================== 3 | 4 | .. automodule:: flambe.logging 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Subpackages 10 | ----------- 11 | 12 | .. toctree:: 13 | 14 | flambe.logging.handler 15 | 16 | Submodules 17 | ---------- 18 | 19 | .. 
toctree:: 20 | 21 | flambe.logging.datatypes 22 | flambe.logging.logging 23 | flambe.logging.utils 24 | -------------------------------------------------------------------------------- /docs/source/flambe.logging.utils.rst: -------------------------------------------------------------------------------- 1 | flambe.logging.utils module 2 | =========================== 3 | 4 | .. automodule:: flambe.logging.utils 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.logo.rst: -------------------------------------------------------------------------------- 1 | flambe.logo module 2 | ================== 3 | 4 | .. automodule:: flambe.logo 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.dev.accuracy.rst: -------------------------------------------------------------------------------- 1 | flambe.metric.dev.accuracy module 2 | ================================= 3 | 4 | .. automodule:: flambe.metric.dev.accuracy 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.dev.auc.rst: -------------------------------------------------------------------------------- 1 | flambe.metric.dev.auc module 2 | ============================ 3 | 4 | .. automodule:: flambe.metric.dev.auc 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.dev.binary.rst: -------------------------------------------------------------------------------- 1 | flambe.metric.dev.binary module 2 | =============================== 3 | 4 | .. 
automodule:: flambe.metric.dev.binary 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.dev.perplexity.rst: -------------------------------------------------------------------------------- 1 | flambe.metric.dev.perplexity module 2 | =================================== 3 | 4 | .. automodule:: flambe.metric.dev.perplexity 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.dev.rst: -------------------------------------------------------------------------------- 1 | flambe.metric.dev package 2 | ========================= 3 | 4 | .. automodule:: flambe.metric.dev 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.metric.dev.accuracy 15 | flambe.metric.dev.auc 16 | flambe.metric.dev.binary 17 | flambe.metric.dev.perplexity 18 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.loss.cross_entropy.rst: -------------------------------------------------------------------------------- 1 | flambe.metric.loss.cross\_entropy module 2 | ======================================== 3 | 4 | .. automodule:: flambe.metric.loss.cross_entropy 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.loss.nll_loss.rst: -------------------------------------------------------------------------------- 1 | flambe.metric.loss.nll\_loss module 2 | =================================== 3 | 4 | .. 
automodule:: flambe.metric.loss.nll_loss 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.loss.rst: -------------------------------------------------------------------------------- 1 | flambe.metric.loss package 2 | ========================== 3 | 4 | .. automodule:: flambe.metric.loss 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.metric.loss.cross_entropy 15 | flambe.metric.loss.nll_loss 16 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.metric.rst: -------------------------------------------------------------------------------- 1 | flambe.metric.metric module 2 | =========================== 3 | 4 | .. automodule:: flambe.metric.metric 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.metric.rst: -------------------------------------------------------------------------------- 1 | flambe.metric package 2 | ===================== 3 | 4 | .. automodule:: flambe.metric 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Subpackages 10 | ----------- 11 | 12 | .. toctree:: 13 | 14 | flambe.metric.dev 15 | flambe.metric.loss 16 | 17 | Submodules 18 | ---------- 19 | 20 | .. toctree:: 21 | 22 | flambe.metric.metric 23 | -------------------------------------------------------------------------------- /docs/source/flambe.model.logistic_regression.rst: -------------------------------------------------------------------------------- 1 | flambe.model.logistic\_regression module 2 | ======================================== 3 | 4 | .. 
automodule:: flambe.model.logistic_regression 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.model.rst: -------------------------------------------------------------------------------- 1 | flambe.model package 2 | ==================== 3 | 4 | .. automodule:: flambe.model 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.model.logistic_regression 15 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.classification.datasets.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.classification.datasets module 2 | ========================================= 3 | 4 | .. automodule:: flambe.nlp.classification.datasets 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.classification.model.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.classification.model module 2 | ====================================== 3 | 4 | .. automodule:: flambe.nlp.classification.model 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.classification.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.classification package 2 | ================================= 3 | 4 | .. automodule:: flambe.nlp.classification 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. 
toctree:: 13 | 14 | flambe.nlp.classification.datasets 15 | flambe.nlp.classification.model 16 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.fewshot.model.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.fewshot.model module 2 | =============================== 3 | 4 | .. automodule:: flambe.nlp.fewshot.model 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.fewshot.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.fewshot package 2 | ========================== 3 | 4 | .. automodule:: flambe.nlp.fewshot 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.nlp.fewshot.model 15 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.language_modeling.datasets.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.language\_modeling.datasets module 2 | ============================================= 3 | 4 | .. automodule:: flambe.nlp.language_modeling.datasets 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.language_modeling.fields.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.language\_modeling.fields module 2 | =========================================== 3 | 4 | .. 
automodule:: flambe.nlp.language_modeling.fields 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.language_modeling.model.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.language\_modeling.model module 2 | ========================================== 3 | 4 | .. automodule:: flambe.nlp.language_modeling.model 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.language_modeling.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.language\_modeling package 2 | ===================================== 3 | 4 | .. automodule:: flambe.nlp.language_modeling 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.nlp.language_modeling.datasets 15 | flambe.nlp.language_modeling.fields 16 | flambe.nlp.language_modeling.model 17 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp package 2 | ================== 3 | 4 | .. automodule:: flambe.nlp 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Subpackages 10 | ----------- 11 | 12 | .. toctree:: 13 | 14 | flambe.nlp.classification 15 | flambe.nlp.fewshot 16 | flambe.nlp.language_modeling 17 | flambe.nlp.transformers 18 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.transformers.bert.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.transformers.bert module 2 | =================================== 3 | 4 | .. 
automodule:: flambe.nlp.transformers.bert 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.transformers.openai.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.transformers.openai module 2 | ===================================== 3 | 4 | .. automodule:: flambe.nlp.transformers.openai 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.transformers.optim.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.transformers.optim module 2 | ==================================== 3 | 4 | .. automodule:: flambe.nlp.transformers.optim 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nlp.transformers.rst: -------------------------------------------------------------------------------- 1 | flambe.nlp.transformers package 2 | =============================== 3 | 4 | .. automodule:: flambe.nlp.transformers 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.nlp.transformers.bert 15 | flambe.nlp.transformers.openai 16 | flambe.nlp.transformers.optim 17 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.cnn.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.cnn module 2 | ==================== 3 | 4 | .. 
automodule:: flambe.nn.cnn 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.distance.cosine.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.distance.cosine module 2 | ================================ 3 | 4 | .. automodule:: flambe.nn.distance.cosine 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.distance.distance.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.distance.distance module 2 | ================================== 3 | 4 | .. automodule:: flambe.nn.distance.distance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.distance.euclidean.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.distance.euclidean module 2 | =================================== 3 | 4 | .. automodule:: flambe.nn.distance.euclidean 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.distance.hyperbolic.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.distance.hyperbolic module 2 | ==================================== 3 | 4 | .. automodule:: flambe.nn.distance.hyperbolic 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.distance.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.distance package 2 | ========================== 3 | 4 | .. 
automodule:: flambe.nn.distance 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.nn.distance.cosine 15 | flambe.nn.distance.distance 16 | flambe.nn.distance.euclidean 17 | flambe.nn.distance.hyperbolic 18 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.embedder.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.embedder module 2 | ========================= 3 | 4 | .. automodule:: flambe.nn.embedder 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.mlp.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.mlp module 2 | ==================== 3 | 4 | .. automodule:: flambe.nn.mlp 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.module.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.module module 2 | ======================= 3 | 4 | .. automodule:: flambe.nn.module 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.mos.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.mos module 2 | ==================== 3 | 4 | .. automodule:: flambe.nn.mos 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.rnn.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.rnn module 2 | ==================== 3 | 4 | .. 
automodule:: flambe.nn.rnn 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.rst: -------------------------------------------------------------------------------- 1 | flambe.nn package 2 | ================= 3 | 4 | .. automodule:: flambe.nn 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Subpackages 10 | ----------- 11 | 12 | .. toctree:: 13 | 14 | flambe.nn.distance 15 | 16 | Submodules 17 | ---------- 18 | 19 | .. toctree:: 20 | 21 | flambe.nn.cnn 22 | flambe.nn.embedder 23 | flambe.nn.mlp 24 | flambe.nn.module 25 | flambe.nn.mos 26 | flambe.nn.rnn 27 | flambe.nn.sequential 28 | flambe.nn.softmax 29 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.sequential.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.sequential module 2 | =========================== 3 | 4 | .. automodule:: flambe.nn.sequential 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.nn.softmax.rst: -------------------------------------------------------------------------------- 1 | flambe.nn.softmax module 2 | ======================== 3 | 4 | .. automodule:: flambe.nn.softmax 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.rst: -------------------------------------------------------------------------------- 1 | flambe package 2 | ============== 3 | 4 | .. automodule:: flambe 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Subpackages 10 | ----------- 11 | 12 | .. 
toctree:: 13 | 14 | flambe.cluster 15 | flambe.compile 16 | flambe.dataset 17 | flambe.experiment 18 | flambe.export 19 | flambe.field 20 | flambe.learn 21 | flambe.logging 22 | flambe.metric 23 | flambe.model 24 | flambe.nlp 25 | flambe.nn 26 | flambe.runnable 27 | flambe.runner 28 | flambe.sampler 29 | flambe.tokenizer 30 | flambe.vision 31 | 32 | Submodules 33 | ---------- 34 | 35 | .. toctree:: 36 | 37 | flambe.const 38 | flambe.logo 39 | flambe.version 40 | -------------------------------------------------------------------------------- /docs/source/flambe.runnable.cluster_runnable.rst: -------------------------------------------------------------------------------- 1 | flambe.runnable.cluster\_runnable module 2 | ======================================== 3 | 4 | .. automodule:: flambe.runnable.cluster_runnable 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.runnable.context.rst: -------------------------------------------------------------------------------- 1 | flambe.runnable.context module 2 | ============================== 3 | 4 | .. automodule:: flambe.runnable.context 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.runnable.environment.rst: -------------------------------------------------------------------------------- 1 | flambe.runnable.environment module 2 | ================================== 3 | 4 | .. automodule:: flambe.runnable.environment 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.runnable.error.rst: -------------------------------------------------------------------------------- 1 | flambe.runnable.error module 2 | ============================ 3 | 4 | .. 
automodule:: flambe.runnable.error 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.runnable.rst: -------------------------------------------------------------------------------- 1 | flambe.runnable package 2 | ======================= 3 | 4 | .. automodule:: flambe.runnable 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.runnable.cluster_runnable 15 | flambe.runnable.context 16 | flambe.runnable.environment 17 | flambe.runnable.error 18 | flambe.runnable.runnable 19 | flambe.runnable.utils 20 | -------------------------------------------------------------------------------- /docs/source/flambe.runnable.runnable.rst: -------------------------------------------------------------------------------- 1 | flambe.runnable.runnable module 2 | =============================== 3 | 4 | .. automodule:: flambe.runnable.runnable 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.runnable.utils.rst: -------------------------------------------------------------------------------- 1 | flambe.runnable.utils module 2 | ============================ 3 | 4 | .. automodule:: flambe.runnable.utils 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.runner.garbage_collector.rst: -------------------------------------------------------------------------------- 1 | flambe.runner.garbage\_collector module 2 | ======================================= 3 | 4 | .. 
automodule:: flambe.runner.garbage_collector 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.runner.report_site_run.rst: -------------------------------------------------------------------------------- 1 | flambe.runner.report\_site\_run module 2 | ====================================== 3 | 4 | .. automodule:: flambe.runner.report_site_run 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.runner.rst: -------------------------------------------------------------------------------- 1 | flambe.runner package 2 | ===================== 3 | 4 | .. automodule:: flambe.runner 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.runner.garbage_collector 15 | flambe.runner.report_site_run 16 | flambe.runner.run 17 | flambe.runner.utils 18 | -------------------------------------------------------------------------------- /docs/source/flambe.runner.run.rst: -------------------------------------------------------------------------------- 1 | flambe.runner.run module 2 | ======================== 3 | 4 | .. automodule:: flambe.runner.run 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.runner.utils.rst: -------------------------------------------------------------------------------- 1 | flambe.runner.utils module 2 | ========================== 3 | 4 | .. 
automodule:: flambe.runner.utils 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.sampler.base.rst: -------------------------------------------------------------------------------- 1 | flambe.sampler.base module 2 | ========================== 3 | 4 | .. automodule:: flambe.sampler.base 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.sampler.episodic.rst: -------------------------------------------------------------------------------- 1 | flambe.sampler.episodic module 2 | ============================== 3 | 4 | .. automodule:: flambe.sampler.episodic 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.sampler.rst: -------------------------------------------------------------------------------- 1 | flambe.sampler package 2 | ====================== 3 | 4 | .. automodule:: flambe.sampler 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.sampler.base 15 | flambe.sampler.episodic 16 | flambe.sampler.sampler 17 | -------------------------------------------------------------------------------- /docs/source/flambe.sampler.sampler.rst: -------------------------------------------------------------------------------- 1 | flambe.sampler.sampler module 2 | ============================= 3 | 4 | .. automodule:: flambe.sampler.sampler 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.tokenizer.char.rst: -------------------------------------------------------------------------------- 1 | flambe.tokenizer.char module 2 | ============================ 3 | 4 | .. 
automodule:: flambe.tokenizer.char 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.tokenizer.label.rst: -------------------------------------------------------------------------------- 1 | flambe.tokenizer.label module 2 | ============================= 3 | 4 | .. automodule:: flambe.tokenizer.label 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.tokenizer.rst: -------------------------------------------------------------------------------- 1 | flambe.tokenizer package 2 | ======================== 3 | 4 | .. automodule:: flambe.tokenizer 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Submodules 10 | ---------- 11 | 12 | .. toctree:: 13 | 14 | flambe.tokenizer.char 15 | flambe.tokenizer.label 16 | flambe.tokenizer.tokenizer 17 | flambe.tokenizer.word 18 | -------------------------------------------------------------------------------- /docs/source/flambe.tokenizer.tokenizer.rst: -------------------------------------------------------------------------------- 1 | flambe.tokenizer.tokenizer module 2 | ================================= 3 | 4 | .. automodule:: flambe.tokenizer.tokenizer 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.tokenizer.word.rst: -------------------------------------------------------------------------------- 1 | flambe.tokenizer.word module 2 | ============================ 3 | 4 | .. 
automodule:: flambe.tokenizer.word 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.version.rst: -------------------------------------------------------------------------------- 1 | flambe.version module 2 | ===================== 3 | 4 | .. automodule:: flambe.version 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/source/flambe.vision.rst: -------------------------------------------------------------------------------- 1 | flambe.vision package 2 | ===================== 3 | 4 | .. automodule:: flambe.vision 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/starting/install.rst: -------------------------------------------------------------------------------- 1 | .. _starting-install_label: 2 | 3 | ============ 4 | Installation 5 | ============ 6 | 7 | Via ``pip`` 8 | ------------ 9 | 10 | You can install the latest **stable** version of flambé as follows: 11 | 12 | .. code:: bash 13 | 14 | pip install flambe 15 | 16 | 17 | From source 18 | ----------- 19 | 20 | For the latest version you can install from source: 21 | 22 | .. code:: bash 23 | 24 | git clone git@github.com:asappresearch/flambe.git 25 | cd flambe 26 | pip install . 27 | 28 | 29 | .. hint:: 30 | We recommend installing flambé in an isolated `virtual environment `_ 31 | -------------------------------------------------------------------------------- /docs/starting/motivation.rst: -------------------------------------------------------------------------------- 1 | ..
_starting-motivation: 2 | 3 | ========== 4 | Motivation 5 | ========== 6 | 7 | Flambé's primary objective is to **speed up all of the research lifecycle** including model prototyping, 8 | hyperparameter optimization and execution on a cluster. 9 | 10 | 11 | Why Flambé? 12 | ----------- 13 | 14 | 1. Running machine learning experiments takes a lot of continuous and tedious effort. 15 | 2. Standardizing data preprocessing and weights sharing across the community or within a team is difficult. 16 | 17 | We've found that while there are several new libraries offering a selection of 18 | reliable model implementations, there isn't a great library that couples these 19 | modules with an experimentation framework. Since experimentation (especially 20 | hyper-parameter search, deployment on remote machines, and data loading and 21 | preprocessing) is one of the most important and time-consuming aspects of ML 22 | research we decided to build Flambé. 23 | 24 | An important component of Flambé is Ray, an open source distributed ML library. 25 | Ray has some of the necessary infrastructure to build experiments at scale; 26 | coupled with Flambé you could be tuning many variants of your already existing 27 | models on a large cluster in minutes! Flambé's crucial contribution is to 28 | facilitate rapid iteration and experimentation where tools like Ray and AllenNLP 29 | alone require large development costs to integrate. 30 | 31 | The most important contribution of Flambé is to improve the user experience 32 | involved in doing research, including the various phases of experimentation 33 | we outlined at the very beginning of this page. To do this well, we try to 34 | adhere to the following values: 35 | 36 | Core values 37 | ----------- 38 | 39 | - **Practicality**: customize functionality in code, and iterate over settings and hyperparameters in config files. 
40 | - **Modularity & Composability**: rapidly repurpose existing code for hyper-parameter optimization and new use-cases 41 | - **Reproducibility**: reproducible experiments, by anyone, at any time. 42 | -------------------------------------------------------------------------------- /docs/tutorials/ssh_cluster.rst: -------------------------------------------------------------------------------- 1 | ========================================== 2 | Creating a cluster with existing instances 3 | ========================================== 4 | 5 | Flambé provides a :class:`~flambe.cluster.Cluster` implementation called :class:`~flambe.cluster.SSHCluster` 6 | that is able to build a cluster from existing instances. 7 | 8 | .. important:: 9 | As described in :ref:`understanding-clusters_label`, all clusters have an orchestrator host and a set 10 | of factories hosts. 11 | 12 | Instances in a cloud service provider 13 | ------------------------------------- 14 | 15 | Let's assume that the user has the following cluster: 16 | 17 | 18 | .. image:: ../image/ssh-cluster.png 19 | :width: 100% 20 | :name: report-site 21 | :align: center 22 | 23 | .. tip:: 24 | It's not required that the factories contain GPUs. 25 | 26 | .. important:: 27 | It is required that: 28 | 29 | * All instances are in the same private LAN. 30 | * All hosts have the same username. 31 | * All hosts are accessible with the same private key. 32 | 33 | Implementing an :class:`~flambe.cluster.SSHCluster` is as simple as: 34 | 35 | 36 | .. code-block:: yaml 37 | :caption: ssh-cluster.yaml 38 | 39 | !SSHCluster 40 | 41 | name: my-cluster 42 | 43 | orchestrator_ip: [53.10.21.32, 10.150.0.1] 44 | factories_ips: 45 | - [53.10.21.54, 10.150.0.2] 46 | - [53.10.21.73, 10.150.0.3] 47 | 48 | key: /path/to/my/key 49 | 50 | username: ubuntu 51 | 52 | 53 | Note that all hosts have information about both the public IP and the private IP.
54 | 55 | Instances in the private LAN 56 | ---------------------------- 57 | 58 | If the instances do not have a public IP because they are running on-premise, then 59 | :class:`~flambe.cluster.SSHCluster` supports providing private IPs only. 60 | 61 | For example: 62 | 63 | .. code-block:: yaml 64 | :caption: ssh-cluster.yaml 65 | 66 | !SSHCluster 67 | 68 | name: my-cluster 69 | 70 | orchestrator_ip: 10.150.0.10 71 | factories_ips: 72 | - 10.150.0.20 73 | - 10.150.0.30 74 | 75 | key: /path/to/my/key 76 | 77 | username: ubuntu 78 | 79 | More information 80 | ---------------- 81 | 82 | Refer to the :ref:`understanding-clusters_label` section or checkout the documentation of 83 | :class:`~flambe.cluster.SSHCluster`. 84 | -------------------------------------------------------------------------------- /examples/basic_example.yaml: -------------------------------------------------------------------------------- 1 | !Experiment 2 | 3 | name: sst-text-classification 4 | 5 | pipeline: 6 | 7 | # stage 0 - Load the Stanford Sentiment Treebank dataset and run preprocessing 8 | dataset: !SSTDataset # this is a simple Python object, and the arguments to build it 9 | transform: # these arguments are passed to the init method 10 | text: !TextField 11 | label: !LabelField 12 | 13 | # Stage 1 - Define a model 14 | model: !TextClassifier 15 | embedder: !Embedder 16 | embedding: !torch.Embedding # automatically use pytorch classes 17 | num_embeddings: !@ dataset.text.vocab_size # link to other components, and attributes 18 | embedding_dim: 300 19 | embedding_dropout: 0.3 20 | encoder: !PooledRNNEncoder 21 | input_size: 300 22 | n_layers: !g [2, 3, 4] # grid search over any parameters 23 | hidden_size: 128 24 | rnn_type: sru 25 | dropout: 0.3 26 | output_layer: !SoftmaxLayer 27 | input_size: !@ model[embedder][encoder].rnn.hidden_size # also use inner-links 28 | output_size: !@ dataset.label.vocab_size 29 | 30 | # Stage 2 - Train the model on the dataset 31 | train: !Trainer 32 | 
dataset: !@ dataset 33 | model: !@ model 34 | train_sampler: !BaseSampler 35 | val_sampler: !BaseSampler 36 | loss_fn: !torch.NLLLoss 37 | metric_fn: !Accuracy 38 | optimizer: !torch.Adam 39 | params: !@ train[model].trainable_params 40 | max_steps: 2 41 | iter_per_step: 2 42 | 43 | # Stage 3 - Eval on the test set 44 | eval: !Evaluator 45 | dataset: !@ dataset 46 | model: !@ train[model] 47 | metric_fn: !Accuracy 48 | eval_sampler: !BaseSampler 49 | 50 | # Define how to schedule variants 51 | schedulers: 52 | train: !ray.HyperBandScheduler 53 | -------------------------------------------------------------------------------- /examples/cluster.yaml: -------------------------------------------------------------------------------- 1 | !AWSCluster 2 | 3 | name: my-cluster # Make sure to name your cluster 4 | 5 | factories_num: 1 # Number of factories to spin up, there is always just 1 orchestrator 6 | factories_type: g3.4xlarge 7 | orchestrator_type: t3.large 8 | orchestrator_timeout: -1 # -1 means the orchestrator will have to be killed manually (recommended) 9 | factories_timeout: -1 # Factories timeout after being unused for these many hours 10 | 11 | creator: user@company.com 12 | key_name: my_key 13 | 14 | key: '/path/to/ssh/key' 15 | 16 | tags: 17 | project: my-project 18 | company: xxxxxxxxxx 19 | 20 | subnet_id: subnet-XXXXXXX 21 | volume_size: 100 22 | 23 | # Don't change if not sure 24 | security_group: sg-XXXXXXXXXXXX 25 | -------------------------------------------------------------------------------- /examples/full_example.yaml: -------------------------------------------------------------------------------- 1 | !Experiment 2 | 3 | name: full-example 4 | 5 | pipeline: # Define the list of components to execute 6 | 1_train: !Trainer # This is the top level component to execute the run method on 7 | dataset: !SSTDataset # dataset is an argument to Trainer.__init__ 8 | transform: # Similarly, transform is an argument to the SSTDataset.__init__ 9 | text: 
!TextField # Fields produce columns, this particular TextField produces a single one 10 | embeddings: glove-twitter-200 # note the native support for gensim embeddings 11 | embeddings_format: gensim 12 | label: !LabelField 13 | train_sampler: !BaseSampler 14 | batch_size: 32 15 | val_sampler: !BaseSampler 16 | batch_size: 512 17 | model: !TextClassifier 18 | embedder: !Embedder 19 | embedding: !Embeddings.from_pretrained 20 | embeddings: !@ 1_train[dataset].text.embedding_matrix # Link over attributes of previously defined objects 21 | freeze: True 22 | embedding_dropout: 0.3 23 | encoder: !PooledRNNEncoder 24 | input_size: !@ 1_train[model][embedder][embedding].embedding_dim 25 | rnn_type: lstm 26 | n_layers: !g [2, 3, 4] # Grid search over the number of layers 27 | hidden_size: 256 28 | pooling: last 29 | dropout: 0.3 30 | output_layer: !SoftmaxLayer 31 | input_size: !@ 1_train[model][embedder][encoder].rnn.hidden_size 32 | output_size: !@ 1_train[dataset].label.vocab_size 33 | loss_fn: !torch.NLLLoss 34 | metric_fn: !Accuracy 35 | optimizer: !torch.Adam 36 | params: !@ 1_train[model].trainable_params 37 | max_steps: 1 38 | iter_per_step: 1 39 | 2_evaluate: !Evaluator # We evaluate in a second stage, note the reduce argument at the bottom of the file 40 | dataset: !@ 1_train[dataset] 41 | model: !@ 1_train[model] 42 | metric_fn: !Accuracy 43 | eval_sampler: !BaseSampler 44 | batch_size: 512 45 | 3_export: !Exporter # This component doesn't do anything, but makes it easy to identify the objects needed for later use 46 | model: !@ 2_evaluate[model] # Note that it is completely optional, as these objects can be fetched from the saved evaluator step 47 | text: !@ 2_evaluate[dataset].text 48 | 49 | schedulers: # Define how to schedule variants 50 | 1_train: !ray.HyperBandScheduler 51 | reduce: # Only use the best variant in subsequent blocks, meaning only the best Trainer will be linked to the Evaluator stage 52 | 1_train: 1 53 | 
-------------------------------------------------------------------------------- /flambe/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: E402 2 | 3 | import logging as main_logging 4 | 5 | # Work based on https://github.com/tensorflow/tensorflow/issues/26691 6 | # This check is done to avoid tensorflow import (done by pytorch 1.1) 7 | # break logging. 8 | try: 9 | # Tensorflow uses Google's abseil-py library, which uses a Google-specific 10 | # wrapper for logging. That wrapper will write a warning to sys.stderr if 11 | # the Google command-line flags library has not been initialized. 12 | # 13 | # https://github.com/abseil/abseil-py/blob/pypi-v0.7.1/absl/logging/__init__.py#L819-L825 14 | # 15 | # This is not right behavior for Python code that is invoked outside of a 16 | # Google-authored main program. Use knowledge of abseil-py to disable that 17 | # warning; ignore and continue if something goes wrong. 18 | import absl.logging 19 | 20 | # https://github.com/abseil/abseil-py/issues/99 21 | main_logging.root.removeHandler(absl.logging._absl_handler) 22 | # https://github.com/abseil/abseil-py/issues/102 23 | absl.logging._warn_preinit_stderr = False 24 | except Exception: 25 | pass 26 | 27 | main_logging.disable(main_logging.WARNING) 28 | 29 | from flambe.compile import Component, Schema, save, load 30 | from flambe.compile import save_state_to_file, load_state_from_file 31 | from flambe.logging import log 32 | from flambe import compile, dataset, experiment, field, learn, nlp, vision, export, model 33 | from flambe import cluster, metric, nn, runner, sampler, runnable, tokenizer, optim 34 | from flambe.version import VERSION as __version__ 35 | from flambe.logo import ASCII_LOGO 36 | 37 | 38 | __all__ = ['Component', 'Schema', 'log', 'tokenizer', 39 | 'compile', 'dataset', 'experiment', 'field', 'learn', 'export', 40 | 'cluster', 'metric', 'nn', 'model', 'optim', 'runner', 'runnable', 'sampler', 
# Constants for flambe cluster management (SSH, logging, ports, AWS).

# Logging socket config
SOCKET_TIMEOUT = 50  # presumably seconds — TODO(review): confirm unit

# SSH connection settings
RETRY_DELAY = 1  # delay between connection attempts (presumably seconds)
RETRIES = 60  # number of connection attempts before giving up

# Docker image used to run tensorboard on the cluster.
TENSORBOARD_IMAGE = "tensorflow/tensorflow:1.15.0"

# Port the Ray redis server listens on.
RAY_REDIS_PORT = 12345

# These names are specific because of tune stuff!
# NOTE: DO NOT CHANGE THESE FILE NAMES
# TODO: do this in a cleaner way

PRIVATE_KEY = "ray_bootstrap_key.pem"
PUBLIC_KEY = "ray_bootstrap_key.pub"

# Ports where the report site and tensorboard are exposed.
REPORT_SITE_PORT = 49558
TENSORBOARD_PORT = 49556

# This is the account number for the Flambe AWS account.
# There is no risk in it being public.
AWS_FLAMBE_ACCOUNT = "808129580301"
11 | 12 | """ 13 | pass 14 | -------------------------------------------------------------------------------- /flambe/cluster/instance/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe.cluster.instance.instance import Instance 2 | from flambe.cluster.instance.instance import CPUFactoryInstance, GPUFactoryInstance 3 | from flambe.cluster.instance.instance import OrchestratorInstance 4 | 5 | __all__ = ['CPUFactoryInstance', 'GPUFactoryInstance', 'OrchestratorInstance', 'Instance'] 6 | -------------------------------------------------------------------------------- /flambe/cluster/instance/errors.py: -------------------------------------------------------------------------------- 1 | 2 | class RemoteCommandError(Exception): 3 | """Error raised when any remote command/script fail in an Instance. 4 | 5 | """ 6 | pass 7 | 8 | 9 | class SSHConnectingError(Exception): 10 | """Error raised when opening a SSH connection fails. 11 | 12 | """ 13 | pass 14 | 15 | 16 | class MissingAuthError(Exception): 17 | """Error raised when there is missing authentication information. 18 | 19 | """ 20 | pass 21 | 22 | 23 | class RemoteFileTransferError(Exception): 24 | """Error raised when sending a local file to an Instance fails. 
#!/bin/bash
# Install Docker CE on Ubuntu via Docker's official apt repository,
# then allow the current user to run docker without sudo.

# Install prerequisites and add Docker's official GPG key.
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88

# Register the stable Docker apt repository for this Ubuntu release.
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update
apt-cache policy docker-ce

# Install the docker daemon and show its service status.
sudo apt-get install -y docker-ce
sudo systemctl status docker

# Let the invoking user run docker without sudo
# (group membership takes effect on next login).
sudo usermod -aG docker $USER

# Smoke test: run the hello-world container.
sudo docker run hello-world
-f driver=nvidia-docker | xargs -r -I{} -n1 sudo docker ps -q -a -f volume={} | xargs -r sudo docker rm -f 3 | sudo apt-get purge -y nvidia-docker 4 | 5 | # Add the package repositories 6 | curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - 7 | distribution=$(. /etc/os-release;echo $ID$VERSION_ID) 8 | curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list 9 | sudo apt-get update 10 | 11 | # Install nvidia-docker2 and reload the Docker daemon configuration 12 | sudo apt-get install -y nvidia-docker2 13 | sudo pkill -SIGHUP dockerd 14 | 15 | # Test nvidia-smi with the latest official CUDA image 16 | sudo docker run --runtime=nvidia --rm nvidia/cuda nvidia-smi 17 | -------------------------------------------------------------------------------- /flambe/cluster/utils.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | 4 | RemoteCommand = namedtuple('RemoteCommand', ['success', 'msg']) 5 | -------------------------------------------------------------------------------- /flambe/compile/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe.compile.registrable import RegistrationError, Registrable, alias, yaml, \ 2 | register, registrable_factory, registration_context, MappedRegistrable 3 | from flambe.compile.component import Schema, Component, Link, dynamic_component 4 | from flambe.compile.utils import make_component, all_subclasses 5 | from flambe.compile.serialization import save, load, save_state_to_file, load_state_from_file, \ 6 | State 7 | 8 | 9 | __all__ = ['RegistrationError', 'Registrable', 'alias', 'Schema', 10 | 'Link', 'Component', 'yaml', 'register', 'dynamic_component', 11 | 'make_component', 'all_subclasses', 'registrable_factory', 12 | 'registration_context', 'save', 'load', 'State', 13 | 'save_state_to_file', 
class Dataset(Component):
    """Base Dataset interface.

    Dataset objects offer the main interface to loading data into the
    experiment pipeline. Dataset objects have three attributes:
    `train`, `val`, and `test`, each pointing to a list of examples.

    Note that Datasets should also be "immutable", and as such,
    `__setitem__` and `__delitem__` will raise an error. Although this
    does not mean that the object will not be mutated in other ways,
    it should help avoid issues now and then.

    """

    @property
    @abstractmethod
    def train(self) -> Sequence[Sequence]:
        """Returns the training data as a sequence of examples."""
        pass

    @property
    @abstractmethod
    def val(self) -> Sequence[Sequence]:
        """Returns the validation data as a sequence of examples."""
        pass

    @property
    @abstractmethod
    def test(self) -> Sequence[Sequence]:
        """Returns the test data as a sequence of examples."""
        pass

    def __setitem__(self, key, value):
        """Raise an error: Dataset objects are immutable.

        Fix: the original signature took no arguments besides ``self``,
        so ``dataset[key] = value`` raised a confusing ``TypeError``
        about the argument count instead of the intended ``ValueError``.
        """
        raise ValueError("Dataset objects are immutable")

    def __delitem__(self, key):
        """Raise an error: Dataset objects are immutable.

        Fix: same missing-parameter bug as ``__setitem__`` — item
        deletion never reached the intended ``ValueError``.
        """
        raise ValueError("Dataset objects are immutable")
class Exporter(Component):
    """A no-op computable used to group objects together.

    Acts as a placeholder step in a pipeline: it performs no work of
    its own, but collecting objects under one block when they get
    saved makes them easier to refer to later on — for instance from
    an object builder.

    """

    def __init__(self, **kwargs: Dict[str, Any]) -> None:
        """Store every keyword argument as an attribute to export.

        Parameters
        ----------
        kwargs: Dict[str, Any]
            Mapping from name to any object to export

        """
        self.objects = kwargs

        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
            # Non-Component objects must be registered explicitly so
            # they are included in the saved state.
            if not isinstance(attr_value, Component):
                self.register_attrs(attr_name)

    def run(self) -> bool:
        """Run the exporter.

        Returns
        -------
        bool
            Always False: this is a single step Component.

        """
        return False
44 | 45 | This method allows N to M mappings from example columns (N) 46 | to tensors (M). 47 | 48 | Parameters 49 | ---------- 50 | *example: Any 51 | Column values of the example 52 | 53 | Returns 54 | ------- 55 | Union[torch.Tensor, Tuple[torch.Tensor, ...]] 56 | The processed example, as a tensor or tuple of tensors 57 | """ 58 | pass 59 | -------------------------------------------------------------------------------- /flambe/learn/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe.learn.train import Trainer 2 | from flambe.learn.eval import Evaluator 3 | from flambe.learn.script import Script 4 | from flambe.learn.distillation import DistillationTrainer 5 | 6 | 7 | __all__ = ['Trainer', 'Evaluator', 'Script', 'DistillationTrainer'] 8 | -------------------------------------------------------------------------------- /flambe/learn/script.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Optional, List 2 | import sys 3 | import runpy 4 | from copy import deepcopy 5 | 6 | from flambe.logging import get_trial_dir 7 | from flambe.compile import Component 8 | 9 | 10 | class Script(Component): 11 | """Implement a Script computable. 12 | 13 | The obejct can be used to turn any script into a Flambé computable. 14 | This is useful when you want to rapidly integrate code. Note 15 | however that this computable does not enable checkpointing or 16 | linking to internal components as it does not have any attributes. 17 | 18 | To use this object, your script needs to be in a pip installable, 19 | containing all dependencies. The script is run with the following 20 | command: 21 | 22 | .. 
def select_device(device: Optional[str]) -> str:
    """Resolve which torch device to run on.

    Parameters
    ----------
    device: Optional[str]
        An explicit device string such as ``'cpu'`` or ``'cuda'``,
        or None to auto-select.

    Returns
    -------
    str
        The passed-in device when given; otherwise ``'cuda'`` if
        available, falling back to ``'cpu'``.

    """
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    return device
| $$ |____ $$| $$_ $$_ $$| $$__ $$ /$$__ $$ 7 | | $$__/ | $$ /$$$$$$$| $$ \ $$ \ $$| $$ \ $$| $$$$$$$$ 8 | | $$ | $$ /$$__ $$| $$ | $$ | $$| $$ | $$| $$_____/ 9 | | $$ | $$| $$$$$$$| $$ | $$ | $$| $$$$$$$/| $$$$$$$ 10 | |__/ |__/ \_______/|__/ |__/ |__/|_______/ \_______/ 11 | 12 | 13 | """ # noqa: W605""" 14 | 15 | ASCII_LOGO_DEV = """ 16 | 17 | /$$$$$$$$ /$$ /$$ /$ 18 | | $$_____/| $$ | $$ /$ 19 | | $$ | $$ /$$$$$$ /$$$$$$/$$$$ | $$$$$$$ /$$$$$$ 20 | | $$$$$ | $$ |____ $$| $$_ $$_ $$| $$__ $$ /$$__ $$ _ 21 | | $$__/ | $$ /$$$$$$$| $$ \ $$ \ $$| $$ \ $$| $$$$$$$$ | | 22 | | $$ | $$ /$$__ $$| $$ | $$ | $$| $$ | $$| $$_____/ __| | _____ __ 23 | | $$ | $$| $$$$$$$| $$ | $$ | $$| $$$$$$$/| $$$$$$$ / _` |/ _ \ \ / / 24 | |__/ |__/ \_______/|__/ |__/ |__/|_______/ \_______/| (_| | __/\ V / 25 | \__,_|\___| \_/ 26 | 27 | """ # noqa: W605""" 28 | -------------------------------------------------------------------------------- /flambe/metric/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe.metric.metric import Metric 2 | from flambe.metric.loss.cross_entropy import MultiLabelCrossEntropy 3 | from flambe.metric.loss.nll_loss import MultiLabelNLLLoss 4 | from flambe.metric.dev.accuracy import Accuracy 5 | from flambe.metric.dev.perplexity import Perplexity 6 | from flambe.metric.dev.bpc import BPC 7 | from flambe.metric.dev.auc import AUC, MultiClassAUC 8 | from flambe.metric.dev.binary import BinaryPrecision, BinaryRecall, BinaryAccuracy, F1 9 | from flambe.metric.dev.recall import Recall 10 | 11 | 12 | __all__ = ['Metric', 13 | 'Accuracy', 'AUC', 'Perplexity', 'BPC', 14 | 'MultiLabelCrossEntropy', 'MultiLabelNLLLoss', 15 | 'BinaryPrecision', 'BinaryRecall', 'BinaryAccuracy', 'F1', 16 | 'Recall', 'MultiClassAUC'] 17 | -------------------------------------------------------------------------------- /flambe/metric/dev/__init__.py: -------------------------------------------------------------------------------- 
class Accuracy(AverageableMetric):
    """Single-label classification accuracy."""

    def compute(self, pred: torch.Tensor, target: torch.Tensor) \
            -> torch.Tensor:
        """Compute the fraction of correct predictions.

        Parameters
        ----------
        pred: Tensor
            input logits of shape (B x N)
        target: LongTensor
            target tensor of shape (B) or (B x N)

        Returns
        -------
        accuracy: torch.Tensor
            single label accuracy, of shape (B)

        """
        # One-hot / per-class score targets: reduce to class indices.
        if target.dim() == 2:
            target = target.argmax(dim=1)

        correct = pred.argmax(dim=1).eq(target)
        return correct.float().mean()
class Perplexity(Metric):
    """Token level perplexity, computed as exp(cross_entropy)."""

    def __init__(self):
        """Set up the unreduced cross-entropy used by all methods."""
        # reduction='none' keeps one entropy value per sample so that
        # aggregate() can accumulate exact per-sample sums.
        self.entropy = torch.nn.CrossEntropyLoss(reduction='none')

    def compute(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Compute the perplexity given the input and target.

        Parameters
        ----------
        pred: torch.Tensor
            input logits of shape (B x N)
        target: torch.LongTensor
            target tensor of shape (B)

        Returns
        -------
        torch.float
            Output perplexity

        """
        mean_entropy = self.entropy(pred, target).mean()
        return torch.exp(mean_entropy)

    def aggregate(self, state: dict, *args, **kwargs) -> Dict:
        """Aggregate by storing the running entropy sum and count.

        Parameters
        ----------
        state: dict
            the metric state
        args: the pred, target tuple

        Returns
        -------
        dict
            the state dict

        """
        pred, target = args
        if not state:
            # First call: initialize the accumulators.
            state['accumulated_score'] = 0.
            state['sample_count'] = 0
        per_sample = self.entropy(pred, target).cpu().detach()
        state['accumulated_score'] += per_sample.sum()
        state['sample_count'] += per_sample.size(0)
        return state

    def finalize(self, state: Dict) -> float:
        """Finalize the metric computation.

        Parameters
        ----------
        state: dict
            the metric state

        Returns
        -------
        float
            The final score (NaN when called on an empty state).

        """
        if not state or state['sample_count'] == 0:
            # Called on an empty state.
            return np.NaN
        mean_entropy = state['accumulated_score'] / state['sample_count']
        return torch.exp(mean_entropy).item()
class MultiLabelNLLLoss(Metric):
    """Negative log likelihood loss for multi-label targets."""

    def __init__(self,
                 weight: Optional[torch.Tensor] = None,
                 ignore_index: Optional[int] = None,
                 reduction: str = 'mean') -> None:
        """Initialize the MultiLabelNLLLoss.

        Parameters
        ----------
        weight : Optional[torch.Tensor]
            A manual rescaling weight given to each class.
            If given, has to be a Tensor of size N, where N is the
            number of classes.
        ignore_index : Optional[int], optional
            Specifies a target value that is ignored and does not
            contribute to the input gradient. When size_average is
            True, the loss is averaged over non-ignored targets.
        reduction : str, optional
            Specifies the reduction to apply to the output:
            'none' | 'mean' | 'sum'.
            'none': no reduction will be applied,
            'mean': the output will be averaged
            'sum': the output will be summed.

        """
        super().__init__()
        self.weight = weight
        self.ignore_index = ignore_index
        self.reduction = reduction

    def __str__(self) -> str:
        """Return the name of the Metric (for use in logging)."""
        return 'MultiLabelNLLLoss' if self.weight is None \
            else 'WeightedMultiLabelNLLLoss'

    def compute(self, pred: torch.Tensor, target: torch.Tensor) \
            -> torch.Tensor:
        """Compute the negative log likelihood loss for multilabel.

        Parameters
        ----------
        pred: torch.Tensor
            input log-probabilities of shape (B x N)
        target: torch.LongTensor
            target tensor of shape (B x N)

        Returns
        -------
        loss: torch.float
            Multi label negative log likelihood loss, of shape (B)

        """
        if self.ignore_index is not None:
            # Fix: operate on a copy — the original zeroed the
            # caller's target tensor in place.
            target = target.clone()
            target[:, self.ignore_index] = 0

        # Fix: use a local default weight instead of caching it on
        # self.weight. The original permanently overwrote self.weight
        # with a ones tensor on the first call, pinning it to that
        # batch's device and flipping __str__ to the weighted name.
        if self.weight is None:
            weight = torch.ones(pred.size(1)).to(pred)
        else:
            weight = self.weight

        # Normalize multi-label targets into a distribution per row.
        norm_target = F.normalize(target.float(), p=1, dim=1)
        loss = - (weight * norm_target * pred).sum(dim=1)

        if self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()
        elif self.reduction is not None:
            # Fix: message was missing the f-prefix, so it printed the
            # literal '{self.reduction}' instead of the value.
            raise ValueError(f"Unknown reduction: {self.reduction}")

        return loss
class LogisticRegression(Module):
    """Binary logistic regression model.

    Given an input vector v, computes sigmoid(Wv + b), where W is a
    weight vector and b a bias term, mapping the result into [0, 1].
    In classification settings this is typically interpreted as the
    probability of belonging to a given class.

    Attributes
    ----------
    input_size : int
        Dimension (number of features) of the input vector.

    """

    def __init__(self, input_size: int) -> None:
        """Initialize the Logistic Regression Model.

        Parameters
        ----------
        input_size: int
            The dimension of the input vector

        """
        super().__init__()
        # A single linear layer with a sigmoid output activation is
        # exactly logistic regression.
        self.encoder = MLPEncoder(input_size, output_size=1,
                                  n_layers=1, output_activation=Sigmoid())

    def forward(self,
                data: Tensor,
                target: Optional[Tensor] = None) -> Union[Tensor, Tuple[Tensor, Tensor]]:
        """Forward pass that encodes data.

        Parameters
        ----------
        data : Tensor
            input data to encode
        target: Optional[Tensor]
            target value, will be casted to a float tensor.

        """
        probs = self.encoder(data)
        if target is None:
            return probs
        return probs, target.float()
class TextClassifier(Module):
    """Implements a standard classifier.

    The classifier is composed of an encoder module, followed by
    a fully connected output layer, with a dropout layer in between.

    Attributes
    ----------
    embedder: Embedder
        The embedder layer
    output_layer : Module
        The output layer, yields a probability distribution over targets
    drop: nn.Dropout
        the dropout layer

    """

    def __init__(self,
                 embedder: Embedder,
                 output_layer: Module,
                 dropout: float = 0) -> None:
        """Initialize the TextClassifier model.

        Parameters
        ----------
        embedder: Embedder
            The embedder layer
        output_layer : Module
            The output layer, yields a probability distribution
        dropout : float, optional
            Amount of dropout to include between layers (defaults to 0)

        """
        super().__init__()
        self.embedder = embedder
        self.output_layer = output_layer
        self.drop = nn.Dropout(dropout)

    def forward(self,
                data: Tensor,
                target: Optional[Tensor] = None) -> Union[Tensor, Tuple[Tensor, Tensor]]:
        """Run a forward pass through the network.

        Parameters
        ----------
        data: Tensor
            The input data
        target: Tensor, optional
            The input targets, optional

        Returns
        -------
        Union[Tensor, Tuple[Tensor, Tensor]]
            The output predictions, and optionally the targets

        """
        embedded = self.embedder(data)
        # Some embedders return auxiliary state alongside the encoding;
        # only the encoding itself is classified.
        encoding = embedded[0] if isinstance(embedded, tuple) else embedded

        pred = self.output_layer(self.drop(encoding))
        if target is None:
            return pred
        return pred, target
class LMField(TextField):
    """Language Model field.

    Generates the original tensor alongside its shifted version.

    """

    def __init__(self,
                 **kwargs) -> None:
        """Initialize the LMField, forwarding all options to TextField."""
        super().__init__(**kwargs)

    def process(self, example: str) -> Tuple[torch.Tensor, ...]:  # type: ignore
        """Process an example and create 2 Tensors.

        Parameters
        ----------
        example: str
            The example to process, as a single string

        Returns
        -------
        Tuple[torch.Tensor, ...]
            An (inputs, targets) pair: the numericalized sequence
            without its final token, and the same sequence shifted
            left by one position.

        """
        tokens = super().process(example)
        inputs, targets = tokens[:-1], tokens[1:]  # type: ignore
        return inputs, targets
def get_distance_module(metric: str) -> DistanceModule:
    """Get the distance module from a string alias.

    Currently available:
    . `euclidean`
    . `cosine`
    . `hyperbolic`

    Parameters
    ----------
    metric : str
        The distance metric to use

    Raises
    ------
    ValueError
        Invalid distance string alias provided

    Returns
    -------
    DistanceModule
        The instantiated distance module

    """
    # Dispatch table keeps the alias -> class mapping in one place.
    constructors = {
        'euclidean': EuclideanDistance,
        'cosine': CosineDistance,
        'hyperbolic': HyperbolicDistance,
    }
    if metric not in constructors:
        raise ValueError(f"Unknown distance alias: {metric}")
    return constructors[metric]()
class CosineDistance(DistanceModule):
    """Compute pairwise cosine distances between two sets of vectors."""

    def __init__(self, eps: float = 1e-8) -> None:
        """Initialize the CosineDistance module.

        Parameters
        ----------
        eps : float, optional
            Used for numerical stability

        """
        super().__init__()
        self.eps = eps

    def forward(self, mat_1: Tensor, mat_2: Tensor) -> Tensor:
        """Returns the cosine distance between each
        element in mat_1 and each element in mat_2.

        Parameters
        ----------
        mat_1: torch.Tensor
            matrix of shape (n_1, n_features)
        mat_2: torch.Tensor
            matrix of shape (n_2, n_features)

        Returns
        -------
        dist: torch.Tensor
            distance matrix of shape (n_1, n_2)

        """
        norms_1 = mat_1.norm(p=2, dim=1, keepdim=True)
        norms_2 = mat_2.norm(p=2, dim=1, keepdim=True)
        # Clamp the norm product to avoid division by zero.
        denom = (norms_1 * norms_2.t()).clamp(min=self.eps)
        similarity = torch.mm(mat_1, mat_2.t()) / denom
        return 1 - similarity
class DistanceModule(Module):
    """Abstract base for pairwise-distance modules.

    Subclasses implement ``forward`` to return the distance between
    every pair of rows drawn from two input matrices.

    """

    def forward(self, mat_1: Tensor, mat_2: Tensor) -> Tensor:
        """Performs a forward pass through the network.

        Parameters
        ----------
        mat_1 : torch.Tensor
            The first batch of vectors, as a float tensor
        mat_2 : torch.Tensor
            The second batch of vectors, as a float tensor

        Returns
        -------
        torch.Tensor
            The pairwise distances, as a float tensor

        """
        # Abstract: concrete metrics (euclidean, cosine, ...) override.
        raise NotImplementedError
class EuclideanDistance(DistanceModule):
    """Implement a EuclideanDistance object."""

    def forward(self, mat_1: Tensor, mat_2: Tensor) -> Tensor:
        """Returns the squared euclidean distance between each
        element in mat_1 and each element in mat_2.

        Parameters
        ----------
        mat_1: torch.Tensor
            matrix of shape (n_1, n_features)
        mat_2: torch.Tensor
            matrix of shape (n_2, n_features)

        Returns
        -------
        dist: torch.Tensor
            distance matrix of shape (n_1, n_2)

        """
        # Vectorized replacement for the original Python-level loop
        # over the rows of mat_2: broadcast to (n_1, n_2, n_features)
        # and reduce the feature axis. Same math, single kernel.
        # NOTE(review): this materializes the full broadcast tensor;
        # for very large inputs the looped version had lower peak
        # memory — confirm sizes seen in practice.
        diff = mat_1.unsqueeze(1) - mat_2.unsqueeze(0)
        return (diff ** 2).sum(dim=2)
class MixtureOfSoftmax(Module):
    """Implement the MixtureOfSoftmax output layer.

    Attributes
    ----------
    pi_w: MLPEncoder
        computes the mixture weights over the k components
    layers: nn.ModuleList
        the k component projection layers

    """
    def __init__(self,
                 input_size: int,
                 output_size: int,
                 k: int = 1,
                 take_log: bool = True) -> None:
        """Initialize the MOS layer.

        Parameters
        ----------
        input_size: int
            input dimension
        output_size: int
            output dimension
        k: int (Default: 1)
            number of softmax in the mixture
        take_log: bool (Default: True)
            apply LogSoftmax instead of Softmax to the final output

        """
        super().__init__()

        self.pi_w = MLPEncoder(input_size, k)
        # dim made explicit: the mixture weights are normalized over
        # the k components, the last dim of pi_w's output.
        self.softmax = nn.Softmax(dim=-1)

        # Bug fix: the layers were kept in a plain Python list, which
        # hides them from nn.Module — their parameters were never
        # registered (missing from .parameters(), not moved by .to()).
        self.layers = nn.ModuleList(
            [MLPEncoder(input_size, output_size) for _ in range(k)])
        self.tanh = nn.Tanh()

        self.activation = nn.LogSoftmax(dim=-1) if take_log else nn.Softmax(dim=-1)

    def forward(self, data: Tensor) -> Tensor:
        """Implement mixture of softmax for language modeling.

        Parameters
        ----------
        data: torch.Tensor
            seq_len x batch_size x hidden_size

        Returns
        -------
        out: torch.Tensor
            output matrix of shape seq_len x batch_size x out_size

        """
        w = self.softmax(self.pi_w(data))
        # Bug fix: unsqueeze so the (seq x batch) weights broadcast
        # against the (seq x batch x out) component outputs — the
        # previous element-wise product was an invalid broadcast.
        out = [w[:, :, i].unsqueeze(-1) * self.tanh(W(data))
               for i, W in enumerate(self.layers)]
        # Bug fix: stack on a new leading dim and sum it out, keeping
        # the (seq x batch x out) shape; cat(dim=0).sum(dim=0) used to
        # collapse the sequence dimension as well.
        out = torch.stack(out, dim=0).sum(dim=0)

        return self.activation(out)
class Sequential(Module):
    """Implement a Sequential module.

    This class can be used in the same way as torch's nn.Sequential,
    with the difference that it accepts kwargs arguments.

    """
    def __init__(self, **kwargs: Dict[str, Union[Module, torch.nn.Module]]) -> None:
        """Initialize the Sequential module.

        Parameters
        ----------
        kwargs: Dict[str, Union[Module, torch.nn.Module]]
            The modules to run in sequence, in declaration order.

        """
        super().__init__()

        modules = []
        for name, module in kwargs.items():
            # Expose each sub-module as an attribute under its name.
            setattr(self, name, module)
            modules.append(module)

        # Bug fix: nn.Sequential takes modules as positional arguments
        # (or an OrderedDict); passing the list itself raised a
        # TypeError at construction time.
        self.seq = torch.nn.Sequential(*modules)

    def forward(self, data: torch.Tensor) -> torch.Tensor:
        """Performs a forward pass through the network.

        Parameters
        ----------
        data: torch.Tensor
            input to the model

        Returns
        -------
        output: torch.Tensor
            output of the model

        """
        return self.seq(data)
class SoftmaxLayer(Module):
    """Implement an SoftmaxLayer module.

    Can be used to form a classifier out of any encoder.
    Note: by default takes the log_softmax so that it can be fed to
    the NLLLoss module. You can disable this behavior through the
    `take_log` argument.

    """
    def __init__(self,
                 input_size: int,
                 output_size: int,
                 mlp_layers: int = 1,
                 mlp_dropout: float = 0.,
                 mlp_hidden_activation: Optional[nn.Module] = None,
                 take_log: bool = True) -> None:
        """Initialize the SoftmaxLayer.

        Parameters
        ----------
        input_size : int
            Input size of the decoder, usually the hidden size of
            some encoder.
        output_size : int
            The output dimension, usually the number of target labels
        mlp_layers : int
            The number of layers in the MLP
        mlp_dropout: float, optional
            Dropout to be used before each MLP layer
        mlp_hidden_activation: nn.Module, optional
            Any PyTorch activation layer, defaults to None
        take_log: bool, optional
            If ``True``, compute the LogSoftmax to be fed in NLLLoss.
            Defaults to ``True``.

        """
        super().__init__()

        # LogSoftmax pairs with NLLLoss; plain Softmax otherwise.
        output_activation: nn.Module
        if take_log:
            output_activation = nn.LogSoftmax(dim=-1)
        else:
            output_activation = nn.Softmax()
        self.mlp = MLPEncoder(input_size=input_size, output_size=output_size,
                              n_layers=mlp_layers, dropout=mlp_dropout,
                              hidden_activation=mlp_hidden_activation,
                              output_activation=output_activation)

    def forward(self, data: torch.Tensor) -> torch.Tensor:
        """Performs a forward pass through the network.

        Parameters
        ----------
        data: torch.Tensor
            input to the model of shape (*, input_size)

        Returns
        -------
        output: torch.Tensor
            output of the model of shape (*, output_size)

        """
        return self.mlp(data)
class WarmupLinearScheduler(LambdaLR):
    """Linear warmup and then linear decay.

    Linearly increases learning rate from 0 to 1 over
    `warmup` training steps.
    Linearly decreases learning rate from 1. to 0. over
    remaining `n_steps - warmup` steps.

    This scheduler is generally used after every training batch.

    """

    def __init__(self,
                 optimizer,
                 warmup: int,
                 n_steps: int):
        """Initialize the WarmupLinearScheduler.

        Parameters
        ----------
        optimizer : torch.optim.Optimizer
            Wrapped optimizer.
        warmup : int
            The number of linear warmup steps
        n_steps : int
            The total number of steps after which the learning rate
            factor reaches zero.

        """
        self.warmup = warmup
        self.n_steps = n_steps
        super().__init__(optimizer, lr_lambda=self.lr_lambda, last_epoch=-1)  # type: ignore

    def lr_lambda(self, step: int) -> float:
        """Compute the learning rate factor for the given step.

        Parameters
        ----------
        step : int
            The current step. Could be training over
            validation steps.

        Returns
        -------
        float
            The output factor

        """
        if step < self.warmup:
            # Warmup: ramp linearly from 0 up to 1.
            return step / max(1, self.warmup)
        # Decay: ramp linearly from 1 down to 0, clamped at 0 once
        # step passes n_steps.
        remaining = float(self.n_steps - step)
        span = float(max(1.0, self.n_steps - self.warmup))
        return max(0.0, remaining / span)
class LRScheduler(torch.optim.lr_scheduler._LRScheduler, Component):
    """Base LR scheduler that is also a flambe Component."""

    def state_dict(self):
        """Return the scheduler state without flambe bookkeeping.

        The Component machinery stores extra attributes on the
        instance; strip them so only scheduler state is serialized.
        """
        state_dict = super().state_dict()
        # pop(..., None) instead of del: don't raise KeyError if one
        # of the bookkeeping keys happens to be absent.
        for key in ('_schema', '_saved_kwargs', '_extensions'):
            state_dict.pop(key, None)
        return state_dict


class LambdaLR(torch.optim.lr_scheduler.LambdaLR, Component):
    """LambdaLR scheduler that is also a flambe Component."""

    def state_dict(self):
        """Return the scheduler state without flambe bookkeeping."""
        state_dict = super().state_dict()
        # Same robustness fix as LRScheduler.state_dict above.
        for key in ('_schema', '_saved_kwargs', '_extensions'):
            state_dict.pop(key, None)
        return state_dict
class ProtocolError(Exception):
    """Base error for protocol violations in runnable configs."""

    def __init__(self, message: str) -> None:
        """Base ProtocolError implementation.

        Parameters
        ----------
        message : str
            The message to display

        """
        # Bug fix: forward to Exception so that str(err) (and
        # pickling) carries the message; previously it was only
        # stored on self and str(err) came out empty.
        super().__init__(message)
        self.message = message

    def __repr__(self) -> str:
        """Override output message to show ProtocolError

        Returns
        -------
        str
            The output message

        """
        return f"ProtocolError: {self.message}"


class LinkError(ProtocolError):
    """Raised when a link targets a block that was not yet declared."""

    def __init__(self, block_id: str, target_block_id: str) -> None:
        """Link error on undeclared block.

        Parameters
        ----------
        block_id : str
            The block including the link
        target_block_id : str
            The link's target block

        """
        default_message = (
            f"Block '{block_id}' has a link to '{target_block_id}' "
            "which has not yet been declared."
        )
        super().__init__(default_message)
class SearchComponentError(ProtocolError):
    """Search error on non-computable blocks in the pipeline."""

    def __init__(self, block_id: str) -> None:
        """Search error on non-computable.

        Parameters
        ----------
        block_id : str
            The block with the wrong type

        """
        default_message = (
            f"Block '{block_id}' is a non-component; "
            "only Component objects can be in the pipeline"
        )
        super().__init__(default_message)


class UnsuccessfulRunnableError(RuntimeError):
    """Signals that a runnable did not finish successfully."""
    pass


class RunnableFileError(Exception):
    """Base error for problems with a runnable's input file."""
    pass


class ResourceError(RunnableFileError):
    """Error subtype for resource-related problems."""
    pass


class NonExistentResourceError(RunnableFileError):
    """Resource error for a resource that does not exist."""
    pass


class ExistentResourceError(RunnableFileError):
    """Resource error for a resource that already exists."""
    pass


class ParsingRunnableError(RunnableFileError):
    """Error raised while parsing the runnable file."""
    pass


class TagError(RunnableFileError):
    """Error subtype for tag-related problems in the file."""
    pass


class MissingSecretsError(Exception):
    """Signals that required secrets were not provided."""
    pass
def launch_tensorboard(tracking_address: str) -> Optional[str]:
    """Best-effort launch of a background TensorBoard server.

    Parameters
    ----------
    tracking_address : str
        Directory passed to TensorBoard as ``--logdir``.

    Returns
    -------
    Optional[str]
        The TensorBoard URL without a trailing slash, or None if
        TensorBoard is unavailable or fails to start for any reason.

    """
    # https://stackoverflow.com/a/55708102
    # tb will run in background but it will
    # be stopped once the main process is stopped.
    try:
        # Imported lazily: tensorboard is an optional dependency.
        from tensorboard import program
        tb = program.TensorBoard()
        # argv[0] is the (ignored) program name slot, hence None.
        tb.configure(argv=[None, '--logdir', tracking_address])
        url = tb.launch()
        # Normalize by dropping a single trailing slash so callers can
        # append paths to the URL safely.
        if url.endswith("/"):
            url = url[:-1]

        return url
    except Exception:
        # Deliberate broad catch: the report site must still come up
        # even when TensorBoard cannot; callers treat None as "no TB".
        return None
61 | app.run(host=args.host, port=args.port, debug=False) 62 | -------------------------------------------------------------------------------- /flambe/runner/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from typing import Iterable 4 | 5 | from flambe.const import FLAMBE_GLOBAL_FOLDER 6 | from flambe.experiment.wording import print_extensions_cache_size_warning 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | MB = 2**20 12 | WARN_LIMIT_MB = 100 13 | 14 | 15 | def get_files(path: str) -> Iterable[str]: 16 | """Return the list of all files (recursively) 17 | a directory has. 18 | 19 | Parameters 20 | ---------- 21 | path: str 22 | The directory's path 23 | 24 | Return 25 | ------ 26 | List[str] 27 | The list of files (each file with its path from 28 | the given parameter) 29 | 30 | Raise 31 | ----- 32 | ValueError 33 | In case the path does not exist 34 | 35 | """ 36 | if not os.path.exists(path): 37 | raise ValueError(f"{path} does not exist") 38 | 39 | def _wrapped(): 40 | for dirpath, dirnames, filenames in os.walk(path): 41 | for f in filenames: 42 | fp = os.path.join(dirpath, f) 43 | yield fp 44 | 45 | return _wrapped() 46 | 47 | 48 | def get_size_MB(path: str) -> float: 49 | """Return the size of a file/folder in MB. 50 | 51 | Parameters 52 | ---------- 53 | path: str 54 | The path to the folder or file 55 | 56 | Returns 57 | ------- 58 | float 59 | The size in MB 60 | 61 | """ 62 | accum = 0 63 | if os.path.isdir(path): 64 | for fp in get_files(path): 65 | if os.path.exists(fp) and not os.path.islink(fp): 66 | accum += os.path.getsize(fp) 67 | else: 68 | accum = os.path.getsize(path) 69 | return accum / MB 70 | 71 | 72 | def check_system_reqs() -> None: 73 | """Run system checks and prepare the system before a run. 74 | 75 | This method should: 76 | * Create folders, files that are needed for flambe 77 | * Raise errors in case requirements are not met. 
This should 78 | run under the SafeExecutionContext, so errors will be handled 79 | * Warn the user in case something needs attention. 80 | 81 | """ 82 | # Create the flambe folder if it does not exist 83 | if not os.path.exists(FLAMBE_GLOBAL_FOLDER): 84 | os.mkdir(FLAMBE_GLOBAL_FOLDER) 85 | 86 | # Check if extensions folder is getting big 87 | extensions_folder = os.path.join(FLAMBE_GLOBAL_FOLDER, "extensions") 88 | if os.path.exists(extensions_folder) and get_size_MB(extensions_folder) > WARN_LIMIT_MB: 89 | print_extensions_cache_size_warning(extensions_folder, WARN_LIMIT_MB) 90 | -------------------------------------------------------------------------------- /flambe/sampler/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe.sampler.sampler import Sampler 2 | from flambe.sampler.base import BaseSampler 3 | from flambe.sampler.episodic import EpisodicSampler 4 | 5 | 6 | __all__ = ['Sampler', 'BaseSampler', 'EpisodicSampler'] 7 | -------------------------------------------------------------------------------- /flambe/sampler/sampler.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from typing import Iterator, Sequence, Tuple 3 | 4 | import torch 5 | 6 | from flambe.compile import Component 7 | 8 | 9 | class Sampler(Component): 10 | """Base Sampler interface. 11 | 12 | Objects implementing this interface should implement two methods: 13 | 14 | - *sample*: takes a set of data and returns an iterator 15 | - *lenght*: takes a set of data and return the length of the 16 | iterator that would be given by the sample method 17 | 18 | Sampler objects are used inside the Trainer to provide the data to 19 | the models. Note that pushing the data to the appropriate device 20 | is usually done inside the Trainer. 
21 | 22 | """ 23 | @abstractmethod 24 | def sample(self, 25 | data: Sequence[Sequence[torch.Tensor]], 26 | n_epochs: int = 1) -> Iterator[Tuple[torch.Tensor, ...]]: 27 | """Sample from the list of features and yields batches. 28 | 29 | Parameters 30 | ---------- 31 | data: Sequence[Sequence[torch.Tensor, ...]] 32 | The input data to sample from 33 | n_epochs: int, optional 34 | The number of epochs to run in the output iterator. 35 | 36 | Yields 37 | ------ 38 | Iterator[Tuple[Tensor]] 39 | A batch of data, as a tuple of Tensors 40 | 41 | """ 42 | pass 43 | 44 | @abstractmethod 45 | def length(self, data: Sequence[Sequence[torch.Tensor]]) -> int: 46 | """Return the number of batches in the sampler. 47 | 48 | Parameters 49 | ---------- 50 | data: Sequence[Sequence[torch.Tensor, ...]] 51 | The input data to sample from 52 | 53 | Returns 54 | ------- 55 | int 56 | The number of batches that would be created per epoch 57 | 58 | """ 59 | pass 60 | -------------------------------------------------------------------------------- /flambe/tokenizer/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe.tokenizer.tokenizer import Tokenizer 2 | from flambe.tokenizer.char import CharTokenizer 3 | from flambe.tokenizer.word import WordTokenizer, NLTKWordTokenizer, NGramsTokenizer 4 | from flambe.tokenizer.label import LabelTokenizer 5 | 6 | 7 | __all__ = ['Tokenizer', 'WordTokenizer', 'CharTokenizer', 8 | 'LabelTokenizer', 'NGramsTokenizer', 'NLTKWordTokenizer'] 9 | -------------------------------------------------------------------------------- /flambe/tokenizer/char.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from flambe.tokenizer import Tokenizer 4 | 5 | 6 | class CharTokenizer(Tokenizer): 7 | """Implement a character level tokenizer.""" 8 | 9 | def tokenize(self, example: str) -> List[str]: 10 | """Tokenize an input example. 
11 | 12 | Parameters 13 | ---------- 14 | example : str 15 | The input example, as a string 16 | 17 | Returns 18 | ------- 19 | List[str] 20 | The output charachter tokens, as a list of strings 21 | 22 | """ 23 | return list(example) 24 | -------------------------------------------------------------------------------- /flambe/tokenizer/label.py: -------------------------------------------------------------------------------- 1 | 2 | from typing import Optional, List 3 | 4 | from flambe.tokenizer import Tokenizer 5 | 6 | 7 | class LabelTokenizer(Tokenizer): 8 | """Base label tokenizer. 9 | 10 | This object tokenizes string labels into a list of a single or 11 | multiple elements, depending on the provided separator. 12 | 13 | """ 14 | def __init__(self, multilabel_sep: Optional[str] = None) -> None: 15 | """Initialize the tokenizer. 16 | 17 | Parameters 18 | ---------- 19 | multilabel_sep : Optional[str], optional 20 | Used to split multi label inputs, if given 21 | 22 | """ 23 | self.multilabel_sep = multilabel_sep 24 | 25 | def tokenize(self, example: str) -> List[str]: 26 | """Tokenize an input example. 27 | 28 | Parameters 29 | ---------- 30 | example : str 31 | The input example, as a string 32 | 33 | Returns 34 | ------- 35 | List[str] 36 | The output tokens, as a list of strings 37 | 38 | """ 39 | sep = self.multilabel_sep 40 | return example.split(sep) if sep else [example] 41 | -------------------------------------------------------------------------------- /flambe/tokenizer/tokenizer.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from typing import List 3 | 4 | from flambe import Component 5 | 6 | 7 | class Tokenizer(Component): 8 | """Base interface to a Tokenizer object. 9 | 10 | Tokenizers implement the `tokenize` method, which takes a 11 | string as input and produces a list of strings as output. 
12 | 13 | """ 14 | 15 | @abstractmethod 16 | def tokenize(self, example: str) -> List[str]: 17 | """Tokenize an input example. 18 | 19 | Parameters 20 | ---------- 21 | example : str 22 | The input example, as a string 23 | 24 | Returns 25 | ------- 26 | List[str] 27 | The output tokens, as a list of strings 28 | 29 | """ 30 | pass 31 | 32 | def __call__(self, example: str): 33 | """Make a tokenizer callable.""" 34 | return self.tokenize(example) 35 | -------------------------------------------------------------------------------- /flambe/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe.utils.config import generate_config_from_template 2 | 3 | __all__ = ['generate_config_from_template'] 4 | -------------------------------------------------------------------------------- /flambe/utils/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | from typing import Dict 4 | 5 | import jinja2 6 | 7 | 8 | def generate_config_from_template(template_path: str, 9 | config_path: str, 10 | remove_comments: bool = False, 11 | **template_kwargs: Dict[str, str]): 12 | """ 13 | Parameters 14 | ---------- 15 | template_path: str 16 | The path to the config template 17 | config_path: str 18 | The path to which the rendered config should be written 19 | remove_comments: bool 20 | If `True`, removes comments from the rendered config before 21 | writing it to disk 22 | template_kwargs: 23 | Keyword arguments to pass to your template, e.g. 
24 | `path='config.yaml', foo='bar'` 25 | 26 | Example config: 27 | 28 | ```yaml 29 | !Experiment 30 | 31 | foo: {{ bar }} 32 | baz: {{ skittles }} 33 | ``` 34 | 35 | If saved as config.yaml.template, then invoking: 36 | 37 | ```python 38 | generate_config_from_template('config.yaml.template', 39 | 'config.yaml', bar='pickles', skittles='yum') 40 | ``` 41 | 42 | the following config will be written to 'config.yaml': 43 | 44 | ```yaml 45 | !Experiment 46 | 47 | foo: pickles 48 | baz: yum 49 | ``` 50 | """ 51 | dirname = os.path.dirname(template_path) 52 | basename = os.path.basename(template_path) 53 | loader = jinja2.FileSystemLoader(searchpath=dirname) 54 | env = jinja2.Environment(loader=loader, autoescape=True) 55 | template = env.get_template(basename) 56 | with open(config_path, 'w') as f: 57 | for line in template.render(**template_kwargs).split('\n'): 58 | if remove_comments: 59 | line = re.sub('# .*', '', line).rstrip() 60 | if line: 61 | f.write(line + '\n') 62 | -------------------------------------------------------------------------------- /flambe/version.py: -------------------------------------------------------------------------------- 1 | MAJOR = "0" 2 | MINOR = "4" 3 | PATCH = "18" 4 | 5 | VERSION = f'{MAJOR}.{MINOR}.{PATCH}' 6 | -------------------------------------------------------------------------------- /flambe/vision/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe.vision import classification 2 | 3 | __all__ = ['classification'] 4 | -------------------------------------------------------------------------------- /flambe/vision/classification/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe.vision.classification.datasets import MNISTDataset 2 | from flambe.vision.classification.model import ImageClassifier 3 | 4 | __all__ = ['MNISTDataset', 'ImageClassifier'] 5 | 
-------------------------------------------------------------------------------- /flambe/vision/classification/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import Tensor 3 | 4 | from typing import Optional, Tuple, Union 5 | from flambe.nn import Module # type: ignore[attr-defined] 6 | 7 | 8 | class ImageClassifier(Module): 9 | """Implements a simple image classifier. 10 | 11 | This classifier consists of an encocder module, followed by 12 | a fully connected output layer that outputs a probability 13 | distribution. 14 | 15 | Attributes 16 | ---------- 17 | encoder: Moodule 18 | The encoder layer 19 | output_layer: Module 20 | The output layer, yields a probability distribution over targets 21 | """ 22 | def __init__(self, 23 | encoder: Module, 24 | output_layer: Module) -> None: 25 | super().__init__() 26 | 27 | self.encoder = encoder 28 | self.output_layer = output_layer 29 | 30 | def forward(self, 31 | data: Tensor, 32 | target: Optional[Tensor] = None) -> Union[Tensor, Tuple[Tensor, Tensor]]: 33 | """Run a forward pass through the network. 
34 | 35 | Parameters 36 | ---------- 37 | data: Tensor 38 | The input data 39 | target: Tensor, optional 40 | The input targets, optional 41 | 42 | Returns 43 | ------- 44 | Union[Tensor, Tuple[Tensor, Tensor] 45 | The output predictions, and optionally the targets 46 | 47 | """ 48 | encoded = self.encoder(data) 49 | pred = self.output_layer(torch.flatten(encoded, 1)) 50 | return (pred, target) if target is not None else pred 51 | -------------------------------------------------------------------------------- /imgs/Flambe_Logo_CMYK_Black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/imgs/Flambe_Logo_CMYK_Black.png -------------------------------------------------------------------------------- /imgs/Flambe_Logo_CMYK_FullColor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/imgs/Flambe_Logo_CMYK_FullColor.png -------------------------------------------------------------------------------- /imgs/Flambe_Logo_CMYK_Pink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/imgs/Flambe_Logo_CMYK_Pink.png -------------------------------------------------------------------------------- /imgs/Flambe_Logo_CMYK_White.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/imgs/Flambe_Logo_CMYK_White.png -------------------------------------------------------------------------------- /imgs/Flambe_Symbol_CMYK_Black.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/imgs/Flambe_Symbol_CMYK_Black.png -------------------------------------------------------------------------------- /imgs/Flambe_Symbol_CMYK_Pink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/imgs/Flambe_Symbol_CMYK_Pink.png -------------------------------------------------------------------------------- /imgs/Flambe_Symbol_CMYK_White.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/imgs/Flambe_Symbol_CMYK_White.png -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | ignore_missing_imports = True 3 | allow_redefinition = True 4 | ; follow_imports = skip 5 | ; warn_unused_configs = True 6 | -------------------------------------------------------------------------------- /readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Build documentation in the docs/ directory with Sphinx 9 | sphinx: 10 | configuration: docs/conf.py 11 | 12 | # Optionally build your docs in additional formats such as PDF and ePub 13 | formats: all 14 | 15 | # Optionally set the version of Python and requirements required to build your docs 16 | python: 17 | version: 3.6 18 | install: 19 | - requirements: docs/requirements.txt 20 | -------------------------------------------------------------------------------- /requirements.txt: 
-------------------------------------------------------------------------------- 1 | colorama~=0.3.9 2 | torch~=1.3.0 3 | numpy~=1.16.0 4 | ruamel.yaml>=0.15.87,<0.16.5 5 | pandas>=0.23.4 6 | tqdm~=4.28 7 | scikit-learn~=0.20.3 8 | paramiko~=2.7.1 9 | boto3>=1.9.100 10 | ray==0.7.6 11 | requests~=2.21.0 12 | Flask~=1.0.0 13 | tensorboardx-hparams==1.7.2 14 | GitPython~=2.1.11 15 | sru~=2.2.1 16 | pygments~=2.3.1 17 | nltk~=3.4.1 18 | dill>=0.3.1 19 | gensim==3.7.3 20 | pip~=19.1 21 | awscli~=1.17 22 | transformers~=2.2.1 23 | jinja2~=2.10.1 24 | ninja~=1.9.0 25 | -------------------------------------------------------------------------------- /scripts/deploy_documentation.py: -------------------------------------------------------------------------------- 1 | import mimetypes 2 | import boto3 3 | import os 4 | from tqdm import tqdm 5 | import sys 6 | 7 | 8 | if len(sys.argv) < 2: 9 | raise ValueError("Need to provide the Bucket Name") 10 | 11 | 12 | FLAMBE_BUCKET_NAME = sys.argv[1] 13 | 14 | 15 | def get_flambe_bucket(s3): 16 | for b in s3.buckets.all(): 17 | if b.name == FLAMBE_BUCKET_NAME: 18 | return b 19 | 20 | raise Exception("Flambe bucket not found") 21 | 22 | 23 | def get_mime_type(path): 24 | mimetype, _ = mimetypes.guess_type(path) 25 | if mimetype is None: 26 | raise Exception("Failed to guess mimetype") 27 | return mimetype 28 | 29 | 30 | def upload_documentation(bucket, doc_html_dir): 31 | # enumerate local files recursively 32 | for root, dirs, files in tqdm(os.walk(doc_html_dir)): 33 | for filename in files: 34 | # construct the full local path 35 | local_path = os.path.join(root, filename) 36 | 37 | # construct the full path 38 | s3_path = os.path.relpath(local_path, doc_html_dir) 39 | try: 40 | bucket.upload_file(local_path, s3_path, ExtraArgs={ 41 | "ContentType": get_mime_type(local_path) 42 | }) 43 | except Exception: 44 | print(f"Could not upload {s3_path}.") 45 | 46 | 47 | if __name__ == "__main__": 48 | _dir = 
os.path.dirname(os.path.abspath(__file__)) 49 | html_dir = os.path.join(_dir, "..", "docs", "_build", "html") 50 | 51 | if not os.path.exists(html_dir): 52 | print("Docuementation HTML not found. Were the docs built?") 53 | sys.exit(1) 54 | 55 | s3 = boto3.resource('s3') 56 | b = get_flambe_bucket(s3) 57 | upload_documentation(b, html_dir) 58 | -------------------------------------------------------------------------------- /scripts/publish_documentation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | set -e 4 | 5 | BASE=${PWD} 6 | ENV_DIR=${BASE}/flambe-docu-env_${BUILD_NUMBER} 7 | 8 | virtualenv -p python3.6 ${ENV_DIR} 9 | source ${ENV_DIR}/bin/activate 10 | 11 | pip install . 12 | pip install -r docs/requirements.txt 13 | 14 | cd docs 15 | make html 16 | cd .. 17 | 18 | pip install boto3 19 | pip install tqdm 20 | 21 | python scripts/deploy_documentation.py ${BUCKET_NAME} 22 | 23 | deactivate 24 | 25 | rm -Rf ${ENV_DIR} 26 | unset ENV_DIR 27 | unset BASE 28 | -------------------------------------------------------------------------------- /scripts/publish_pypi.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | BASE=${PWD} 4 | ENV_DIR=${BASE}/flambe-release-env_${BUILD_NUMBER} 5 | 6 | set -x 7 | 8 | virtualenv -p python3.6 ${ENV_DIR} --system-site-packages 9 | source ${ENV_DIR}/bin/activate 10 | 11 | TAG_VERSION=$(echo "${GIT_BRANCH}" | cut -d / -f 3) 12 | echo $TAG_VERSION 13 | echo "${GIT_BRANCH}" 14 | echo "${TAG_VERSION}" 15 | 16 | pip install twine 17 | 18 | TAG_VERSION=${TAG_VERSION} python setup.py sdist bdist_wheel 19 | twine upload --repository-url ${PYPI_REPO_URL} dist/* 20 | 21 | deactivate 22 | 23 | rm -Rf ${ENV_DIR} 24 | unset ENV_DIR 25 | unset BASE 26 | -------------------------------------------------------------------------------- /setup.py: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | """ 4 | setup.py 5 | """ 6 | 7 | from setuptools import setup, find_packages 8 | from typing import Dict 9 | import os 10 | 11 | 12 | NAME = "flambe" 13 | AUTHOR = "ASAPP Inc." 14 | EMAIL = "flambe@asapp.com" 15 | DESCRIPTION = "Pytorch based library for robust prototyping, standardized \ 16 | benchmarking, and effortless experiment management" 17 | 18 | 19 | def readme(): 20 | with open('README-pypi.rst', encoding='utf-8') as f: 21 | return f.read() 22 | 23 | 24 | def required(): 25 | with open('requirements.txt') as f: 26 | return f.read().splitlines() 27 | 28 | 29 | # So that we don't import flambe. 30 | VERSION: Dict[str, str] = {} 31 | with open("flambe/version.py", "r") as version_file: 32 | exec(version_file.read(), VERSION) 33 | 34 | setup( 35 | 36 | name=NAME, 37 | version=os.environ.get("TAG_VERSION", VERSION['VERSION']), 38 | 39 | description=DESCRIPTION, 40 | long_description=readme(), 41 | long_description_content_type="text/x-rst; charset=UTF-8", 42 | 43 | # Author information 44 | author=AUTHOR, 45 | author_email=EMAIL, 46 | 47 | # What is packaged here. 48 | packages=find_packages(exclude=("tests", "tests.*", "extensions")), 49 | scripts=[ 50 | 'bin/flambe', 51 | 'bin/flambe-site' 52 | ], 53 | 54 | install_requires=required(), 55 | include_package_data=True, 56 | 57 | python_requires='>=3.6.1', 58 | zip_safe=True 59 | 60 | ) 61 | -------------------------------------------------------------------------------- /sonar-project.properties: -------------------------------------------------------------------------------- 1 | sonar.organization=asappresearch 2 | sonar.projectKey=asappresearch_flambe 3 | 4 | 5 | # relative paths to source directories. 
More details and properties are described 6 | # in https://sonarcloud.io/documentation/project-administration/narrowing-the-focus/ 7 | sonar.sources=flambe 8 | sonar.tests=tests 9 | sonar.python.xunit.reportPath=artifacts/pytest/junit-pytest-report.xml 10 | sonar.python.coverage.reportPaths=artifacts/pytest/full_path_coverage.xml 11 | sonar.python.coveragePlugin=cobertura 12 | -------------------------------------------------------------------------------- /tests/.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | # Regexes for lines to exclude from consideration 3 | exclude_lines = 4 | # Have to re-enable the standard pragma 5 | pragma: no cover 6 | 7 | # Don't complain about missing debug-only code: 8 | def __repr__ 9 | if self\.debug 10 | 11 | # Don't complain if tests don't hit defensive assertion code: 12 | raise AssertionError 13 | raise NotImplementedError 14 | 15 | # Don't complain if non-runnable code isn't run: 16 | if 0: 17 | if __name__ == .__main__.: 18 | 19 | include = */flambe/* 20 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | The [pytest](https://docs.pytest.org/en/latest/contents.html#toc) framework will be used for this project. 2 | This document will highlight the main concepts to keep in mind. For details, more nuanced cases, or examples, click through the `[Read more]` sections. 3 | 4 | 5 | ##### Key points 6 | * All files should be named test\_\*.py or \*\_test.py. All test functions should also be prefaced with test\_\*. \[[Read more](https://docs.pytest.org/en/latest/goodpractices.html#test-discovery)\] 7 | * Use fixtures for shared functions and objects. \[[Read more](https://docs.pytest.org/en/latest/fixture.html#fixture)\] 8 | * Parameterize functions to avoid writing duplciate code. 
\[[Read more](https://docs.pytest.org/en/latest/parametrize.html)\] 9 | * Use attributes to mark special test cases. \[[Read more](https://docs.pytest.org/en/latest/mark.html#mark)\] 10 | * This is the dedicated `test` directory. It will exactly mimic the layout of the main directory. Tests for any specific module will be placed within its corresponding folder/sub-folders within this directory. \[[Read more](https://docs.pytest.org/en/latest/goodpractices.html#tests-outside-application-code)\] 11 |
For example: 12 | ``` 13 | module1/ 14 | module2/ 15 | file1.py 16 | tests/ 17 | module1/ 18 | module2/ 19 | test_file1.py 20 | ``` 21 | * Any data files for tests will live in the `data` directory. 22 | * There should be one concept per test. This might take the form of multiple assertions, but only one main thing should be tested. It needs to be clear from the tests what exactly failed. 23 | 24 | ##### Running 25 | 26 | Simply call `pytest [file/directory]` via command line. \[[Read more](https://docs.pytest.org/en/latest/usage.html)\] 27 | 28 | ##### Plugins 29 | * [pytest-pep8](https://pypi.org/project/pytest-pep8/): check pep8 compliance for all files 30 | * [pytest-cov](https://pypi.org/project/pytest-cov/): automatically produce coverage reports 31 | * [pytest-mock](https://github.com/pytest-dev/pytest-mock): monkeypatch fixtures 32 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | 4 | 5 | @pytest.fixture 6 | def top_level(): 7 | parent = os.path.dirname 8 | return parent(parent(os.path.abspath(__file__))) 9 | -------------------------------------------------------------------------------- /tests/data/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/data/.gitignore -------------------------------------------------------------------------------- /tests/data/dummy_configs/config.txt: 
-------------------------------------------------------------------------------- 1 | This is a text file 2 | -------------------------------------------------------------------------------- /tests/data/dummy_configs/wrong_config.yaml: -------------------------------------------------------------------------------- 1 | !Experiment 2 | 3 | name: sst-text-classification-lstm 4 | 5 | save_path: flambe_text_classification 6 | 7 | schedulesr: 8 | b1: !ray.HyperBandScheduler 9 | reward_attr: dev_metric 10 | 11 | redcue: 12 | b1: 1 13 | 14 | pipeline: 15 | b0: !TCProcessor 16 | dataset: !SSTDataset 17 | b1: !Trainer 18 | train_data: !link b0.train 19 | dev_data: !link b0.dev 20 | train_sampler: !BaseSampler 21 | batch_size: 32 22 | dev_sampler: !BaseSampler 23 | batch_size: 512 24 | model: !TextClassifier 25 | embedding: !EmbeddingEncoder 26 | input_size: !link b0.vocab_size 27 | embedding_size: 300 28 | encoder: !RNNEncoder 29 | input_size: 300 30 | rnn_type: lstm 31 | n_layers: !g [2, 3, 4] 32 | hidden_size: 256 33 | pooling: last 34 | decoder: !SoftmaxDecoder 35 | input_size: !link b1.model.encoder.rnn.hidden_size 36 | output_size: !link b0.num_labels 37 | optimizer: !torch.Adam 38 | params: !link b1.model.trainable_params 39 | max_steps: 2 40 | iter_per_step: 2 41 | b2: !Evaluator 42 | eval_data: !link b0.dev 43 | model: !link b1.model 44 | eval_sampler: !BaseSampler 45 | batch_size: 512 46 | -------------------------------------------------------------------------------- /tests/data/dummy_embeddings/test.txt: -------------------------------------------------------------------------------- 1 | this 0.1 0.2 0.3 0.4 2 | is 0.5 0.6 0.7 0.8 3 | a 0.9 0.1 0.2 0.3 4 | test 0.4 0.5 0.6 0.7 5 | -------------------------------------------------------------------------------- /tests/data/dummy_extensions/inference/flambe_inference/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe_inference.obj import 
DummyInferenceEngine 2 | 3 | 4 | __all__ = ["DummyInferenceEngine"] 5 | -------------------------------------------------------------------------------- /tests/data/dummy_extensions/inference/flambe_inference/obj.py: -------------------------------------------------------------------------------- 1 | from flambe import Component 2 | from flambe.nlp.classification import TextClassifier 3 | 4 | 5 | class DummyInferenceEngine(Component): 6 | 7 | def __init__(self, model: TextClassifier) -> None: 8 | self.model = model 9 | 10 | def run(self): 11 | print(self.model) 12 | -------------------------------------------------------------------------------- /tests/data/dummy_extensions/inference/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | 4 | setup( 5 | name='flambe-inference', 6 | version=0.0, 7 | description='Testing a script', 8 | packages=find_packages(), 9 | install_requires=['argparse'] 10 | ) 11 | -------------------------------------------------------------------------------- /tests/data/dummy_extensions/runnable/flambe_runnable/__init__.py: -------------------------------------------------------------------------------- 1 | from flambe_runnable.runnable import DummyRunnable 2 | 3 | 4 | __all__ = ["DummyRunnable"] 5 | -------------------------------------------------------------------------------- /tests/data/dummy_extensions/runnable/flambe_runnable/runnable.py: -------------------------------------------------------------------------------- 1 | from flambe.runnable import Runnable 2 | 3 | 4 | class DummyRunnable(Runnable): 5 | 6 | def run(self, **kwargs) -> None: 7 | print("Dummy Runnable") 8 | -------------------------------------------------------------------------------- /tests/data/dummy_extensions/runnable/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | 4 | setup( 
5 | name='flambe-runnable', 6 | version=0.0, 7 | description='Dummy Runnable', 8 | packages=find_packages(), 9 | ) 10 | -------------------------------------------------------------------------------- /tests/data/dummy_extensions/script/flambe_script/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/data/dummy_extensions/script/flambe_script/__init__.py -------------------------------------------------------------------------------- /tests/data/dummy_extensions/script/flambe_script/train.py: -------------------------------------------------------------------------------- 1 | from flambe import log 2 | import argparse 3 | 4 | 5 | def my_script(arg1: str, arg2: str, kwarg1: int, kwarg2: str): 6 | """Test script""" 7 | for i in range(10): 8 | msg = f'arg1: {arg1}, ' 9 | msg += f'arg2: {arg2}, ' 10 | msg += f'kwarg1: {kwarg1}, ' 11 | msg += f'kwarg2: {kwarg2}' 12 | log(msg, float(i), global_step=i) 13 | 14 | 15 | if __name__ == "__main__": 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('arg1', type=str) 18 | parser.add_argument('arg2', type=str) 19 | parser.add_argument('--kwarg1', type=int) 20 | parser.add_argument('--kwarg2', type=str) 21 | args = parser.parse_args() 22 | my_script(args.arg1, args.arg2, args.kwarg1, args.kwarg2) 23 | -------------------------------------------------------------------------------- /tests/data/dummy_extensions/script/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | 4 | setup( 5 | name='flambe-script', 6 | version=0.0, 7 | description='Testing a script', 8 | packages=find_packages(), 9 | install_requires=['argparse'] 10 | ) 11 | -------------------------------------------------------------------------------- /tests/integration/end2end/chain_intrablock.yaml: 
-------------------------------------------------------------------------------- 1 | !Experiment 2 | 3 | name: basic-example 4 | 5 | pipeline: 6 | encoder: !PooledRNNEncoder 7 | input_size: 300 8 | rnn_type: lstm 9 | n_layers: 2 10 | hidden_size: 256 11 | dataset: !TabularDataset.from_path 12 | train_path: {top_level}/tests/data/dummy_tabular/train.csv 13 | val_path: {top_level}/tests/data/dummy_tabular/val.csv 14 | test_path: {top_level}/tests/data/dummy_tabular/test.csv 15 | sep: ',' 16 | transform: 17 | text: !TextField 18 | label: !LabelField 19 | model: !TextClassifier 20 | embedder: !Embedder 21 | embedding: !torch.Embedding 22 | num_embeddings: !@ dataset.text.vocab_size 23 | embedding_dim: 300 24 | encoder: !@ encoder 25 | output_layer: !SoftmaxLayer 26 | input_size: !@ model[embedder].encoder.rnn.hidden_size 27 | output_size: !@ dataset.label.vocab_size 28 | b1: !Trainer 29 | dataset: !@ dataset 30 | train_sampler: !BaseSampler 31 | val_sampler: !BaseSampler 32 | model: !@ model 33 | loss_fn: !torch.NLLLoss 34 | metric_fn: !Accuracy 35 | optimizer: !torch.Adam 36 | params: !@ b1[model].trainable_params 37 | max_steps: 2 38 | iter_per_step: 2 39 | b2: !Trainer 40 | dataset: !@ b1.dataset 41 | train_sampler: !BaseSampler 42 | val_sampler: !BaseSampler 43 | model: !TextClassifier 44 | embedder: !Embedder 45 | embedding: !torch.Embedding 46 | num_embeddings: !@ b1.model.embedder.embedding.num_embeddings 47 | embedding_dim: 300 48 | encoder: !PooledRNNEncoder 49 | input_size: !@ b2[model][embedder][embedding].embedding_dim 50 | # input_size: !g 51 | # - !@ b2[model][embedder][embedding].embedding_dim 52 | # - !@ b1.model.embedder.embedding.embedding_dim 53 | rnn_type: lstm 54 | n_layers: !g [2, 3] 55 | hidden_size: !@ b2[model][embedder][encoder][input_size] 56 | output_layer: !SoftmaxLayer 57 | input_size: !@ b2[model][embedder][encoder].rnn.hidden_size 58 | output_size: !@ dataset.label.vocab_size 59 | loss_fn: !torch.NLLLoss 60 | metric_fn: !Accuracy 61 | 
optimizer: !torch.Adam 62 | params: !@ b2[model].trainable_params 63 | max_steps: 2 64 | iter_per_step: 2 65 | b3: !Evaluator 66 | dataset: !@ b2.dataset 67 | model: !@ b2.model 68 | metric_fn: !Accuracy 69 | eval_sampler: !BaseSampler 70 | batch_size: 512 71 | 72 | schedulers: # Define how to schedule variants based on a metric 73 | b2: !ray.HyperBandScheduler 74 | reward_attr: dev_metric # This should be an attribute on the Trainer class 75 | reduce: # Only use the best variant in subsequent blocks 76 | b2: 1 77 | -------------------------------------------------------------------------------- /tests/integration/end2end/image.yaml: -------------------------------------------------------------------------------- 1 | !Experiment 2 | 3 | name: image_classification 4 | 5 | pipeline: 6 | model: !ImageClassifier 7 | encoder: !CNNEncoder 8 | input_channels: 1 9 | channels: [1] 10 | kernel_size: [5] 11 | output_layer: !MLPEncoder 12 | input_size: 576 13 | n_layers: 2 14 | output_size: 10 15 | output_activation: !torch.LogSoftmax 16 | hidden_size: 128 17 | 18 | train: !Trainer 19 | dataset: !MNISTDataset 20 | train_sampler: !BaseSampler 21 | batch_size: 64 22 | downsample: 0.01 23 | val_sampler: !BaseSampler 24 | downsample: 0.01 25 | model: !@ model 26 | loss_fn: !torch.NLLLoss 27 | metric_fn: !Accuracy 28 | optimizer: !torch.Adam 29 | params: !@ train[model].trainable_params 30 | max_steps: 1 31 | iter_per_step: 1 -------------------------------------------------------------------------------- /tests/integration/end2end/lm.yaml: -------------------------------------------------------------------------------- 1 | !Experiment 2 | 3 | name: language_modeling 4 | save_path: . 
5 | 6 | pipeline: 7 | 1_train: !Trainer 8 | dataset: !TabularDataset.from_path 9 | train_path: {top_level}/tests/data/dummy_tabular/train.csv 10 | val_path: {top_level}/tests/data/dummy_tabular/val.csv 11 | sep: ',' 12 | columns: [text] 13 | transform: 14 | text: !LMField 15 | eos_token: 16 | train_sampler: !BaseSampler 17 | batch_size: 64 18 | val_sampler: !BaseSampler 19 | batch_size: 64 20 | model: !LanguageModel 21 | embedder: !Embedder 22 | embedding: !torch.Embedding 23 | num_embeddings: !@ 1_train[dataset].text.vocab_size 24 | embedding_dim: 300 25 | encoder: !RNNEncoder 26 | input_size: 300 27 | rnn_type: lstm 28 | n_layers: 2 29 | hidden_size: 256 30 | output_layer: !SoftmaxLayer 31 | input_size: !@ 1_train[model][embedder].encoder.rnn.hidden_size 32 | output_size: !@ 1_train[dataset].text.vocab_size 33 | loss_fn: !torch.NLLLoss 34 | metric_fn: !Perplexity 35 | optimizer: !torch.Adam 36 | params: !@ 1_train[model].trainable_params 37 | lower_is_better: True 38 | max_steps: 1 39 | iter_per_step: 1 -------------------------------------------------------------------------------- /tests/integration/end2end/random_tag.yaml: -------------------------------------------------------------------------------- 1 | !Experiment 2 | 3 | name: text_classification 4 | 5 | pipeline: 6 | b1: !Trainer 7 | dataset: !TabularDataset.from_path 8 | train_path: {top_level}/tests/data/dummy_tabular/train.csv 9 | val_path: {top_level}/tests/data/dummy_tabular/val.csv 10 | sep: ',' 11 | transform: 12 | text: !TextField 13 | label: !LabelField 14 | train_sampler: !BaseSampler 15 | val_sampler: !BaseSampler 16 | model: !TextClassifier 17 | embedder: !Embedder 18 | embedding: !torch.Embedding 19 | num_embeddings: !@ b1[dataset].text.vocab_size 20 | embedding_dim: 300 21 | encoder: !PooledRNNEncoder 22 | input_size: 300 23 | rnn_type: lstm 24 | n_layers: !s [1, 5, 2] 25 | hidden_size: 256 26 | output_layer: !SoftmaxLayer 27 | input_size: !@ b1[model][embedder].encoder.rnn.hidden_size 28 | 
output_size: !@ b1[dataset].label.vocab_size 29 | loss_fn: !torch.NLLLoss 30 | metric_fn: !Accuracy 31 | optimizer: !torch.Adam 32 | params: !@ b1[model].trainable_params 33 | max_steps: 1 34 | iter_per_step: 1 35 | -------------------------------------------------------------------------------- /tests/integration/end2end/script.yaml: -------------------------------------------------------------------------------- 1 | flambe_script: {top_level}/tests/data/dummy_extensions/script 2 | --- 3 | !Experiment 4 | 5 | name: script_test 6 | 7 | pipeline: 8 | stage_0: !Script 9 | script: flambe_script.train 10 | args: ['arg1', 'arg2'] 11 | kwargs: 12 | kwarg1: !g [1, 5] # Run a grid search over any arguments to your script 13 | kwarg2: 'foo' 14 | -------------------------------------------------------------------------------- /tests/integration/end2end/tc.yaml: -------------------------------------------------------------------------------- 1 | !Experiment 2 | 3 | name: text_classification 4 | save_path: . 
5 | 6 | pipeline: 7 | b1: !Trainer 8 | dataset: !TabularDataset.from_path 9 | train_path: {top_level}/tests/data/dummy_tabular/train.csv 10 | val_path: {top_level}/tests/data/dummy_tabular/val.csv 11 | sep: ',' 12 | transform: 13 | text: !TextField 14 | label: !LabelField 15 | train_sampler: !BaseSampler 16 | val_sampler: !BaseSampler 17 | model: !TextClassifier 18 | embedder: !Embedder 19 | embedding: !torch.Embedding 20 | num_embeddings: !@ b1[dataset].text.vocab_size 21 | embedding_dim: 300 22 | encoder: !PooledRNNEncoder 23 | input_size: 300 24 | rnn_type: lstm 25 | n_layers: 2 26 | hidden_size: 256 27 | output_layer: !SoftmaxLayer 28 | input_size: !@ b1[model][embedder].encoder.rnn.hidden_size 29 | output_size: !@ b1[dataset].label.vocab_size 30 | loss_fn: !torch.NLLLoss 31 | metric_fn: !Accuracy 32 | optimizer: !torch.Adam 33 | params: !@ b1[model].trainable_params 34 | max_steps: 1 35 | iter_per_step: 1 36 | extra_validation_metrics: 37 | - !@ b1[metric_fn] 38 | - !Accuracy 39 | extra_training_metrics: 40 | - !Accuracy 41 | -------------------------------------------------------------------------------- /tests/integration/end2end/transformer.yaml: -------------------------------------------------------------------------------- 1 | !Experiment 2 | 3 | name: transformer 4 | 5 | pipeline: 6 | 7 | train: !Trainer 8 | dataset: !TabularDataset.from_path 9 | train_path: {top_level}/tests/data/dummy_tabular/train.csv 10 | val_path: {top_level}/tests/data/dummy_tabular/val.csv 11 | sep: ',' 12 | transform: 13 | text: !TextField 14 | lower: True 15 | label: !LabelField 16 | model: !TextClassifier 17 | embedder: !Embedder 18 | embedding: !Embeddings 19 | num_embeddings: !@ train[dataset].text.vocab_size 20 | embedding_dim: 300 21 | positional_encoding: True 22 | positional_learned: True 23 | embedding_dropout: 0.2 24 | encoder: !TransformerEncoder 25 | input_size: 300 26 | d_model: 300 27 | num_layers: 4 28 | nhead: 1 29 | dim_feedforward: 300 30 | dropout: 0.2 31 | 
pooling: !LastPooling 32 | output_layer: !SoftmaxLayer 33 | take_log: True 34 | input_size: 300 35 | output_size: !@ train[dataset].label.vocab_size 36 | train_sampler: !BaseSampler 37 | shuffle: True 38 | val_sampler: !BaseSampler 39 | shuffle: False 40 | loss_fn: !torch.NLLLoss 41 | metric_fn: !Accuracy 42 | optimizer: !torch.Adam 43 | params: !@ train[model].trainable_params 44 | max_steps: 1 45 | iter_per_step: 1 46 | -------------------------------------------------------------------------------- /tests/integration/test_examples.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from flambe.compile import Schema 3 | from flambe.experiment import Experiment 4 | from tempfile import TemporaryDirectory as tmpdir 5 | from tempfile import NamedTemporaryFile as tmpfile 6 | import subprocess 7 | import os 8 | from flambe.compile import yaml 9 | 10 | 11 | def _reduce_iterations(d): 12 | """Recursively update any iteration's config 13 | 14 | """ 15 | for k, v in d.items(): 16 | if k == 'max_steps' or k == 'iter_per_step' or k == 'epoch_per_step': 17 | d[k] = 1 18 | 19 | elif isinstance(v, Schema): 20 | _reduce_iterations(v) 21 | 22 | 23 | def _preprocess_experiment(fname, save_path): 24 | content = list(yaml.load_all(open(fname))) 25 | experiment = content[-1] 26 | if isinstance(experiment, Experiment): 27 | experiment.set_serializable_attr("save_path", save_path) 28 | _reduce_iterations(experiment.pipeline) 29 | return content 30 | 31 | return None 32 | 33 | 34 | def run_experiments(base, **kwargs): 35 | """Run all experiments found in base param. 36 | and check that flambe executes without errors. 37 | 38 | Before running the configs, it updates the save_path to 39 | be a tempdir and updates (potentially) the iteration's 40 | params (if found) to be 1. 
41 | 42 | """ 43 | for fname in os.listdir(base): 44 | full_f = os.path.join(base, fname) 45 | if os.path.isfile(full_f) and fname.endswith('yaml'): 46 | with tmpdir() as d, tmpfile() as f, tmpfile('w') as t: 47 | content = open(full_f).read().format(**kwargs) 48 | t.write(content) 49 | t.flush() 50 | new_exp = _preprocess_experiment(t.name, d) 51 | if new_exp: 52 | yaml.dump_all(new_exp, f) 53 | ret = subprocess.run(['flambe', f.name, '-i']) 54 | assert ret.returncode == 0 55 | 56 | 57 | @pytest.mark.end2end 58 | def test_end2end_experiments(top_level): 59 | """Runs all experiments found in the integration's 60 | folder 61 | 62 | """ 63 | tests_base = os.path.dirname(os.path.dirname(__file__)) 64 | base = os.path.join(tests_base, "integration", "end2end") 65 | run_experiments(base, top_level=top_level) 66 | 67 | 68 | @pytest.mark.examples 69 | def test_examples_experiments(): 70 | """Runs all experiments found in top level examples 71 | folder 72 | 73 | """ 74 | tests_base = os.path.dirname(os.path.dirname(__file__)) 75 | base = os.path.join(os.path.dirname(tests_base), "examples") 76 | run_experiments(base) 77 | -------------------------------------------------------------------------------- /tests/integration/test_resources_experiment.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | import subprocess 3 | import os 4 | 5 | 6 | def test_resources_config(): 7 | config = """ 8 | !Experiment 9 | 10 | name: random 11 | save_path: {} 12 | 13 | resources: 14 | train: {} 15 | 16 | pipeline: 17 | dataset: !TabularDataset.from_path 18 | train_path: !@ train 19 | 20 | """ 21 | 22 | with tempfile.TemporaryDirectory() as d, tempfile.NamedTemporaryFile(mode="w", suffix=".yaml") as f: 23 | test_data_folder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data') 24 | train_data = os.path.join(test_data_folder, 'dummy_tabular', 'train.csv') 25 | 26 | exp = config.format(d, train_data) 27 | 
f.write(exp) 28 | f.flush() 29 | ret = subprocess.run(['flambe', f.name]) 30 | assert ret.returncode == 0 31 | -------------------------------------------------------------------------------- /tests/requirements.txt: -------------------------------------------------------------------------------- 1 | typed-ast>=1.3.1 2 | flake8>=3.7.7 3 | mypy==0.761 4 | mypy-extensions>=0.4.1 5 | pytest>=4.0.2 6 | pytest-bandit>=0.3.1 7 | pytest-cache>=1.0 8 | pytest-cov>=2.4.0 9 | coverage>=5.0.3 10 | pytest-flake8>=1.0.4 11 | pytest-mock>=1.10.0 12 | pytest-pep8>=1.0.6 13 | moto>=1.3.8 14 | safety>=1.8.5 15 | bandit>=1.6.2 16 | responses>=0.10.6 17 | flake8-formatter-junit-xml==0.0.6 18 | -------------------------------------------------------------------------------- /tests/unit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/__init__.py -------------------------------------------------------------------------------- /tests/unit/compile/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/compile/__init__.py -------------------------------------------------------------------------------- /tests/unit/compile/test_extensions.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from flambe.compile import extensions as exts 4 | 5 | 6 | def test_download_extensions(): 7 | extensions = { 8 | 'ext': './local/file', 9 | 'ext1': 'other/local/file', 10 | 'ext2': 'other', 11 | 'ext2': 'pypi==1.12.1', 12 | } 13 | 14 | ret = exts.download_extensions(extensions, None) 15 | for k, v in extensions.items(): 16 | assert k in ret 17 | assert v == ret[k] 18 | 19 | 20 | def test_is_installed_module(): 21 | assert 
exts.is_installed_module("pytest") is True 22 | assert exts.is_installed_module("some_inexistent_package_0987654321") is False 23 | -------------------------------------------------------------------------------- /tests/unit/compile/test_utils.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | import mock 3 | 4 | 5 | from flambe.compile.utils import write_deps 6 | 7 | 8 | def test_write_deps(): 9 | dummy_dependencies = ['numpy==1.2.3', 'pip~=1.1.1', 'some_other-random dep'] 10 | with tempfile.NamedTemporaryFile() as tmpfile: 11 | write_deps(tmpfile.name, dummy_dependencies) 12 | 13 | assert tmpfile.read() == b'numpy==1.2.3\npip~=1.1.1\nsome_other-random dep' 14 | 15 | 16 | @mock.patch('flambe.compile.utils.get_frozen_deps') 17 | def test_write_deps_default(mock_deps): 18 | mock_deps.return_value = ['numpy==1.2.3', 'pip~=1.1.1', 'some_other-random dep'] 19 | with tempfile.NamedTemporaryFile() as tmpfile: 20 | write_deps(tmpfile.name) 21 | assert tmpfile.read() == b'numpy==1.2.3\npip~=1.1.1\nsome_other-random dep' 22 | mock_deps.assert_called_once() 23 | -------------------------------------------------------------------------------- /tests/unit/dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/dataset/__init__.py -------------------------------------------------------------------------------- /tests/unit/experiment/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/experiment/__init__.py -------------------------------------------------------------------------------- /tests/unit/experiment/test_experiment.py: -------------------------------------------------------------------------------- 1 | from 
flambe.experiment import Experiment 2 | from flambe.runnable import RemoteEnvironment 3 | 4 | import mock 5 | import pytest 6 | 7 | 8 | @pytest.fixture 9 | def get_experiment(): 10 | def wrapped(**kwargs): 11 | return Experiment(name=kwargs.get('name', 'test'), 12 | pipeline=kwargs.get('pipeline', {}), 13 | resume=kwargs.get('resume', False), 14 | devices=kwargs.get('devices', None), 15 | save_path=kwargs.get('save_path', None), 16 | resources=kwargs.get('resources', None), 17 | search=kwargs.get('search', None), 18 | schedulers=kwargs.get('schedulers', None), 19 | reduce=kwargs.get('reduce', None), 20 | env=kwargs.get('env', None), 21 | max_failures=kwargs.get('max_failures', 1), 22 | stop_on_failure=kwargs.get('stop_on_failure', True), 23 | merge_plot=kwargs.get('merge_plot', True), 24 | user_provider=kwargs.get('user_provider', None)) 25 | return wrapped 26 | 27 | 28 | @pytest.fixture 29 | def get_env(): 30 | def wrapped(**kwargs): 31 | env = RemoteEnvironment( 32 | key=kwargs.get('key', 'my-key'), 33 | orchestrator_ip=kwargs.get('orchestrator_ip', '1.1.1.1'), 34 | factories_ips=kwargs.get('factories_ips', ['1.1.1.1']), 35 | user=kwargs.get('user', 'ubuntu'), 36 | local_user=kwargs.get('local_user', 'some_user'), 37 | ) 38 | return env 39 | return wrapped 40 | 41 | 42 | def test_get_user(get_experiment, get_env): 43 | exp = get_experiment(user_provider=lambda: "foobar") 44 | assert exp.get_user() == 'foobar' 45 | 46 | exp = get_experiment(env=get_env(local_user='barfoo')) 47 | assert exp.get_user() == 'barfoo' 48 | -------------------------------------------------------------------------------- /tests/unit/experiment/test_options.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from flambe.experiment import options 4 | 5 | 6 | def test_g_tag(): 7 | l = list(range(5)) 8 | grid = options.GridSearchOptions.from_sequence(l) 9 | 10 | for i, each in enumerate(grid): 11 | assert each == l[i] 12 | 13 | 14 | 
def test_s_tag(): 15 | # Test k 16 | 17 | for k in range(5, 1): 18 | l = [1, 10, k] 19 | opts = options.SampledUniformSearchOptions.from_sequence(l) 20 | 21 | assert len(opts) == k 22 | 23 | 24 | def test_s_tag_2(): 25 | # Test int sampling 26 | l = [1, 10, 1, 10] 27 | 28 | opts = options.SampledUniformSearchOptions.from_sequence(l) 29 | 30 | for each in opts: 31 | assert isinstance(each, int) 32 | 33 | 34 | 35 | def test_s_tag_3(): 36 | # Test float sampling 37 | 38 | l = [1, 10.1, 1] 39 | 40 | opts = options.SampledUniformSearchOptions.from_sequence(l) 41 | 42 | for each in opts: 43 | assert isinstance(each, float) 44 | 45 | l = [1.1, 10.1, 1] 46 | 47 | opts = options.SampledUniformSearchOptions.from_sequence(l) 48 | 49 | for each in opts: 50 | assert isinstance(each, float) 51 | 52 | 53 | l = [1.1, 10, 1] 54 | 55 | opts = options.SampledUniformSearchOptions.from_sequence(l) 56 | 57 | for each in opts: 58 | assert isinstance(each, float) 59 | 60 | 61 | def test_s_tag_4(): 62 | # Test decimals 63 | d = 5 64 | l = [1, 10.1, 5, d] 65 | 66 | opts = options.SampledUniformSearchOptions.from_sequence(l) 67 | 68 | for each in opts: 69 | decimals = len(str(each)[str(each).find('.') + 1:]) 70 | assert decimals <= d 71 | 72 | 73 | def test_s_tag_incorrect_params(): 74 | # Test decimals 75 | l = [1, 10.1, 5, 1, 1] 76 | 77 | with pytest.raises(ValueError): 78 | options.SampledUniformSearchOptions.from_sequence(l) 79 | 80 | 81 | l = [1] 82 | 83 | with pytest.raises(ValueError): 84 | options.SampledUniformSearchOptions.from_sequence(l) 85 | -------------------------------------------------------------------------------- /tests/unit/field/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/field/__init__.py -------------------------------------------------------------------------------- /tests/unit/learn/test_trainer.py: 
-------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from torch.nn import NLLLoss 4 | from torch.optim import Adam 5 | 6 | from flambe.learn import train 7 | 8 | from flambe.dataset import Dataset 9 | from flambe.compile import Schema, State, Component, Link 10 | from flambe.learn.utils import select_device 11 | from flambe.nn import Module # type: ignore[attr-defined] 12 | from flambe.sampler import BaseSampler 13 | from flambe.metric import Metric 14 | from flambe.logging import log 15 | 16 | 17 | class DummyDataset(Dataset): 18 | @property 19 | def train(self): 20 | return [['hello']] 21 | 22 | @property 23 | def val(self): 24 | pass 25 | 26 | @property 27 | def test(self): 28 | pass 29 | 30 | 31 | class DummyModel(Module): 32 | pass 33 | 34 | 35 | @pytest.fixture() 36 | def trainer(): 37 | return train.Trainer( 38 | dataset=DummyDataset(), 39 | train_sampler=BaseSampler(), 40 | val_sampler=BaseSampler(), 41 | model=DummyModel(), 42 | loss_fn=NLLLoss(), 43 | metric_fn=NLLLoss(), 44 | optimizer=Adam, 45 | extra_validation_metrics=[NLLLoss()] * 3 46 | ) 47 | 48 | 49 | def test_validation_metrics_property(trainer): 50 | assert trainer.validation_metrics == trainer.extra_validation_metrics 51 | assert len(trainer.validation_metrics) == 3 52 | -------------------------------------------------------------------------------- /tests/unit/metrics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/metrics/__init__.py -------------------------------------------------------------------------------- /tests/unit/metrics/test_loss.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | 4 | from flambe.metric import MultiLabelCrossEntropy, MultiLabelNLLLoss 5 | 6 | 7 | def test_cross_entropy_one_hot(): 8 | 
"""Test cross entropy loss when one hot""" 9 | y_pred = torch.tensor([[0.2, 0.8], [0.9, 0.1]]) 10 | y_true = torch.tensor([[1, 0], [1, 0]]) 11 | 12 | loss = MultiLabelCrossEntropy() 13 | assert abs(loss(y_pred, y_true).item() - 0.70429) < 1e-2 14 | 15 | 16 | def test_nllloss_one_hot(): 17 | """Test negative log likelihood loss when one hot""" 18 | y_pred = torch.tensor([[0.2, 0.8], [0.9, 0.1]]) 19 | y_true = torch.tensor([[1, 0], [1, 0]]) 20 | 21 | loss = MultiLabelNLLLoss() 22 | assert abs(loss(y_pred, y_true).item() + 0.55) < 1e-2 23 | -------------------------------------------------------------------------------- /tests/unit/model/test_logistic_regression.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from flambe.model import LogisticRegression 4 | from torch import Tensor 5 | from numpy import isclose 6 | 7 | NUMERIC_PRECISION = 1e-2 8 | 9 | 10 | def test_number_of_parameters(): 11 | regression = LogisticRegression(2) 12 | assert regression.num_parameters() == 3 13 | 14 | regression = LogisticRegression(10) 15 | assert regression.num_parameters() == 11 16 | 17 | 18 | def test_forward_pass_with_target_param(): 19 | regression = LogisticRegression(2) 20 | forward = regression(Tensor([[1, 4]]), target=Tensor([[5, 6]])) 21 | transformed_target = forward[1] 22 | assert transformed_target.dtype == torch.float32 23 | assert isclose(transformed_target.numpy(), [[5., 6.]], rtol=NUMERIC_PRECISION).all() 24 | 25 | 26 | def test_forward_pass_is_sigmoid(): 27 | regression = LogisticRegression(2) 28 | 29 | set_parameters(regression, Tensor([[5, 6]]), Tensor([[3]])) 30 | 31 | assert isclose(regression(Tensor([[1, 4]])).item(), [[1.]], rtol=NUMERIC_PRECISION) 32 | assert isclose(regression(Tensor([[0, 0]])).item(), [[0.95]], rtol=NUMERIC_PRECISION) 33 | assert isclose(regression(Tensor([[1.5, 2.5]])).item(), [[1.]], rtol=NUMERIC_PRECISION) 34 | assert isclose(regression(Tensor([[-1, 0]])).item(), [[0.12]], 
rtol=NUMERIC_PRECISION) 35 | assert isclose(regression(Tensor([[-0.5, 0]])).item(), [[0.62]], rtol=NUMERIC_PRECISION) 36 | 37 | 38 | def set_parameters(model, weight, bias): 39 | """ 40 | This depends and has knowledge of the inner structure of objects. According 41 | to https://github.com/pytorch/pytorch/issues/565 there's no plan of adding 42 | a feature to inject weights/biases dependencies. 43 | Probably we can iterate this idea to patch this. 44 | """ 45 | for name, param in model.named_parameters(): 46 | if "bias" in name: 47 | param.data = bias 48 | if "weight" in name: 49 | param.data = weight 50 | -------------------------------------------------------------------------------- /tests/unit/nlp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/nlp/__init__.py -------------------------------------------------------------------------------- /tests/unit/nlp/classification/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/nlp/classification/__init__.py -------------------------------------------------------------------------------- /tests/unit/nlp/classification/test_tc_datasets.py: -------------------------------------------------------------------------------- 1 | from flambe.nlp.classification import SSTDataset, TRECDataset, NewsGroupDataset 2 | 3 | 4 | def test_dataset_sst(): 5 | dataset = SSTDataset() 6 | assert len(dataset.train) == 6920 7 | assert len(dataset.train[0]) == 2 8 | 9 | assert len(dataset.val) == 872 10 | assert len(dataset.val[0]) == 2 11 | 12 | assert len(dataset.test) == 1821 13 | assert len(dataset.test[0]) == 2 14 | 15 | 16 | def test_dataset_trec(): 17 | dataset = TRECDataset() 18 | assert len(dataset.train) == 5452 19 | assert 
len(dataset.train[0]) == 2 20 | 21 | assert len(dataset.val) == 0 22 | 23 | assert len(dataset.test) == 500 24 | assert len(dataset.test[0]) == 2 25 | 26 | def test_dataset_news(): 27 | try: 28 | from sklearn.datasets import fetch_20newsgroups 29 | 30 | dataset = NewsGroupDataset() 31 | 32 | assert len(dataset.train) == 11314 33 | assert len(dataset.train[0]) == 2 34 | 35 | assert len(dataset.val) == 0 36 | 37 | assert len(dataset.test) == 7532 38 | assert len(dataset.test[0]) == 2 39 | 40 | except ImportError: 41 | pass 42 | -------------------------------------------------------------------------------- /tests/unit/nlp/language_modeling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/nlp/language_modeling/__init__.py -------------------------------------------------------------------------------- /tests/unit/nlp/language_modeling/test_lm_datasets.py: -------------------------------------------------------------------------------- 1 | # TODO: uncomment after fixing circleCI 2 | 3 | # from flambe.nlp.language_modeling import PTBDataset, Wiki103, Enwiki8 4 | 5 | # def test_dataset_ptb(): 6 | # dataset = PTBDataset() 7 | # assert len(dataset.train) == 1 8 | # assert len(dataset.val) == 1 9 | # assert len(dataset.test) == 1 10 | 11 | 12 | # def test_dataset_wikitext103(): 13 | # dataset = Wiki103() 14 | # assert len(dataset.train) == 1 15 | # assert len(dataset.val) == 1 16 | # assert len(dataset.test) == 1 17 | 18 | 19 | # def test_dataset_enwiki8(): 20 | # dataset = Enwiki8() 21 | # assert len(dataset.train) == 1 22 | # assert len(dataset.val) == 1 23 | # assert len(dataset.test) == 1 24 | -------------------------------------------------------------------------------- /tests/unit/nn/test_mlp.py: -------------------------------------------------------------------------------- 1 | from flambe.nn import MLPEncoder 2 | 
def test_forward_pass_1_layer():
    """A single-layer MLPEncoder maps (batch, input) -> (batch, output)."""
    in_dim, out_dim, batch = 128, 64, 32
    expected_shape = torch.Size((batch, out_dim))

    # Exercise the encoder both without and with an output activation.
    for encoder in (
        MLPEncoder(in_dim, out_dim),
        MLPEncoder(in_dim, out_dim, output_activation=torch.nn.ReLU()),
    ):
        result = encoder(torch.rand((batch, in_dim)))
        assert result.shape == expected_shape


def test_forward_pass_multi_layers():
    """A 3-layer MLPEncoder still maps (batch, input) -> (batch, output)."""
    in_dim, hidden_dim, out_dim, batch = 256, 128, 64, 32
    expected_shape = torch.Size((batch, out_dim))

    # Plain stack, no activations.
    encoder = MLPEncoder(
        in_dim,
        out_dim,
        n_layers=3,
        hidden_size=hidden_dim,
    )
    result = encoder(torch.rand((batch, in_dim)))
    assert result.shape == expected_shape

    # Same stack with ReLU on hidden layers and the output layer.
    encoder = MLPEncoder(
        in_dim,
        out_dim,
        n_layers=3,
        output_activation=torch.nn.ReLU(),
        hidden_size=hidden_dim,
        hidden_activation=torch.nn.ReLU(),
    )
    result = encoder(torch.rand((batch, in_dim)))
    assert result.shape == expected_shape
def test_invalid_sru_kwargs():
    """Unknown SRU keyword arguments are rejected at construction time."""
    with pytest.raises(ValueError):
        _ = RNNEncoder(
            input_size=10,
            hidden_size=20,
            rnn_type='sru',
            use_tanh=True,
            some_invalid_param=123
        )


@pytest.mark.parametrize("rnn_type", ['lstm', 'gru', 'sru'])
def test_forward_pass(rnn_type):
    """Output keeps (batch, seq) dims and projects features to hidden_size."""
    feat_dim, hidden, seq, batch = 300, 10, 20, 32
    encoder = RNNEncoder(
        input_size=feat_dim,
        hidden_size=hidden,
        n_layers=4,
        rnn_type=rnn_type)

    out, _ = encoder(torch.rand(batch, seq, feat_dim))
    assert out.shape == torch.Size((batch, seq, hidden))


@pytest.mark.parametrize("rnn_type", ['lstm', 'gru', 'sru'])
def test_transpose_on_forward_pass(rnn_type):
    """The encoder must transpose batch/time (dims 0 and 1) internally."""
    feat_dim, hidden = 300, 10
    encoder = RNNEncoder(
        input_size=feat_dim,
        hidden_size=hidden,
        n_layers=4,
        rnn_type=rnn_type)

    tensor = torch.rand(10, 10, feat_dim)
    # Spy on transpose while keeping its real behavior.
    tensor.transpose = mock.Mock(side_effect=tensor.transpose)
    out, _ = encoder(tensor)

    tensor.transpose.assert_called()
    tensor.transpose.assert_called_with(0, 1)
def test_noam():
    """After one step the Noam LR matches its closed-form warmup value."""
    encoder = MLPEncoder(10, 10)
    opt = Adam(encoder.parameters(), lr=0.001)
    sched = NoamScheduler(opt, warmup=100, d_model=512)

    # LR starts at zero and, during warmup, follows
    # base_lr * d_model**-0.5 * step * warmup**-1.5.
    assert sched.get_lr()[0] == 0
    sched.step()
    expected = 0.001 / (512 ** 0.5) / (100 ** 1.5)
    assert math.isclose(sched.get_lr()[0], expected)
@pytest.fixture(scope='function')
def args():
    """Namespace mimicking the CLI arguments flambe's runner receives."""
    defaults = {
        'config': 'config.yaml',
        'debug': False,
        'force': False,
        'verbose': False,
        'install_extensions': False,
        'cluster': None,
        'secrets': None,
    }
    return argparse.Namespace(**defaults)


class DummyRunnable(Runnable):
    """Runnable that simply records the kwargs passed to run()."""

    def run(self, **kwargs) -> None:
        self.kwargs = kwargs


@pytest.fixture(scope='function')
def runnable():
    """A fresh DummyRunnable per test."""
    return DummyRunnable()


def test_debug_remote(args):
    """--debug combined with a cluster config must be rejected."""
    args.debug = True
    args.cluster = "cluster.yaml"

    with pytest.raises(ValueError):
        main(args)
# Bytes per mebibyte.
MB = 2 ** 20


def create_file(filename, size_MB=1):
    """Create a file of exactly ``size_MB`` mebibytes.

    Truncating an empty file up to the target length is a cheap way to
    produce a file of a given size (from https://stackoverflow.com/a/8816154).
    """
    with open(filename, "wb") as handle:
        handle.truncate(size_MB * MB)
pytest.raises(ValueError): 47 | utils.get_files('/some/non/existent/path/to/test') 48 | -------------------------------------------------------------------------------- /tests/unit/sampler/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asappresearch/flambe/98f10f859fe9223fd2d1d76d430f77cdbddc0956/tests/unit/sampler/__init__.py -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [testenv] 2 | passenv = * 3 | commands = 4 | pip install -r {toxinidir}/requirements.txt 5 | pip install -r {toxinidir}/tests/requirements.txt 6 | pip install {toxinidir}/. 7 | 8 | mkdir -p test_results/bandit 9 | mkdir -p test_results/mypy 10 | mkdir -p test_results/pytest 11 | mkdir -p test_results/flake8 12 | 13 | safety check --full-report 14 | bandit -r {toxinidir}/flambe -c bandit.yaml -f xml -o test_results/bandit/report.xml 15 | flake8 {toxinidir}/flambe --format junit-xml --output-file test_results/flake8/flake8-report.xml 16 | pytest tests {posargs} 17 | coverage xml -i -o test_results/pytest/full_path_coverage.xml 18 | mypy {toxinidir}/flambe --junit-xml=test_results/mypy/junit-mypy-report.xml 19 | 20 | [pytest] 21 | addopts = -v -l -ra 22 | --junitxml test_results/pytest/junit-pytest-report.xml 23 | --cov-config tests/.coveragerc 24 | --cov flambe/ 25 | --cov-report html:test_results/pytest/cov_html 26 | --cov-report xml:test_results/pytest/cov.xml 27 | --cov-report term 28 | 29 | [flake8] 30 | max-line-length = 100 31 | max-doc-length = 72 32 | ignore = F821,F407,W504 #undefined_name, __future__, binary operator 33 | --------------------------------------------------------------------------------