├── api
│   ├── __init__.py
│   ├── Dockerfile
│   ├── tests
│   │   └── test_app.py
│   └── app.py
├── model_core
│   ├── __init__.py
│   ├── weights
│   │   └── readme.md
│   ├── networks
│   │   ├── __init__.py
│   │   └── mlp.py
│   ├── models
│   │   ├── __init__.py
│   │   └── base.py
│   ├── tests
│   │   ├── support
│   │   │   └── readme.md
│   │   └── test_predictor.py
│   ├── datasets
│   │   ├── __init__.py
│   │   └── dataset.py
│   ├── predictor.py
│   └── util.py
├── data
│   ├── raw
│   │   ├── readme.md
│   │   └── metadata.toml
│   └── processed
│       └── readme.md
├── tasks
│   ├── test_api.sh
│   ├── test_functionality.sh
│   ├── test_validation.sh
│   ├── run_api_docker.sh
│   ├── prepare_sample_experiments.sh
│   ├── train_predictor.sh
│   ├── build_api_docker.sh
│   └── lint.sh
├── requirements.in
├── environment.yml
├── setup.py
├── requirements-dev.in
├── training
│   ├── util.py
│   ├── experiments
│   │   └── sample.json
│   ├── prepare_experiments.py
│   ├── gpu_manager.py
│   └── run_experiment.py
├── evaluation
│   └── evaluate_predictor.py
├── notebooks
│   └── 00_exploratory_data_analysis.ipynb
├── Makefile
├── README.md
├── setup.md
├── .gitignore
├── requirements.txt
└── requirements-dev.txt
/api/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/model_core/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/data/raw/readme.md:
--------------------------------------------------------------------------------
1 | # Info about your raw dataset
--------------------------------------------------------------------------------
/model_core/weights/readme.md:
--------------------------------------------------------------------------------
1 | # Save your weights here
--------------------------------------------------------------------------------
/tasks/test_api.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | pytest -s api
3 |
--------------------------------------------------------------------------------
/tasks/test_functionality.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | pytest -s model_core
3 |
--------------------------------------------------------------------------------
/tasks/test_validation.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | pytest -s evaluation/evaluate*
3 |
--------------------------------------------------------------------------------
/requirements.in:
--------------------------------------------------------------------------------
1 | boltons
2 | flask
3 | h5py
4 | numpy
5 | requests
6 | torch
7 | tqdm
8 |
--------------------------------------------------------------------------------
/model_core/networks/__init__.py:
--------------------------------------------------------------------------------
1 | """Neural network code modules."""
2 | from .mlp import mlp
3 |
4 |
--------------------------------------------------------------------------------
/model_core/models/__init__.py:
--------------------------------------------------------------------------------
1 | """Model modules."""
2 | # from .model01 import SomeModel
3 |
4 |
5 |
--------------------------------------------------------------------------------
/model_core/tests/support/readme.md:
--------------------------------------------------------------------------------
1 | # Support data for evaluations
2 | Can be examples of images, text, etc.
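
A sketch of how the tests locate this folder (mirroring `model_core/tests/test_predictor.py`; the `support_data` subfolder name is only an example):

```python
from pathlib import Path

# Resolved from inside model_core/tests/, so it points at this support/ directory
SUPPORT_DIRNAME = Path(__file__).parents[0].resolve() / "support" / "support_data"
```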
--------------------------------------------------------------------------------
/tasks/run_api_docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -p 8000:8000 --name api -it --rm model_core_api
3 |
--------------------------------------------------------------------------------
/model_core/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | """Dataset modules."""
2 | # from .dataset01 import SomeDataset
3 |
4 |
5 |
--------------------------------------------------------------------------------
/data/processed/readme.md:
--------------------------------------------------------------------------------
1 | # Processed data, ready for training
2 |
3 |
4 | **Note**
5 | STORE PROCESSED DATA HERE, BUT IGNORE IT IN GIT
--------------------------------------------------------------------------------
/tasks/prepare_sample_experiments.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python training/prepare_experiments.py training/experiments/sample.json
3 |
--------------------------------------------------------------------------------
/data/raw/metadata.toml:
--------------------------------------------------------------------------------
1 | url = 'https://..../datafile.zip'
2 | filename = 'datafile.zip'
3 | sha256 = 'f3c9e87a88a313e557c6d3548ed8a2a1af2dc3c4a678c5f3fc6f972ba4a50c55'
--------------------------------------------------------------------------------
/tasks/train_predictor.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python training/run_experiment.py --save '{"dataset": "Dataset", "model": "Model00", "network": "network01", "train_args": {"batch_size": 256}}'
3 |
--------------------------------------------------------------------------------
/tasks/build_api_docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sed 's/tensorflowORpytorch==/tensorflowORpytorch-cpu==/' requirements.txt > api/requirements.txt
4 |
5 | docker build -t model_core_api -f api/Dockerfile .
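# Note on the sed step above: "tensorflowORpytorch" is a placeholder for whichever framework you
# actually pin in requirements.txt; e.g. a pin like "tensorflow==2.2.0" would be rewritten to
# "tensorflow-cpu==2.2.0" so the API image installs a CPU-only build of the framework.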
6 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: your_project_name
2 | channels:
3 |   - defaults
4 | dependencies:
5 |   - python=3.7
6 |   - cudatoolkit=10.1
7 |   - cudnn=7.6
8 |   - pip
9 |   - pip:
10 |     - pip-tools
11 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 |     name='my-app',
5 |     version='0.1',
6 |     packages=find_packages(exclude=['tests']),
7 |     install_requires=[
8 |         'pylint',
9 |         'torch',
10 |         'torchvision'
11 |     ]
12 | )
--------------------------------------------------------------------------------
/requirements-dev.in:
--------------------------------------------------------------------------------
1 | -c requirements.txt
2 | bandit
3 | black
4 | gpustat
5 | grequests
6 | itermplot
7 | jupyterlab
8 | matplotlib
9 | mypy
10 | nltk
11 | pycodestyle
12 | pydocstyle
13 | pylint
14 | pytest
15 | pyyaml
16 | tornado
17 | safety
18 | scipy
19 | pillow
20 | wandb
21 |
--------------------------------------------------------------------------------
/model_core/networks/mlp.py:
--------------------------------------------------------------------------------
1 | """Define mlp network function."""
2 | from typing import Tuple
3 |
4 | import numpy as np
5 | import torch.nn as nn
6 |
7 |
8 | def mlp(
9 |     input_shape: Tuple[int, ...],
10 |     output_shape: Tuple[int, ...],
11 |     layer_size: int = 128,
12 |     dropout_amount: float = 0.2,
13 |     num_layers: int = 3,
14 | ) -> nn.Sequential:
15 |     """Simple MLP: flatten -> (Linear -> ReLU -> Dropout) x num_layers -> Linear."""
16 |     input_dim = int(np.prod(input_shape))
17 |     output_dim = int(np.prod(output_shape))
18 |     layers = [nn.Flatten()]
19 |     last_dim = input_dim
20 |     for _ in range(num_layers):
21 |         layers += [nn.Linear(last_dim, layer_size), nn.ReLU(), nn.Dropout(dropout_amount)]
22 |         last_dim = layer_size
23 |     layers.append(nn.Linear(last_dim, output_dim))
24 |     return nn.Sequential(*layers)
25 |
--------------------------------------------------------------------------------
/training/util.py:
--------------------------------------------------------------------------------
1 | """Function to train a model."""
2 | from time import time
3 |
4 | from model_core.datasets.dataset import Dataset
5 | from model_core.models.base import Model
6 |
7 | def train_model(model: Model, dataset: Dataset, epochs: int, batch_size: int, use_experiment_manager: bool = False) -> Model:
8 |     """Write your function to train the model here."""
9 |     return model
10 |
--------------------------------------------------------------------------------
/api/Dockerfile:
--------------------------------------------------------------------------------
1 | # The "buster" flavor of the official docker Python image is based on Debian and includes common packages.
2 | FROM python:3.7-buster
3 |
4 | # Create the working directory
5 | RUN set -ex && mkdir /repo
6 | WORKDIR /repo
7 |
8 | # Copy only the relevant directories to the working directory
9 | COPY model_core/ ./model_core
10 | COPY api/ ./api
11 |
12 | # Install Python dependencies
13 | RUN set -ex && pip3 install -r api/requirements.txt
14 |
15 | # Run the web server
16 | EXPOSE 8000
17 | ENV PYTHONPATH /repo
18 | CMD python3 /repo/api/app.py
19 |
--------------------------------------------------------------------------------
/model_core/tests/test_predictor.py:
--------------------------------------------------------------------------------
1 | """Tests for the Predictor class."""
2 | import os
3 | from pathlib import Path
4 | import unittest
5 |
6 | from model_core.predictor import Predictor
7 |
8 | SUPPORT_DIRNAME = Path(__file__).parents[0].resolve() / "support" / "support_data"
9 |
10 | os.environ["CUDA_VISIBLE_DEVICES"] = ""
11 |
12 |
13 | class TestPredictor(unittest.TestCase):
14 |     """Tests for the Predictor class."""
15 |
16 |     def test_filename(self):
17 |         """Test that Predictor correctly predicts something."""
18 |         predictor = Predictor()
19 |
20 |
--------------------------------------------------------------------------------
/evaluation/evaluate_predictor.py:
--------------------------------------------------------------------------------
1 | """Run validation tests for your ML program."""
2 | import os
3 | from pathlib import Path
4 | from time import time
5 | import unittest
6 |
7 | # from model_core.datasets import TextDataset, ImageDataset
8 | # from model_core.predictor import ImageClassifier, TextClassifier
9 |
10 | os.environ["CUDA_VISIBLE_DEVICES"] = ""
11 |
12 | SUPPORT_DIRNAME = Path(__file__).parents[0].resolve() / "support" / "example_data"
13 |
14 |
15 | class TestEvaluateSomething(unittest.TestCase):
16 |     def test_evaluate(self):
17 |         """Write your own evaluation tests"""
18 |
--------------------------------------------------------------------------------
/api/tests/test_app.py:
--------------------------------------------------------------------------------
1 | """Tests for web app."""
2 | import os
3 | from pathlib import Path
4 | from unittest import TestCase
5 |
6 | from api.app import app
7 |
8 | os.environ["CUDA_VISIBLE_DEVICES"] = ""
9 |
10 | REPO_DIRNAME = Path(__file__).parents[2].resolve()
11 | SUPPORT_DIRNAME = REPO_DIRNAME / "model_core" / "tests" / "support"
12 |
13 |
14 | class TestIntegrations(TestCase):
15 |     def setUp(self):
16 |         self.app = app.test_client()
17 |
18 |     def test_index(self):
19 |         response = self.app.get("/")
20 |         assert response.get_data().decode() == "Hello, world!"
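    # A possible shape for the predict test below (hypothetical payload and assertions;
    # adapt the JSON body to whatever /v1/predict ends up accepting and returning):
    #
    #     response = self.app.post("/v1/predict", json={"input": "example"})
    #     assert response.status_code == 200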
21 | 22 | def test_predict(self): 23 | """Write your own tests """ 24 | -------------------------------------------------------------------------------- /tasks/lint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -uo pipefail 3 | set +e 4 | 5 | FAILURE=false 6 | 7 | echo "safety" 8 | safety check -r requirements.txt -r requirements-dev.txt || FAILURE=true 9 | 10 | echo "pylint" 11 | pylint api model_core training || FAILURE=true 12 | 13 | echo "pycodestyle" 14 | pycodestyle api model_core training || FAILURE=true 15 | 16 | echo "pydocstyle" 17 | pydocstyle api model_core training || FAILURE=true 18 | 19 | echo "mypy" 20 | mypy api model_core training || FAILURE=true 21 | 22 | echo "bandit" 23 | bandit -ll -r {api,model_core,training} || FAILURE=true 24 | 25 | echo "shellcheck" 26 | shellcheck tasks/*.sh || FAILURE=true 27 | 28 | if [ "$FAILURE" = true ]; then 29 | echo "Linting failed" 30 | exit 1 31 | fi 32 | echo "Linting passed" 33 | exit 0 34 | -------------------------------------------------------------------------------- /api/app.py: -------------------------------------------------------------------------------- 1 | """Flask web server serving predictions.""" 2 | import os 3 | 4 | from flask import Flask, request, jsonify 5 | 6 | # from model_core.predictor import Predictor 7 | import model_core.util as util 8 | 9 | os.environ["CUDA_VISIBLE_DEVICES"] = "" # Do not use GPU 10 | 11 | app = Flask(__name__) 12 | 13 | 14 | @app.route("/") 15 | def index(): 16 | """Provide simple health check route.""" 17 | return "Hello, world!" 18 | 19 | 20 | @app.route("/v1/predict", methods=["GET", "POST"]) 21 | def predict(): 22 | """Provide main prediction API route. Responds to both GET and POST requests.""" 23 | pass 24 | 25 | 26 | 27 | def main(): 28 | """Run the app.""" 29 | app.run(host="0.0.0.0", port=8000, debug=False) 30 | 31 | 32 | if __name__ == "__main__": 33 | main() 34 | -------------------------------------------------------------------------------- /notebooks/00_exploratory_data_analysis.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "language_info": { 4 | "codemirror_mode": { 5 | "name": "ipython", 6 | "version": 3 7 | }, 8 | "file_extension": ".py", 9 | "mimetype": "text/x-python", 10 | "name": "python", 11 | "nbconvert_exporter": "python", 12 | "pygments_lexer": "ipython3", 13 | "version": 3 14 | }, 15 | "orig_nbformat": 2 16 | }, 17 | "nbformat": 4, 18 | "nbformat_minor": 2, 19 | "cells": [ 20 | { 21 | "cell_type": "code", 22 | "execution_count": null, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "%load_ext autoreload\n", 27 | "%autoreload 2\n", 28 | "\n", 29 | "%matplotlib inline\n", 30 | "import matplotlib.pyplot as plt\n", 31 | "import numpy as np\n", 32 | "\n", 33 | "from importlib.util import find_spec\n", 34 | "if find_spec(\"model_core\") is None:\n", 35 | " import sys\n", 36 | " sys.path.append('..')\n" 37 | ] 38 | } 39 | ] 40 | } -------------------------------------------------------------------------------- /training/experiments/sample.json: -------------------------------------------------------------------------------- 1 | { 2 | "experiment_group": "Sample Experiments", 3 | "experiments": [ 4 | { 5 | "dataset": "Dataset", 6 | "model": "Model", 7 | "network": "mlp", 8 | "network_args": { 9 | "num_layers": 2 10 | }, 11 | "train_args": { 12 | "batch_size": 256 13 | } 14 | }, 15 | { 16 | "dataset": "Dataset", 17 | "model": 
"Model01", 18 | "network": "mlp", 19 | "network_args": { 20 | "num_layers": 4 21 | }, 22 | "train_args": { 23 | "batch_size": 256 24 | } 25 | }, 26 | { 27 | "dataset": "Dataset", 28 | "model": "Model02", 29 | "network": "cnn", 30 | "train_args": { 31 | "batch_size": 128 32 | } 33 | } 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /model_core/predictor.py: -------------------------------------------------------------------------------- 1 | """Predictor class""" 2 | from typing import Tuple, Union 3 | 4 | import numpy as np 5 | 6 | from model_core.models import SomeModel 7 | from model_core.datasets import ImageDataset, TextDataset, SomeDataset 8 | import model_core.util as util 9 | 10 | 11 | class Predictor: 12 | """Wrap the model and predict something """ 13 | 14 | def __init__(self, dataset_cls=SomeDataset): 15 | self.model = SomeModel(dataset_cls=dataset_cls) 16 | self.model.load_weights() 17 | 18 | def predict(self, something_or_filename: Union[np.ndarray, str]) -> Tuple[str, float]: 19 | """Predict something""" 20 | if isinstance(something_or_filename, str): 21 | obj = util.read_something(something_or_filename) 22 | else: 23 | obj = something_or_filename 24 | return self.model.predict(obj) 25 | 26 | def evaluate(self, dataset): 27 | """Evaluate on a dataset.""" 28 | return self.model.evaluate(dataset.x_test, dataset.y_test) 29 | -------------------------------------------------------------------------------- /training/prepare_experiments.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Simple way to run experiments defined in a file.""" 3 | import argparse 4 | import json 5 | 6 | 7 | def run_experiments(experiments_filename): 8 | """Run experiments from file.""" 9 | with open(experiments_filename) as f: 10 | experiments_config = json.load(f) 11 | num_experiments = len(experiments_config["experiments"]) 12 | for ind in range(num_experiments): 13 | experiment_config = experiments_config["experiments"][ind] 14 | experiment_config["experiment_group"] = experiments_config["experiment_group"] 15 | print(f"python training/run_experiment.py --gpu=-1 '{json.dumps(experiment_config)}'") 16 | 17 | 18 | def main(): 19 | """Parse command-line arguments and run experiments from provided file.""" 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument("experiments_filename", type=str, help="Filename of JSON file of experiments to run.") 22 | args = parser.parse_args() 23 | run_experiments(args.experiments_filename) 24 | 25 | 26 | if __name__ == "__main__": 27 | main() 28 | -------------------------------------------------------------------------------- /model_core/datasets/dataset.py: -------------------------------------------------------------------------------- 1 | """Dataset class to be extended by dataset-specific classes.""" 2 | from pathlib import Path 3 | import argparse 4 | import os 5 | 6 | from model_core import util 7 | 8 | 9 | class Dataset: 10 | """Simple abstract class for datasets.""" 11 | 12 | @classmethod 13 | def data_dirname(cls): 14 | return Path(__file__).resolve().parents[2] / "data" 15 | 16 | def load_or_generate_data(self): 17 | pass 18 | 19 | def load_one_batch(self): 20 | pass 21 | 22 | 23 | def _download_raw_dataset(metadata): 24 | if os.path.exists(metadata["filename"]): 25 | return 26 | print(f"Downloading raw dataset from {metadata['url']}...") 27 | util.download_url(metadata["url"], metadata["filename"]) 28 | print("Computing SHA-256...") 
29 | sha256 = util.compute_sha256(metadata["filename"]) 30 | if sha256 != metadata["sha256"]: 31 | raise ValueError("Downloaded data file SHA-256 does not match that listed in metadata document.") 32 | 33 | 34 | def _parse_args(): 35 | parser = argparse.ArgumentParser() 36 | # arguments 37 | return parser.parse_args() 38 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Makefile based on https://github.com/shreyashankar/create-ml-app 2 | 3 | .PHONY: help lint run 4 | 5 | # Makefile variables 6 | VENV_NAME:=venv 7 | PYTHON=${VENV_NAME}/bin/python3 8 | 9 | # Include your variables here 10 | RANDOM_SEED:=42 11 | NUM_EPOCHS:=15 12 | INPUT_DIM:=784 13 | HIDDEN_DIM:=128 14 | OUTPUT_DIM:=10 15 | 16 | .DEFAULT: help 17 | help: 18 | @echo "make venv" 19 | @echo " prepare development environment, use only once" 20 | @echo "make lint" 21 | @echo " run pylint" 22 | @echo "make run" 23 | @echo " run project" 24 | 25 | # Install dependencies whenever setup.py is changed. 26 | venv: $(VENV_NAME)/bin/activate 27 | $(VENV_NAME)/bin/activate: setup.py 28 | test -d $(VENV_NAME) || python3 -m venv $(VENV_NAME) 29 | ${PYTHON} -m pip install -U pip 30 | ${PYTHON} -m pip install -e . 31 | rm -rf ./*.egg-info 32 | touch $(VENV_NAME)/bin/activate 33 | 34 | # lint: venv 35 | # ${PYTHON} -m pylint main.py 36 | 37 | # run: venv 38 | # ${PYTHON} main.py --seed $(RANDOM_SEED) --num_epochs $(NUM_EPOCHS) --input_dim $(INPUT_DIM) --hidden_dim $(HIDDEN_DIM) --output_dim $(OUTPUT_DIM) 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /training/gpu_manager.py: -------------------------------------------------------------------------------- 1 | """GPUManager class.""" 2 | import os 3 | import time 4 | 5 | import gpustat 6 | import numpy as np 7 | from redlock import Redlock 8 | 9 | 10 | GPU_LOCK_TIMEOUT = 5000 # ms 11 | 12 | 13 | class GPUManager: 14 | """Class for allocating GPUs.""" 15 | 16 | def __init__(self, verbose: bool = False): 17 | self.lock_manager = Redlock([{"host": "localhost", "port": 6379, "db": 0}]) 18 | self.verbose = verbose 19 | 20 | def get_free_gpu(self): 21 | """ 22 | If some GPUs are available, try reserving one by checking out an exclusive redis lock. 23 | If none available or can't get lock, sleep and check again. 
24 |         """
25 |         while True:
26 |             gpu_ind = self._get_free_gpu()
27 |             if gpu_ind is not None:
28 |                 return gpu_ind
29 |             if self.verbose:
30 |                 print(f"pid {os.getpid()} sleeping")
31 |             time.sleep(GPU_LOCK_TIMEOUT / 1000)
32 |
33 |     def _get_free_gpu(self):
34 |         try:
35 |             available_gpu_inds = [
36 |                 gpu.index for gpu in gpustat.GPUStatCollection.new_query() if gpu.memory_used < 0.5 * gpu.memory_total
37 |             ]
38 |         except Exception:  # pylint: disable=broad-except
39 |             return 0  # Return a dummy GPU index if no CUDA GPUs are installed
40 |
41 |         if available_gpu_inds:
42 |             gpu_ind = np.random.choice(available_gpu_inds)
43 |             if self.verbose:
44 |                 print(f"pid {os.getpid()} picking gpu {gpu_ind}")
45 |             if self.lock_manager.lock(f"gpu_{gpu_ind}", GPU_LOCK_TIMEOUT):
46 |                 return int(gpu_ind)
47 |             if self.verbose:
48 |                 print(f"pid {os.getpid()} couldn't get lock")
49 |         return None
50 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ML Production Template
2 | This codebase is a starting point to get your machine learning project into production.
3 |
4 | This codebase is based on the [Full Stack Deep Learning Course](https://course.fullstackdeeplearning.com/).
5 |
6 | ## Codebase
7 |
8 | **`notebooks`**: **Explore and visualize your data**
9 |
10 | **`tasks`**: **Convenience scripts for running frequent tests and training commands**
11 |
12 | **`training`**: **Logic for the training itself**
13 |
14 | - **`model_core`: the core code where the model lives (e.g. `cat_recognizer`, `text_classifier`, `tumor_detector`, etc.)**
15 |     - **`datasets`**: **Logic for downloading, preprocessing, augmenting, and loading data**
16 |     - **`models`: Models wrap networks and add functionality like loss functions, saving, loading, and training**
17 |     - **`networks`: Code for constructing neural networks (dumb input-to-output mappings)**
18 |     - **`tests`: Regression tests for the model code. Make sure a trained model performs well on important examples.**
19 |     - **`weights`: Weights of the production model**
20 |     - `predictor.py`: **wrapper for the model that allows you to do inference**
21 |     - `util.py`
22 |
23 | **`api`**: **Web server serving predictions. Dockerfile, unit tests, Flask, etc.**
24 |
25 | **`evaluation`**: **Run the validation tests**
26 |
27 | **`experiment_manager`**: **Settings of your experiment manager (e.g. wandb, tensorboard)**
28 |
29 | **`data`**: **Use it for data versioning and for storing data examples and metadata of your datasets. During training, use it to store your raw and processed data, but don't commit the datasets themselves to the repo.**
30 |
31 | ## Note
32 | I recommend using this repo as a GitHub template: fork the repo, go to Settings, and mark it as a template.
33 |
34 | This [ML Project Template](https://bit.ly/33zMFqw) might help you manage your project.
35 |
36 |
--------------------------------------------------------------------------------
/setup.md:
--------------------------------------------------------------------------------
1 | # Setup
2 |
3 | ## 1. Clone the repo
4 |
5 | ```sh
6 | git clone https://github.com/DanielhCarranza/ml-production-template.git
7 | cd ml-production-template
8 | ```
9 |
10 | ## 2. Set up the Python environment
11 |
12 | ### If on GCP AI Platform Notebooks or AWS EC2 Deep Learning instance
13 |
14 | Simply run ```pip install -r requirements.txt -r requirements-dev.txt```.
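
Putting it together for such an instance (the `PYTHONPATH` step is explained just below):

```sh
pip install -r requirements.txt -r requirements-dev.txt
export PYTHONPATH=.
```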
15 |
16 | Also, run ```export PYTHONPATH=.``` before executing any commands later on, or you will get errors like `ModuleNotFoundError: No module named 'model_core'`.
17 |
18 | To avoid having to set `PYTHONPATH` in every terminal you open, add that line as the last line of your `~/.bashrc` file using a text editor of your choice (e.g. `nano ~/.bashrc`).
19 |
20 | ### If on own machine
21 |
22 | Run `conda env create` to create an environment called `your_project_name`, as defined in `environment.yml`.
23 | This environment will provide us with the right Python version as well as the CUDA and CUDNN libraries.
24 | We will install Python libraries using `pip-sync`, however, which will let us do three nice things:
25 |
26 | 1. Separate out dev from production dependencies (`requirements-dev.in` vs `requirements.in`).
27 | 2. Have a lockfile of exact versions for all dependencies (the auto-generated `requirements-dev.txt` and `requirements.txt`).
28 | 3. Allow us to easily deploy to targets that may not support the `conda` environment.
29 |
30 | So, after running `conda env create`, activate the new environment and install the requirements:
31 |
32 | ```sh
33 | conda activate your_project_name
34 | pip-sync requirements.txt requirements-dev.txt
35 | ```
36 |
37 | If you add, remove, or need to update versions of some requirements, edit the `.in` files, then run
38 |
39 | ```
40 | pip-compile requirements.in && pip-compile requirements-dev.in
41 | ```
42 |
43 | Now, every time you work in this directory, make sure to start your session with `conda activate your_project_name`.
44 |
45 | ### Run with Makefile
46 |
47 | ```sh
48 | cd your_project_folder
49 | make run
50 | ```
51 | # Start Coding!
52 |
53 |
--------------------------------------------------------------------------------
/model_core/util.py:
--------------------------------------------------------------------------------
1 | """Utility functions for model_core module."""
2 |
3 | from concurrent.futures import as_completed, ThreadPoolExecutor
4 | from pathlib import Path
5 | from typing import Union
6 | from urllib.request import urlopen, urlretrieve
7 | import hashlib
8 | import os
9 |
10 | import numpy as np
11 | from tqdm import tqdm
12 |
13 | def read_image():
14 |     pass
15 |
16 | def read_text():
17 |     pass
18 |
19 | def compute_sha256(filename: Union[Path, str]):
20 |     """Return SHA256 checksum of a file."""
21 |     with open(filename, "rb") as f:
22 |         return hashlib.sha256(f.read()).hexdigest()
23 |
24 |
25 | class TqdmUpTo(tqdm):
26 |     """From https://github.com/tqdm/tqdm/blob/master/examples/tqdm_wget.py"""
27 |
28 |     def update_to(self, blocks=1, bsize=1, tsize=None):
29 |         """
30 |         Parameters
31 |         ----------
32 |         blocks : int, optional
33 |             Number of blocks transferred so far [default: 1].
34 |         bsize : int, optional
35 |             Size of each block (in tqdm units) [default: 1].
36 |         tsize : int, optional
37 |             Total size (in tqdm units). If [default: None] remains unchanged.
38 | """ 39 | if tsize is not None: 40 | self.total = tsize 41 | self.update(blocks * bsize - self.n) 42 | 43 | 44 | def download_url(url, filename): 45 | """Download a file from url to filename, with a progress bar.""" 46 | with TqdmUpTo(unit="B", unit_scale=True, unit_divisor=1024, miniters=1) as t: 47 | urlretrieve(url, filename, reporthook=t.update_to, data=None) # nosec 48 | 49 | 50 | def download_urls(urls, filenames): 51 | """Download urls to filenames in a multi-threaded way.""" 52 | with ThreadPoolExecutor() as executor: 53 | futures = [executor.submit(urlretrieve, url, filename) for url, filename in zip(urls, filenames)] 54 | for future in tqdm(as_completed(futures), total=len(futures)): 55 | try: 56 | future.result() 57 | except Exception as e: 58 | print("Error", e) 59 | 60 | 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | 132 | .vscode/ 133 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile 3 | # To update, run: 4 | # 5 | # pip-compile requirements.in 6 | # 7 | absl-py==0.9.0 # via tensorboard, tensorflow 8 | astunparse==1.6.3 # via tensorflow 9 | boltons==20.0.0 # via -r requirements.in 10 | cachetools==4.0.0 # via google-auth 11 | certifi==2019.11.28 # via requests 12 | chardet==3.0.4 # via requests 13 | click==7.1.1 # via flask 14 | editdistance==0.5.3 # via -r requirements.in 15 | flask==1.1.1 # via -r requirements.in 16 | gast==0.3.3 # via tensorflow 17 | google-auth-oauthlib==0.4.1 # via tensorboard 18 | google-auth==1.12.0 # via google-auth-oauthlib, tensorboard 19 | google-pasta==0.2.0 # via tensorflow 20 | grpcio==1.27.2 # via tensorboard, tensorflow 21 | h5py==2.10.0 # via -r requirements.in, tensorflow 22 | idna==2.9 # via requests 23 | itsdangerous==1.1.0 # via flask 24 | jinja2==2.11.1 # via flask 25 | keras-preprocessing==1.1.0 # via tensorflow 26 | markdown==3.2.1 # via tensorboard 27 | markupsafe==1.1.1 # via jinja2 28 | numpy==1.18.2 # via -r requirements.in, h5py, keras-preprocessing, opencv-python-headless, opt-einsum, scipy, tensorboard, tensorflow 29 | oauthlib==3.1.0 # via requests-oauthlib 30 | opencv-python-headless==4.2.0.32 # via -r requirements.in 31 | opt-einsum==3.2.0 # via tensorflow 32 | protobuf==3.11.3 # via tensorboard, tensorflow 33 | pyasn1-modules==0.2.8 # via google-auth 34 | pyasn1==0.4.8 # via pyasn1-modules, rsa 35 | requests-oauthlib==1.3.0 # via google-auth-oauthlib 36 | requests==2.23.0 # via -r requirements.in, requests-oauthlib, tensorboard 37 | rsa==4.0 # via google-auth 38 | scipy==1.4.1 # via tensorflow 39 | six==1.14.0 # via absl-py, astunparse, google-auth, google-pasta, grpcio, h5py, keras-preprocessing, protobuf, tensorboard, tensorflow 40 | tensorboard-plugin-wit==1.6.0.post2 # via tensorboard 41 | tensorboard==2.2.0 # via tensorflow 42 | termcolor==1.1.0 # via tensorflow 43 | toml==0.10.0 # via -r requirements.in 44 | tqdm==4.44.1 # via -r requirements.in 45 | urllib3==1.25.8 # via requests 46 | werkzeug==1.0.0 # via flask, tensorboard 47 | wheel==0.34.2 # via astunparse, tensorboard, tensorflow 48 | wrapt==1.11.2 # via -r requirements.in, tensorflow 49 | 50 | # The following packages are considered to be unsafe in a requirements file: 51 | # setuptools 52 | -------------------------------------------------------------------------------- /training/run_experiment.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Script to run an experiment.""" 3 | import argparse 4 | import json 5 | import importlib 6 | from typing import Dict 7 | import os 8 | 9 | from training.gpu_manager import GPUManager 10 | from training.util 
import train_model
11 |
12 | DEFAULT_TRAIN_ARGS = {"batch_size": 64, "epochs": 16}
13 |
14 |
15 | def run_experiment(experiment_config: Dict, save_weights: bool, gpu_ind: int, use_experiment_manager: bool = True):
16 |     """
17 |     Run a training experiment.
18 |
19 |     Parameters
20 |     ----------
21 |     experiment_config (dict)
22 |         Of the form
23 |         {
24 |             "dataset": "Dataset",
25 |             "dataset_args": {
26 |                 "data_arg": 0.4,
27 |             },
28 |             "model": "Model",
29 |             "network": "neural_net",
30 |             "network_args": {
31 |                 "hidden_size": 256,
32 |             },
33 |             "train_args": {
34 |                 "batch_size": 128,
35 |                 "epochs": 10
36 |             }
37 |         }
38 |     save_weights (bool)
39 |         If True, will save the final model weights to a canonical location (see Model in models/base.py)
40 |     gpu_ind (int)
41 |         specifies which gpu to use
42 |     use_experiment_manager (bool)
43 |         sync training run to wandb, tensorboard, etc.
44 |     """
45 |     print(f"Running experiment with config {experiment_config} on GPU {gpu_ind}")
46 |
47 |     datasets_module = importlib.import_module("model_core.datasets")
48 |     dataset_class_ = getattr(datasets_module, experiment_config["dataset"])
49 |     dataset_args = experiment_config.get("dataset_args", {})
50 |     dataset = dataset_class_(**dataset_args)
51 |     dataset.load_or_generate_data()
52 |     print(dataset)
53 |
54 |     models_module = importlib.import_module("model_core.models")
55 |     model_class_ = getattr(models_module, experiment_config["model"])
56 |
57 |     networks_module = importlib.import_module("model_core.networks")
58 |     network_fn_ = getattr(networks_module, experiment_config["network"])
59 |     network_args = experiment_config.get("network_args", {})
60 |     model = model_class_(
61 |         dataset_cls=dataset_class_, network_fn=network_fn_, dataset_args=dataset_args, network_args=network_args,
62 |     )
63 |     print(model)
64 |
65 |     experiment_config["train_args"] = {
66 |         **DEFAULT_TRAIN_ARGS,
67 |         **experiment_config.get("train_args", {}),
68 |     }
69 |     experiment_config["experiment_group"] = experiment_config.get("experiment_group", None)
70 |     experiment_config["gpu_ind"] = gpu_ind
71 |
72 |     train_model(
73 |         model,
74 |         dataset,
75 |         epochs=experiment_config["train_args"]["epochs"],
76 |         batch_size=experiment_config["train_args"]["batch_size"],
77 |         use_experiment_manager=use_experiment_manager,
78 |     )
79 |     score = model.evaluate(dataset.x_test, dataset.y_test)
80 |     print(f"Test evaluation: {score}")
81 |
82 |     if save_weights:
83 |         model.save_weights()
84 |
85 |
86 | def _parse_args():
87 |     """Parse command-line arguments."""
88 |     parser = argparse.ArgumentParser()
89 |     parser.add_argument("--gpu", type=int, default=0, help="Provide index of GPU to use.")
90 |     parser.add_argument(
91 |         "--save",
92 |         default=False,
93 |         dest="save",
94 |         action="store_true",
95 |         help="If true, then final weights will be saved to canonical, version-controlled location",
96 |     )
97 |     parser.add_argument(
98 |         "experiment_config",
99 |         type=str,
100 |         help='Experiment JSON (e.g. \'{"dataset": "Dataset", "model": "Model", "network": "mlp"}\')',
101 |     )
102 |     args = parser.parse_args()
103 |     return args
104 |
105 |
106 | def main():
107 |     """Run experiment."""
108 |     args = _parse_args()
109 |     if args.gpu < 0:
110 |         gpu_manager = GPUManager()
111 |         args.gpu = gpu_manager.get_free_gpu()
112 |
113 |     experiment_config = json.loads(args.experiment_config)
114 |     os.environ["CUDA_VISIBLE_DEVICES"] = f"{args.gpu}"
115 |     run_experiment(experiment_config, args.save, args.gpu)
116 |
117 |
118 | if __name__ == "__main__":
119 |     main()
120 |
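# Usage sketch (assumes you run from the repo root and the task scripts are executable): every line
# printed by tasks/prepare_sample_experiments.sh is a complete invocation of this script, e.g.
#     python training/run_experiment.py --gpu=-1 '{"dataset": "Dataset", "model": "Model", "network": "mlp", ...}'
# so the prepared commands can be piped straight into a shell, or into GNU parallel if it is installed:
#     tasks/prepare_sample_experiments.sh | bash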
--------------------------------------------------------------------------------
/model_core/models/base.py:
--------------------------------------------------------------------------------
1 | """Model class, to be extended by specific types of models."""
2 | import os
3 | import pickle
4 | from pathlib import Path
5 | from typing import Callable, Dict, Optional, Union, List, Tuple
6 |
7 | import numpy as np
8 |
9 | import torch
10 | import torch.nn as nn
11 |
12 |
13 | DIRNAME = Path(__file__).parents[1].resolve() / "weights"
14 |
15 |
16 | class Model:
17 |     """Base class, to be subclassed by predictors for specific type of data."""
18 |
19 |     def __init__(
20 |         self,
21 |         dataset_cls: type,
22 |         network_fn: Callable,
23 |         dataset_args: Dict = None,
24 |         network_args: Dict = None,
25 |     ):
26 |         self.name = f"{self.__class__.__name__}_{dataset_cls.__name__}_{network_fn.__name__}"
27 |
28 |         if dataset_args is None:
29 |             dataset_args = {}
30 |         self.data = dataset_cls(**dataset_args)
31 |
32 |         if network_args is None:
33 |             network_args = {}
34 |         self.network = network_fn(**network_args)
35 |         print(self.network)
36 |
37 |         self.batch_augment_fn: Optional[Callable] = None
38 |         self.batch_format_fn: Optional[Callable] = None
39 |
40 |     @property
41 |     def weights_filename(self) -> str:
42 |         DIRNAME.mkdir(parents=True, exist_ok=True)
43 |         return str(DIRNAME / f"{self.name}_weights.h5")
44 |
45 |     def fit(
46 |         self, dataset, batch_size: int = 32, epochs: int = 10, augment_val: bool = True, callbacks: list = None,
47 |     ):
48 |         raise NotImplementedError
49 |
50 |     def evaluate(self, x: np.ndarray, y: np.ndarray, batch_size: int = 16, _verbose: bool = False):
51 |         raise NotImplementedError
52 |
53 |     def loss(self):
54 |         pass
55 |
56 |     def optimizer(self):
57 |         pass
58 |
59 |     def metrics(self):
60 |         pass
61 |
62 |     def load_weights(self):
63 |         self.network.load_weights(self.weights_filename)
64 |
65 |     def save_weights(self):
66 |         self.network.save_weights(self.weights_filename)
67 |
68 |
69 | class TorchModelBase(Model):
70 |     def __init__(self,
71 |         dataset_cls: type,
72 |         network_fn: Callable,
73 |         dataset_args: Dict = None,
74 |         network_args: Dict = None,
75 |         optimizer_cls: Callable = torch.optim.Adam,
76 |         optimizer_args: Dict = None,
77 |         device=None,
78 |     ):
79 |         super().__init__(dataset_cls, network_fn, dataset_args, network_args)
80 |
81 |         if optimizer_args is None:
82 |             self.optimizer_args = {}
83 |         else:
84 |             self.optimizer_args = optimizer_args
85 |         self.optimizer_cls = optimizer_cls
86 |
87 |         device = device if device is not None else ("cuda" if torch.cuda.is_available() else "cpu")
88 |         self.device = torch.device(device)
89 |
90 |     def optimizer_fn(self):
91 |         return self.optimizer_cls(self.model.parameters(), **self.optimizer_args)
92 |
93 |     def loss_fn(self, **loss_args):
94 |         return nn.CrossEntropyLoss(**loss_args)
95 |
96 |     def metrics(self, preds, yb):
97 |         acc = (preds.argmax(-1) == yb).float()
98 |         return acc
99 |
100 |     def fit(self, *args):
101 |         self.model = self.network.to(self.device)
102 |         self.optimizer = self.optimizer_fn()
103 |         self.loss = self.loss_fn()
104 |         self.model.train()
105 |         self.acc, self.error = [], []
106 |         for i, (xb, yb) in enumerate(self.data):
107 |             preds = self.model(xb)
108 |             loss = self.loss(preds, yb)
109 |             loss.backward()
110 |             self.optimizer.step()
111 |             self.optimizer.zero_grad()
112 |             self.acc.append(self.metrics(preds, yb))
113 |             self.error.append(loss.item())
114 |         return self
115 |
116 |
117 |     def evaluate(self, xb, yb, device: str = None):
118 |         device = self.device if device is None else
torch.device(device) 119 | 120 | self.model.to(device) 121 | self.model.eval() 122 | with torch.no_grad(): 123 | for i, (xb, yb) in enumerate(self.data): 124 | preds = self.model(xb) 125 | self.loss(preds, yb) 126 | self.metrics(preds, yb) 127 | return self 128 | 129 | def to_pickle(self, output_filename:str): 130 | """ 131 | Serialize the entire class instance 132 | """ 133 | self.model = self.model.cpu() 134 | with open(output_filename, 'wb') as f: 135 | pickle.dump(self, f) 136 | 137 | @staticmethod 138 | def from_pickle(src_filename:str): 139 | """ 140 | Load an entire class instance onto the CPU. 141 | """ 142 | with open(src_filename, 'rb') as f: 143 | return pickle.load(f) 144 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile 3 | # To update, run: 4 | # 5 | # pip-compile requirements-dev.in 6 | # 7 | appdirs==1.4.3 # via black, virtualenv 8 | astroid==2.3.3 # via pylint 9 | attrs==19.3.0 # via black, jsonschema, pytest 10 | backcall==0.1.0 # via ipython 11 | bandit==1.6.2 # via -r requirements-dev.in 12 | black==19.10b0 # via -r requirements-dev.in 13 | bleach==3.1.4 # via nbconvert 14 | blessings==1.7 # via gpustat 15 | certifi==2019.11.28 # via -c requirements.txt, pipenv, requests, sentry-sdk 16 | chardet==3.0.4 # via -c requirements.txt, requests 17 | click==7.1.1 # via -c requirements.txt, black, safety, wandb 18 | configparser==5.0.0 # via wandb 19 | cycler==0.10.0 # via matplotlib 20 | decorator==4.4.2 # via ipython, traitlets 21 | defusedxml==0.6.0 # via nbconvert 22 | distlib==0.3.0 # via virtualenv 23 | docker-pycreds==0.4.0 # via wandb 24 | dparse==0.5.0 # via safety 25 | entrypoints==0.3 # via nbconvert 26 | filelock==3.0.12 # via virtualenv 27 | gevent==1.4.0 # via grequests 28 | gitdb==4.0.2 # via gitpython 29 | gitpython==3.1.0 # via bandit, wandb 30 | gpustat==0.6.0 # via -r requirements-dev.in 31 | gql==0.2.0 # via wandb 32 | gradescope-utils==0.3.1 # via -r requirements-dev.in 33 | graphql-core==1.1 # via gql 34 | greenlet==0.4.15 # via gevent 35 | grequests==0.4.0 # via -r requirements-dev.in 36 | idna==2.9 # via -c requirements.txt, requests 37 | importlib-metadata==1.6.0 # via jsonschema, pluggy, pytest, virtualenv 38 | ipykernel==5.2.0 # via notebook 39 | ipython-genutils==0.2.0 # via nbformat, notebook, traitlets 40 | ipython==7.13.0 # via ipykernel 41 | isort==4.3.21 # via pylint 42 | itermplot==0.331 # via -r requirements-dev.in 43 | jedi==0.16.0 # via ipython 44 | jinja2==2.11.1 # via -c requirements.txt, jupyterlab, jupyterlab-server, nbconvert, notebook 45 | json5==0.9.4 # via jupyterlab-server 46 | jsonschema==3.2.0 # via jupyterlab-server, nbformat 47 | jupyter-client==6.1.2 # via ipykernel, notebook 48 | jupyter-core==4.6.3 # via jupyter-client, nbconvert, nbformat, notebook 49 | jupyterlab-server==1.0.7 # via jupyterlab 50 | jupyterlab==2.0.1 # via -r requirements-dev.in 51 | kiwisolver==1.1.0 # via matplotlib 52 | lazy-object-proxy==1.4.3 # via astroid 53 | markupsafe==1.1.1 # via -c requirements.txt, jinja2 54 | matplotlib==3.2.1 # via -r requirements-dev.in, itermplot 55 | mccabe==0.6.1 # via pylint 56 | mistune==0.8.4 # via nbconvert 57 | more-itertools==8.2.0 # via pytest 58 | mypy-extensions==0.4.3 # via mypy 59 | mypy==0.770 # via -r requirements-dev.in 60 | nbconvert==5.6.1 # via notebook 61 | nbformat==5.0.4 # via nbconvert, notebook 62 | nltk==3.4.5 # 
via -r requirements-dev.in 63 | notebook>=6.1.5 # via jupyterlab, jupyterlab-server 64 | numpy==1.18.2 # via -c requirements.txt, itermplot, matplotlib, scipy 65 | nvidia-ml-py3==7.352.0 # via gpustat, wandb 66 | packaging==20.3 # via dparse, pytest, safety 67 | pandocfilters==1.4.2 # via nbconvert 68 | parso==0.6.2 # via jedi 69 | pathspec==0.7.0 # via black 70 | pathtools==0.1.2 # via watchdog 71 | pbr==5.4.4 # via stevedore 72 | pexpect==4.8.0 # via ipython 73 | pickleshare==0.7.5 # via ipython 74 | pillow==7.1.0 # via -r requirements-dev.in 75 | pipenv==2018.11.26 # via dparse 76 | pluggy==0.13.1 # via pytest 77 | prometheus-client==0.7.1 # via notebook 78 | promise==2.3 # via gql, graphql-core 79 | prompt-toolkit==3.0.5 # via ipython 80 | psutil==5.7.0 # via gpustat, wandb 81 | ptyprocess==0.6.0 # via pexpect, terminado 82 | py==1.8.1 # via pytest 83 | pycodestyle==2.5.0 # via -r requirements-dev.in 84 | pydocstyle==5.0.2 # via -r requirements-dev.in 85 | pygments==2.6.1 # via ipython, nbconvert 86 | pylint==2.4.4 # via -r requirements-dev.in 87 | pyparsing==2.4.6 # via matplotlib, packaging 88 | pyrsistent==0.16.0 # via jsonschema 89 | pytest==5.4.1 # via -r requirements-dev.in 90 | python-dateutil==2.8.1 # via jupyter-client, matplotlib, wandb 91 | pyyaml==5.3.1 # via -r requirements-dev.in, bandit, dparse, wandb 92 | pyzmq==19.0.0 # via jupyter-client, notebook 93 | redis==3.4.1 # via redlock-py 94 | redlock-py==1.0.8 # via -r requirements-dev.in 95 | regex==2020.2.20 # via black 96 | requests==2.23.0 # via -c requirements.txt, gql, grequests, safety, wandb 97 | safety==1.8.7 # via -r requirements-dev.in 98 | scipy==1.4.1 # via -c requirements.txt, -r requirements-dev.in 99 | send2trash==1.5.0 # via notebook 100 | sentry-sdk==0.14.3 # via wandb 101 | shortuuid==1.0.1 # via wandb 102 | six==1.14.0 # via -c requirements.txt, astroid, bandit, bleach, blessings, cycler, docker-pycreds, gpustat, gql, graphql-core, itermplot, jsonschema, nltk, packaging, promise, pyrsistent, python-dateutil, stevedore, traitlets, virtualenv, wandb 103 | smmap==3.0.1 # via gitdb 104 | snowballstemmer==2.0.0 # via pydocstyle 105 | stevedore==1.32.0 # via bandit 106 | subprocess32==3.5.4 # via wandb 107 | terminado==0.8.3 # via notebook 108 | testpath==0.4.4 # via nbconvert 109 | toml==0.10.0 # via -c requirements.txt, black, dparse 110 | tornado==6.0.4 # via -r requirements-dev.in, ipykernel, jupyter-client, jupyterlab, notebook, terminado 111 | traitlets==4.3.3 # via ipykernel, ipython, jupyter-client, jupyter-core, nbconvert, nbformat, notebook 112 | typed-ast==1.4.1 # via astroid, black, mypy 113 | typing-extensions==3.7.4.1 # via mypy 114 | urllib3==1.25.8 # via -c requirements.txt, requests, sentry-sdk 115 | virtualenv-clone==0.5.4 # via pipenv 116 | virtualenv==20.0.15 # via pipenv 117 | wandb==0.8.31 # via -r requirements-dev.in 118 | watchdog==0.10.2 # via wandb 119 | wcwidth==0.1.9 # via prompt-toolkit, pytest 120 | webencodings==0.5.1 # via bleach 121 | wrapt==1.11.2 # via -c requirements.txt, astroid 122 | zipp==3.1.0 # via importlib-metadata 123 | 124 | # The following packages are considered to be unsafe in a requirements file: 125 | # pip 126 | # setuptools 127 | --------------------------------------------------------------------------------