├── tests ├── __init__.py ├── conftest.py ├── dataset_test.py ├── training_test.py └── tokenizer_test.py ├── calbert ├── __init__.py ├── utils.py ├── model.py ├── __main__.py ├── tokenizer.py ├── download_data.py ├── lamb.py ├── reporting.py ├── dataset.py └── training.py ├── .tool-versions ├── dist ├── tokenizer-cased │ └── ca.cased.30000.model └── tokenizer-uncased │ └── ca.uncased.30000.model ├── setup.cfg ├── .gitignore ├── config ├── config.yaml └── model │ ├── base.yaml │ ├── large.yaml │ ├── tiny.yaml │ ├── xlarge.yaml │ └── xxlarge.yaml ├── .vscode ├── settings.json ├── launch.json └── .ropeproject │ └── config.py ├── local.deepkit.yml ├── test.deepkit.yml ├── docker └── Dockerfile ├── Makefile ├── deepkit.yml ├── pyproject.toml ├── .github └── workflows │ └── main.yml ├── spotty.yaml ├── README.md └── poetry.lock /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /calbert/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | python 3.7.8 2 | -------------------------------------------------------------------------------- /dist/tokenizer-cased/ca.cased.30000.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codegram/calbert/master/dist/tokenizer-cased/ca.cased.30000.model -------------------------------------------------------------------------------- /dist/tokenizer-uncased/ca.uncased.30000.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codegram/calbert/master/dist/tokenizer-uncased/ca.uncased.30000.model -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E402, E501, F401, F841, W503, E203 3 | 4 | [tool:pytest] 5 | pytestenvvars__env_files = .env 6 | norecursedirs = .git 7 | addopts = --testdox 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .venv 2 | __pycache__ 3 | .vscode/.ropeproject/objectdb 4 | run/ 5 | runs/ 6 | *.txt 7 | docker/pyproject.toml 8 | docker/poetry.lock 9 | docker/calbert 10 | docker/config 11 | docker/dist 12 | wandb/ 13 | *.pkl 14 | final-checkpoint/ 15 | model/ 16 | testy.py 17 | models/ 18 | *.egg-info 19 | *.log 20 | export/ 21 | .mypy_cache/ 22 | -------------------------------------------------------------------------------- /config/config.yaml: -------------------------------------------------------------------------------- 1 | data: 2 | valid_split: 0.04 3 | 4 | vocab: 5 | max_size: 30000 6 | lowercase: True 7 | 8 | seed: 42 9 | 10 | training: 11 | max_seq_length: 512 12 | masked_lm_prob: 0.10 13 | weight_decay: 0.0 14 | learning_rate: .00176 15 | 16 | defaults: 17 | - model: tiny 18 | - hydra/job_logging: colorlog 19 | 20 | hydra: 21 | run: 22 | dir: run 23 | -------------------------------------------------------------------------------- /calbert/utils.py: -------------------------------------------------------------------------------- 1 | "Random utils used here and 
there" 2 | 3 | __all__ = ["normalize_path"] 4 | 5 | from pathlib import Path 6 | from hydra.utils import to_absolute_path 7 | 8 | 9 | def normalize_path(p: Path) -> Path: 10 | "Converts a path into absolute gathering Hydra's original directory" 11 | try: 12 | return Path(to_absolute_path(str(p))) 13 | except AttributeError: # if we're not in Hydra 14 | return p.absolute() 15 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.pythonPath": "/Users/txus/Library/Caches/pypoetry/virtualenvs/calbert-rFhS7Vgj-py3.7/bin/python", 3 | "python.linting.pylamaEnabled": false, 4 | "python.linting.flake8Enabled": true, 5 | "python.linting.enabled": true, 6 | "python.testing.pytestArgs": [ 7 | "tests" 8 | ], 9 | "python.testing.unittestEnabled": false, 10 | "python.testing.nosetestsEnabled": false, 11 | "python.testing.pytestEnabled": true 12 | } 13 | -------------------------------------------------------------------------------- /config/model/base.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | name: base 3 | attention_probs_dropout_prob: 0.1 4 | hidden_act: "gelu" 5 | hidden_dropout_prob: 0.1 6 | hidden_size: 768 7 | embedding_size: 128 8 | initializer_range: 0.02 9 | intermediate_size: 3072 10 | max_position_embeddings: 512 11 | num_attention_heads: 12 12 | num_hidden_layers: 12 13 | num_hidden_groups: 1 14 | net_structure_type: 0 15 | gap_size: 0 16 | num_memory_blocks: 0 17 | inner_group_num: 1 18 | down_scale_factor: 1 19 | type_vocab_size: 2 20 | ln_type: "postln" 21 | -------------------------------------------------------------------------------- /config/model/large.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | name: large 3 | attention_probs_dropout_prob: 0.1 4 | hidden_act: "gelu" 5 | hidden_dropout_prob: 0.1 6 | hidden_size: 1024 7 | embedding_size: 128 8 | initializer_range: 0.02 9 | intermediate_size: 4096 10 | max_position_embeddings: 512 11 | num_attention_heads: 16 12 | num_hidden_layers: 24 13 | num_hidden_groups: 1 14 | net_structure_type: 0 15 | gap_size: 0 16 | num_memory_blocks: 0 17 | inner_group_num: 1 18 | down_scale_factor: 1 19 | type_vocab_size: 2 20 | ln_type: "postln" 21 | -------------------------------------------------------------------------------- /config/model/tiny.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | name: tiny 3 | attention_probs_dropout_prob: 0.0 4 | hidden_act: "gelu" 5 | hidden_dropout_prob: 0.0 6 | hidden_size: 312 7 | embedding_size: 128 8 | initializer_range: 0.02 9 | intermediate_size: 1248 10 | max_position_embeddings: 512 11 | num_attention_heads: 12 12 | num_hidden_layers: 4 13 | num_hidden_groups: 1 14 | net_structure_type: 0 15 | gap_size: 0 16 | num_memory_blocks: 0 17 | inner_group_num: 1 18 | down_scale_factor: 1 19 | type_vocab_size: 2 20 | ln_type: "postln" 21 | -------------------------------------------------------------------------------- /config/model/xlarge.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | name: xlarge 3 | attention_probs_dropout_prob: 0.1 4 | hidden_act: "gelu" 5 | hidden_dropout_prob: 0.1 6 | hidden_size: 2048 7 | embedding_size: 128 8 | initializer_range: 0.02 9 | intermediate_size: 8192 10 | max_position_embeddings: 512 11 | num_attention_heads: 16 
12 | num_hidden_layers: 24 13 | num_hidden_groups: 1 14 | net_structure_type: 0 15 | gap_size: 0 16 | num_memory_blocks: 0 17 | inner_group_num: 1 18 | down_scale_factor: 1 19 | type_vocab_size: 2 20 | ln_type: "postln" 21 | -------------------------------------------------------------------------------- /config/model/xxlarge.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | name: xxlarge 3 | attention_probs_dropout_prob: 0.0 4 | hidden_act: "gelu" 5 | hidden_dropout_prob: 0.0 6 | hidden_size: 4096 7 | embedding_size: 128 8 | initializer_range: 0.02 9 | intermediate_size: 16384 10 | max_position_embeddings: 512 11 | num_attention_heads: 64 12 | num_hidden_layers: 12 13 | num_hidden_groups: 1 14 | net_structure_type: 0 15 | gap_size: 0 16 | num_memory_blocks: 0 17 | inner_group_num: 1 18 | down_scale_factor: 1 19 | type_vocab_size: 2 20 | ln_type: "postln" 21 | -------------------------------------------------------------------------------- /local.deepkit.yml: -------------------------------------------------------------------------------- 1 | image: codegram/calbert:latest 2 | 3 | files: 4 | - config 5 | - calbert 6 | - dist 7 | 8 | output: 9 | - run 10 | - export 11 | - models 12 | 13 | config: 14 | max_items: 5 15 | train_batch_size: 1 16 | eval_batch_size: 1 17 | 18 | command: python -m calbert train --tokenizer-path dist/tokenizer-uncased/ca.uncased.30000.model --train-path dist/data/train_subset.txt --valid-path dist/data/valid_subset.txt --train-batch-size {{train_batch_size}} --eval-batch-size {{eval_batch_size}} --max-items {{max_items}} --export-path export --deepkit 19 | -------------------------------------------------------------------------------- /test.deepkit.yml: -------------------------------------------------------------------------------- 1 | image: codegram/calbert:latest 2 | 3 | docker: 4 | binds: 5 | - "~/data:/data" 6 | 7 | files: 8 | - config 9 | - calbert 10 | - dist 11 | 12 | output: 13 | - run 14 | - export 15 | - models 16 | 17 | config: 18 | version: tiny 19 | # maxing out 16 GB of GPU ram in a P100 20 | train_batch_size: 52 21 | eval_batch_size: 88 22 | 23 | command: python -m calbert train --tokenizer-path dist/tokenizer-uncased/ca.uncased.30000.model --train-path /data/calbert/train.txt --valid-path /data/calbert/valid.txt --train-batch-size {{train_batch_size}} --eval-batch-size {{eval_batch_size}} --export-path export --fp16 --deepkit model={{version}} 24 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM pytorch/pytorch:1.5-cuda10.1-cudnn7-runtime 2 | 3 | ENV MODE=${MODE} \ 4 | PYTHONFAULTHANDLER=1 \ 5 | PYTHONUNBUFFERED=1 \ 6 | PYTHONHASHSEED=random \ 7 | PIP_NO_CACHE_DIR=off \ 8 | PIP_DISABLE_PIP_VERSION_CHECK=on \ 9 | PIP_DEFAULT_TIMEOUT=100 \ 10 | POETRY_VERSION=1.0.5 11 | 12 | WORKDIR /workspace 13 | 14 | RUN pip install -U pip && \ 15 | pip install --no-cache-dir poetry==${POETRY_VERSION} 16 | 17 | COPY poetry.lock pyproject.toml /workspace/ 18 | 19 | # Project initialization: 20 | RUN poetry config virtualenvs.create false \ 21 | && poetry install $(test "$MODE" == production && echo "--no-dev") --no-interaction --no-ansi 22 | 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | deps: pyproject.toml 2 | poetry install 3 | 4 | 
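# docker-prepare stages everything the image build needs (sources, configs, tokenizer models and the poetry files) into docker/, so that docker-build below gets a self-contained build context.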
docker-prepare: 5 | rm -fr docker/config docker/dist docker/calbert docker/pyproject.toml docker/poetry.lock 6 | cp pyproject.toml docker 7 | cp poetry.lock docker 8 | mkdir -p docker/calbert 9 | cp calbert/*.py docker/calbert 10 | cp -r config docker/ 11 | cp -r dist docker/ 12 | 13 | docker-build: docker-prepare 14 | docker build -t codegram/calbert ./docker 15 | 16 | docker-push: 17 | docker push codegram/calbert:latest 18 | 19 | test: 20 | poetry run py.test tests 21 | 22 | lint: 23 | poetry run flake8 calbert/*.py tests/*.py 24 | 25 | clean: 26 | rm -fr run calbert/__pycache__ 27 | 28 | .PHONY: deps docker-prepare docker-build docker-push test lint clean 29 | -------------------------------------------------------------------------------- /deepkit.yml: -------------------------------------------------------------------------------- 1 | image: codegram/calbert:latest 2 | 3 | docker: 4 | binds: 5 | - "/data:~/data" 6 | 7 | files: 8 | - config 9 | - calbert 10 | - dist 11 | 12 | output: 13 | - run 14 | - export 15 | - models 16 | 17 | config: 18 | version: tiny 19 | train_batch_size: 32 20 | eval_batch_size: 40 21 | 22 | tasks: 23 | download_data: 24 | command: python -m calbert download_data --out-dir /data 25 | 26 | train: 27 | command: python -m calbert train --tokenizer-path dist/tokenizer-uncased/ca.uncased.30000.model --train-path /data/train.txt --valid-path /data/valid.txt --train-batch-size {{train_batch_size}} --eval-batch-size {{eval_batch_size}} --fp16 --deepkit --export-path /workspace/export model={{version}} 28 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "calbert" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["Txus "] 6 | 7 | [tool.poetry.dependencies] 8 | python = "^3.7" 9 | hydra-core = "^0.11.3" 10 | hydra-colorlog = "^0.1.4" 11 | sentencepiece = "^0.1.86" 12 | tokenizers = "^0.5" 13 | fastai2 = ">=0.0.17" 14 | fastscript = ">=0.1.4" 15 | fastcore = ">=0.1.17" 16 | torch = "~1.5" 17 | transformers = "^2.8.0" 18 | ipykernel = "^5.1.3" 19 | deepkit = "^1.0.5" 20 | 21 | [tool.poetry.dev-dependencies] 22 | pytest = "^5.3.4" 23 | flake8 = "^3.7.9" 24 | rope = "^0.16.0" 25 | black = "^19.10b0" 26 | pytest-concurrent = "^0.2.2" 27 | pytest-testdox = "^1.2.1" 28 | 29 | [build-system] 30 | requires = ["poetry>=1.0.5"] 31 | build-backend = "poetry.masonry.api" 32 | 33 | [tool.poetry.scripts] 34 | calbert = 'calbert:main' 35 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Python: Module", 9 | "type": "python", 10 | "request": "launch", 11 | "module": "calbert", 12 | "args": [ 13 | "train", 14 | "--tokenizer-path", 15 | "dist/tokenizer-uncased/ca.uncased.30000.model", 16 | "--train-path", 17 | "dist/data/train_subset.txt", 18 | "--valid-path", 19 | "dist/data/valid_subset.txt", 20 | "--train-batch-size", 21 | "4", 22 | "--eval-batch-size", 23 | "2", 24 | "--max-items", 25 | "10" 26 | ] 27 | } 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v1 11 | - name: Set up Python 3.7 12 | uses: actions/setup-python@v1 13 | with: 14 | python-version: 3.7 15 | - name: Install Poetry 16 | run: pip install poetry 17 | - name: Install deps 18 | run: poetry install 19 | - name: Lint 20 | run: make lint 21 | - name: Test 22 | run: make test 23 | 24 | - name: Prepare Docker files 25 | run: make docker-prepare 26 | - uses: docker/build-push-action@v1 27 | with: 28 | path: docker 29 | username: ${{ secrets.DOCKER_USERNAME }} 30 | password: ${{ secrets.DOCKER_PASSWORD }} 31 | repository: codegram/calbert 32 | tags: latest 33 | tag_with_sha: true 34 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | 3 | training_text = [ 4 | # 2 sentences 5 | "Porto posat l'esquinç al peu sense sutura marejant metges i perdius i això no es cura. D'altra banda tampoc he anat al metge.", 6 | # a single sentence 7 | "La sang s’ha cuit fins a tornar-se dura i passa el temps i passa i això no es cura.", 8 | # 3 sentences 9 | "Camí de massa ampla tessitura estintolada, encara sobre la corda insegura. Sens dubte. Per tant, res.", 10 | ] 11 | 12 | validation_text = [ 13 | "La corda insegura s'ha cuit malament. L'haurem de tornar a coure.", 14 | "De fet no sabien que plouria. 
Malgrat tot havien portat xubasquero.", 15 | ] 16 | 17 | 18 | class InputData: 19 | def __init__(self, which="train"): 20 | self.which = which 21 | self.file = tempfile.NamedTemporaryFile(mode="w+", encoding="utf-8") 22 | 23 | def __enter__(self, **args): 24 | self.file.__enter__(**args) 25 | for text in training_text if self.which == "train" else validation_text: 26 | self.file.write(text + "\n") 27 | self.file.flush() 28 | return self.file.name 29 | 30 | def __exit__(self, exc_type, exc_value, tb): 31 | self.file.__exit__(exc_type, exc_value, tb) 32 | 33 | 34 | def folder(): 35 | return tempfile.TemporaryDirectory() 36 | -------------------------------------------------------------------------------- /calbert/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from transformers import AlbertForMaskedLM 3 | 4 | 5 | class CalbertForMaskedLM(AlbertForMaskedLM): 6 | def __init__(self, config): 7 | super().__init__(config) 8 | 9 | def forward(self, input): 10 | input_ids, masked_lm_labels, attention_mask, token_type_ids = input.permute( 11 | 1, 0, 2 12 | ) 13 | 14 | position_ids = None 15 | head_mask = None 16 | inputs_embeds = None 17 | 18 | outputs = self.albert( 19 | input_ids=input_ids, 20 | attention_mask=attention_mask, 21 | token_type_ids=token_type_ids, 22 | position_ids=position_ids, 23 | head_mask=head_mask, 24 | inputs_embeds=inputs_embeds, 25 | ) 26 | sequence_outputs = outputs[0] 27 | 28 | prediction_scores = self.predictions(sequence_outputs) 29 | 30 | outputs = (prediction_scores,) + outputs[ 31 | 2: 32 | ] # Add hidden states and attention if they are here 33 | if masked_lm_labels is not None: 34 | loss_fct = torch.nn.CrossEntropyLoss() 35 | masked_lm_loss = loss_fct( 36 | prediction_scores.view(-1, self.config.vocab_size), 37 | masked_lm_labels.reshape(-1), 38 | ) 39 | outputs = (masked_lm_loss,) + outputs 40 | 41 | return outputs 42 | -------------------------------------------------------------------------------- /spotty.yaml: -------------------------------------------------------------------------------- 1 | project: 2 | name: calbert 3 | syncFilters: 4 | - exclude: 5 | - .git/* 6 | - .idea/* 7 | - '*/__pycache__/*' 8 | - .mypy_cache/* 9 | - export/* 10 | - run/* 11 | - docker/* 12 | - .vscode/* 13 | 14 | container: 15 | projectDir: /workspace/project 16 | file: codegram/calbert:latest 17 | volumeMounts: 18 | - name: workspace 19 | mountPath: /workspace 20 | workingDir: /workspace/project 21 | commands: python -m calbert download_data --out-dir /workspace/data 22 | 23 | 24 | instances: 25 | - name: i1 26 | provider: gcp 27 | parameters: 28 | zone: europe-west4-a 29 | onDemandInstance: false 30 | dockerDataRoot: /docker 31 | volumes: 32 | - name: workspace 33 | parameters: 34 | size: 100 35 | deletionPolicy: retain 36 | - name: docker 37 | parameters: 38 | size: 10 39 | mountDir: /docker 40 | deletionPolicy: retain 41 | 42 | machineType: n1-standard-8 43 | gpu: 44 | type: nvidia-tesla-v100 45 | count: 1 46 | 47 | scripts: 48 | test: python -m calbert train --tokenizer-path dist/tokenizer-uncased/ca.uncased.30000.model --train-path dist/data/train_subset.txt --valid-path dist/data/valid_subset.txt --train-batch-size 32 --eval-batch-size 32 --fp16 --export-path /workspace/test-export model=tiny 49 | train: python -m calbert train --tokenizer-path dist/tokenizer-uncased/ca.uncased.30000.model --train-path /workspace/data/train.txt --valid-path /workspace/data/valid.txt --train-batch-size 32 --eval-batch-size 32 
--fp16 --export-path /workspace/export model=tiny 50 | -------------------------------------------------------------------------------- /calbert/__main__.py: -------------------------------------------------------------------------------- 1 | import hydra 2 | import logging 3 | import sys 4 | import argparse 5 | from pathlib import Path 6 | 7 | from calbert import tokenizer, training, download_data 8 | 9 | log = logging.getLogger(__name__) 10 | 11 | TASK_WITH_ARGS = (None, None) 12 | 13 | VALID_COMMANDS = ["tokenizer", "train", "download_data"] 14 | 15 | TASKS = { 16 | "tokenizer": tokenizer.train, 17 | "train": training.train, 18 | "download_data": download_data.run, 19 | } 20 | PARSERS = { 21 | "tokenizer": tokenizer.arguments, 22 | "train": training.arguments, 23 | "download_data": download_data.arguments, 24 | } 25 | 26 | 27 | def parse(command): 28 | parser = PARSERS[command]() 29 | parser.add_argument("override", nargs="*", help="config overrides") 30 | args = parser.parse_args() 31 | override = args.override 32 | del args.override 33 | return args, override 34 | 35 | 36 | @hydra.main(config_path="../config/config.yaml", strict=True) 37 | def main(cfg): 38 | task, args = TASK_WITH_ARGS 39 | task(args, cfg) 40 | 41 | 42 | if __name__ == "__main__": 43 | if len(sys.argv) < 2: 44 | log.error(f"Must provide valid command: {', '.join(VALID_COMMANDS)}") 45 | exit(-1) 46 | gpu = None 47 | if sys.argv[1].startswith('--gpu'): # distributed training 48 | gpu = sys.argv[1] 49 | del sys.argv[1] 50 | cmd = sys.argv[1] 51 | if cmd not in VALID_COMMANDS: 52 | log.error(f"Invalid command {cmd}: must be one of {', '.join(VALID_COMMANDS)}") 53 | exit(-1) 54 | del sys.argv[1] 55 | if gpu: 56 | sys.argv.append(gpu) 57 | args, override = parse(cmd) 58 | sys.argv = [sys.argv[0]] + override 59 | TASK_WITH_ARGS = (TASKS[cmd], args) 60 | main() 61 | -------------------------------------------------------------------------------- /calbert/tokenizer.py: -------------------------------------------------------------------------------- 1 | 2 | import logging 3 | import argparse 4 | import collections 5 | from pathlib import Path 6 | 7 | import sentencepiece as spm 8 | from transformers import AlbertTokenizer 9 | 10 | from .utils import normalize_path 11 | 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | def load(cfg, vocab_path: Path) -> AlbertTokenizer: 16 | return AlbertTokenizer(str(vocab_path.absolute()), keep_accents=True, do_lower_case=cfg.vocab.lowercase) 17 | 18 | 19 | def arguments() -> argparse.ArgumentParser: 20 | parser = argparse.ArgumentParser(description="Train a tokenizer on some raw text") 21 | parser.add_argument("--input-file", type=Path, required=True) 22 | parser.add_argument("--out-dir", type=Path, required=True) 23 | parser.add_argument("--num-threads", type=int, default=32) 24 | return parser 25 | 26 | 27 | def train(args, cfg) -> AlbertTokenizer: 28 | log.info(f"Training tokenizer: {args}") 29 | 30 | out_dir = normalize_path(args.out_dir) 31 | out_dir.mkdir(parents=True, exist_ok=True) 32 | 33 | vocab_size = cfg.vocab.max_size 34 | 35 | name = [str(out_dir) + '/ca'] 36 | name.append("uncased" if cfg.vocab.lowercase else "cased") 37 | name.append(str(vocab_size)) 38 | prefix = ".".join(name) 39 | 40 | log.info(f'Will save to {prefix}') 41 | 42 | rule = '_cf' if cfg.vocab.lowercase else '' 43 | 44 | cmd = f"--num_threads={args.num_threads} --normalization_rule_name=nmt_nfkc{rule} --input={str(args.input_file.absolute())} --model_prefix={prefix} --vocab_size={vocab_size} --pad_id=0
--unk_id=1 --eos_id=-1 --bos_id=-1 --control_symbols=[CLS],[SEP],[MASK] --user_defined_symbols=(,),',\",-,.,–,£,€,$,·,´ --shuffle_input_sentence=true --input_sentence_size=5000000 --character_coverage=0.99995 --model_type=unigram" 45 | 46 | spm.SentencePieceTrainer.Train(cmd) 47 | 48 | return prefix 49 | -------------------------------------------------------------------------------- /calbert/download_data.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | from pathlib import Path 4 | import urllib.request 5 | import os 6 | import math 7 | 8 | from calbert.utils import normalize_path 9 | 10 | log = logging.getLogger(__name__) 11 | 12 | 13 | def arguments() -> argparse.ArgumentParser: 14 | parser = argparse.ArgumentParser(description="Download OSCAR dataset") 15 | parser.add_argument( 16 | "--out-dir", 17 | type=Path, 18 | required=True, 19 | help="The folder where to store the raw data", 20 | ) 21 | parser.add_argument( 22 | "--force-download", 23 | type=bool, 24 | default=False, 25 | help="Whether to redownload the dataset even if it is already there", 26 | ) 27 | parser.add_argument( 28 | "--force-split", 29 | type=bool, 30 | default=False, 31 | help="Whether to split the dataset even if it is already split", 32 | ) 33 | return parser 34 | 35 | 36 | def run(args, cfg): 37 | out_dir = normalize_path(args.out_dir) 38 | out_dir.mkdir(parents=True, exist_ok=True) 39 | 40 | if not args.force_download and (out_dir / "dataset.txt.gz").exists(): 41 | log.info("Raw compressed dataset already exists --all good!") 42 | else: 43 | log.warning("Downloading raw compressed dataset") 44 | 45 | urllib.request.urlretrieve( 46 | "https://traces1.inria.fr/oscar/files/Compressed/ca_dedup.txt.gz", 47 | out_dir / "dataset.txt.gz", 48 | ) 49 | 50 | if ( 51 | not args.force_split 52 | and (out_dir / "train.txt").exists() 53 | and (out_dir / "valid.txt").exists() 54 | ): 55 | log.info("Dataset is already split into train/valid --all good!") 56 | else: 57 | log.info("Calculating dataset size") 58 | n = int( 59 | os.popen(f"gunzip -c {str(out_dir)}/dataset.txt.gz | wc -l").read().strip() 60 | ) 61 | training_size = math.floor(n * (1 - cfg.data.valid_split) / 1.0) 62 | valid_size = n - training_size 63 | log.info( 64 | f"Splitting dataset in {training_size} training examples and {valid_size} validation examples ({cfg.data.valid_split * 100}%)" 65 | ) 66 | os.system( 67 | f"gunzip -c {str(out_dir)}/dataset.txt.gz | split -l {training_size} - {str(out_dir)}/ && mv {str(out_dir)}/aa {str(out_dir)}/train.txt && mv {str(out_dir)}/ab {str(out_dir)}/valid.txt" 68 | ) 69 | -------------------------------------------------------------------------------- /tests/dataset_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import random 4 | 5 | from calbert.dataset import CalbertDataset, Tokenize, Mask, Ignore, SentencePair 6 | from fastai2.data.all import DataLoader, TfmdDL, Datasets, Transform, stop 7 | from fastai2.text.data import TensorText 8 | from fastai2.basics import L 9 | 10 | from .conftest import InputData, folder 11 | from .tokenizer_test import train_tokenizer 12 | 13 | 14 | @pytest.fixture(scope="module") 15 | def dataset(): 16 | with InputData("train") as train_file: 17 | yield train_file 18 | 19 | 20 | @pytest.fixture(scope="module") 21 | def tokenizer(which="train"): 22 | with InputData(which) as train_file: 23 | with folder() as outdir: 24 | yield 
train_tokenizer((train_file, outdir))[0] 25 | 26 | 27 | @pytest.mark.describe("dataset.CalbertDataset") 28 | class TestCalbertDataset: 29 | @pytest.mark.it("Returns pairs of sentences") 30 | def test_iter(self, dataset): 31 | ds = iter(CalbertDataset(dataset)) 32 | assert next(ds) == SentencePair( 33 | "Porto posat l'esquinç al peu sense sutura marejant metges i perdius i això no es cura.", 34 | "D'altra banda tampoc he anat al metge.", 35 | ) 36 | assert next(ds) == SentencePair( 37 | "Camí de massa ampla tessitura estintolada, encara sobre la corda insegura.", 38 | "Sens dubte.", 39 | ) 40 | 41 | @pytest.mark.it("Returns pairs of sentences up to a limit") 42 | def test_iter_with_max_items(self, dataset): 43 | ds = iter(CalbertDataset(dataset, max_items=1)) 44 | assert next(ds) == SentencePair( 45 | "Porto posat l'esquinç al peu sense sutura marejant metges i perdius i això no es cura.", 46 | "D'altra banda tampoc he anat al metge.", 47 | ) 48 | try: 49 | next(ds) 50 | assert False 51 | except StopIteration: 52 | assert True 53 | 54 | 55 | @pytest.mark.describe("dataset.Tokenization") 56 | class TestTokenization: 57 | @pytest.mark.it("Returns tokenized pairs of sentences") 58 | def test_tokenize(self, dataset, tokenizer): 59 | ds = CalbertDataset(dataset) 60 | tfms = [Tokenize(tokenizer, max_seq_len=12)] 61 | train_ds = Datasets(ds, tfms=tfms) 62 | 63 | encoded = next(iter(train_ds))[0][0] 64 | assert train_ds.decode([TensorText(encoded)]) == ("port d'al",) 65 | 66 | 67 | @pytest.mark.describe("dataset.Mask") 68 | class TestMask: 69 | @pytest.mark.it("Masks tokens with a probability") 70 | def test_mask(self, dataset, tokenizer): 71 | ds = CalbertDataset(dataset) 72 | tfms = [Tokenize(tokenizer, max_seq_len=12), Mask(tokenizer, probability=1.0)] 73 | train_ds = Datasets(ds, tfms=[tfms, [Ignore()]]) 74 | 75 | inputs, other = next(iter(train_ds)) 76 | 77 | assert inputs[0].size(0) == 12 78 | assert tokenizer.mask_token_id in inputs[0] 79 | -------------------------------------------------------------------------------- /tests/training_test.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import re 4 | import tempfile 5 | import glob 6 | import argparse 7 | import pytest 8 | from pathlib import Path 9 | 10 | from omegaconf import OmegaConf 11 | import torch 12 | 13 | from calbert import dataset, training, tokenizer 14 | from calbert.dataset import Tokenize, SentencePair, mask_tokens 15 | from calbert.model import CalbertForMaskedLM 16 | from transformers import AlbertForMaskedLM 17 | 18 | from .tokenizer_test import train_tokenizer 19 | from .conftest import InputData, folder 20 | 21 | 22 | @pytest.fixture(scope="module") 23 | def training_args_cfg(): 24 | with InputData("train") as train_file: 25 | with InputData("valid") as valid_file: 26 | with folder() as tokenizer_dir: 27 | tok, prefix = train_tokenizer((train_file, tokenizer_dir)) 28 | 29 | training_args = training.arguments().parse_args( 30 | [ 31 | "--tokenizer-path", 32 | f"{prefix}.model", 33 | "--train-path", 34 | str(train_file), 35 | "--valid-path", 36 | str(valid_file), 37 | "--train-batch-size", 38 | "1", 39 | "--eval-batch-size", 40 | "1", 41 | "--epochs", 42 | "1", 43 | "--max-items", 44 | "1", 45 | ] 46 | ) 47 | 48 | training_config = [ 49 | "training.max_seq_length=4", 50 | "training.masked_lm_prob=0.1", 51 | "training.weight_decay=0.0", 52 | "training.learning_rate=5e-05", 53 | "seed=42", 54 | "model.name=test", 55 | "model.hidden_size=312", 56 | 
"model.embedding_size=64", 57 | "model.initializer_range=0.02", 58 | "model.intermediate_size=312", 59 | "model.max_position_embeddings=128", 60 | "model.num_attention_heads=4", 61 | "vocab.lowercase=False", 62 | "vocab.max_size=10", 63 | ] 64 | 65 | training_cfg = OmegaConf.from_dotlist(training_config) 66 | 67 | yield training_args, training_cfg, tok 68 | 69 | 70 | @pytest.mark.describe("training.train") 71 | class TestTraining: 72 | @pytest.mark.it("Trains the model") 73 | def test_process(self, training_args_cfg): 74 | args, cfg, tok = training_args_cfg 75 | 76 | learn = training.train(args, cfg) 77 | 78 | model = learn.model 79 | 80 | tokenize = Tokenize(tok, max_seq_len=cfg.training.max_seq_length) 81 | 82 | (token_ids, attention_mask, type_ids) = tokenize( 83 | SentencePair("Hola com anem?", "Molt bé i tu?"), 84 | ) 85 | 86 | masked_token_ids, labels = mask_tokens( 87 | token_ids, 88 | tok=tok, 89 | ignore_index=dataset.IGNORE_INDEX, 90 | probability=cfg.training.masked_lm_prob, 91 | ) 92 | 93 | batch_inputs = masked_token_ids.unsqueeze(0) 94 | 95 | model.__class__ = AlbertForMaskedLM 96 | 97 | predictions = model(batch_inputs, token_type_ids=type_ids.unsqueeze(0),)[0][0] 98 | 99 | assert predictions.shape == (cfg.training.max_seq_length, len(tok)) 100 | 101 | model.__class__ = CalbertForMaskedLM 102 | 103 | learn.validate() 104 | 105 | perplexity = learn.metrics[0].value.item() 106 | 107 | assert perplexity > 0 108 | -------------------------------------------------------------------------------- /tests/tokenizer_test.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import pytest 3 | import tempfile 4 | import glob 5 | import argparse 6 | from pathlib import Path 7 | from typing import Tuple 8 | 9 | from transformers import AlbertTokenizer 10 | from omegaconf import OmegaConf 11 | 12 | from calbert.tokenizer import arguments, train, load 13 | 14 | from .conftest import InputData, folder 15 | 16 | 17 | def train_tokenizer(in_and_out: Tuple[str, str]) -> Tuple[AlbertTokenizer, str]: 18 | input_file, outdir = in_and_out 19 | args, cfg = tokenizer_args_and_cfg(input_file, outdir) 20 | 21 | prefix = train(args, cfg) 22 | return load(cfg, Path(f"{prefix}.model")), prefix 23 | 24 | 25 | def tokenizer_args_and_cfg( 26 | input_file: str, outdir: str 27 | ) -> Tuple[argparse.Namespace, OmegaConf]: 28 | args = arguments().parse_args(["--input-file", input_file, "--out-dir", outdir]) 29 | config = [ 30 | "vocab.max_size=44", 31 | "vocab.lowercase=True", 32 | "training.max_seq_length=12", 33 | ] 34 | cfg = OmegaConf.from_dotlist(config) 35 | return args, cfg 36 | 37 | 38 | @pytest.fixture(scope="module") 39 | def tokenizer(which="train") -> (str, str): 40 | with InputData(which) as train_file: 41 | with folder() as outdir: 42 | yield train_tokenizer((train_file, outdir))[0] 43 | 44 | 45 | @pytest.fixture(scope="module") 46 | def input_file_and_outdir(which="train") -> (str, str): 47 | with InputData(which) as train_file: 48 | with folder() as outdir: 49 | yield train_file, outdir 50 | 51 | 52 | @pytest.mark.describe("tokenizer") 53 | class TestTokenizer: 54 | @pytest.mark.it("Trains a tokenizer on some corpus") 55 | def test_train(self, input_file_and_outdir): 56 | t, outdir = train_tokenizer(input_file_and_outdir) 57 | 58 | tokens = t.tokenize("Hola, com anem? 
Tot bé?") 59 | print(tokens) 60 | assert tokens == [ 61 | "▁", 62 | "h", 63 | "o", 64 | "la", 65 | ",", 66 | "▁c", 67 | "o", 68 | "m", 69 | "▁", 70 | "a", 71 | "n", 72 | "e", 73 | "m", 74 | "?", 75 | "▁", 76 | "t", 77 | "o", 78 | "t", 79 | "▁", 80 | "b", 81 | "é?", 82 | ] 83 | 84 | assert len(t) == 44 85 | assert t._convert_token_to_id("") == 0 86 | assert t._convert_token_to_id("") == 1 87 | assert t._convert_token_to_id("[CLS]") == 2 88 | assert t._convert_token_to_id("[SEP]") == 3 89 | assert t._convert_token_to_id("[MASK]") == 4 90 | 91 | @pytest.mark.it("Encodes single sentences BERT-style with CLS and SEP") 92 | def test_sequence_builders(self, tokenizer): 93 | text = tokenizer.encode("Hola, com anem?") 94 | text_2 = tokenizer.encode("Tot bé?") 95 | 96 | encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) 97 | encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) 98 | 99 | assert encoded_sentence == [tokenizer.cls_token_id] + text + [ 100 | tokenizer.sep_token_id 101 | ] 102 | assert encoded_pair == [tokenizer.cls_token_id] + text + [ 103 | tokenizer.sep_token_id 104 | ] + text_2 + [tokenizer.sep_token_id] 105 | 106 | @pytest.mark.it("Saves the tokenizer's vocab and model") 107 | def test_saves_tokenizer(self, input_file_and_outdir): 108 | t, outdir = train_tokenizer(input_file_and_outdir) 109 | 110 | got = list(glob.glob(outdir + "*")) 111 | got.sort() 112 | expected = [ 113 | outdir + ".vocab", 114 | outdir + ".model", 115 | ] 116 | expected.sort() 117 | assert got == expected 118 | -------------------------------------------------------------------------------- /.vscode/.ropeproject/config.py: -------------------------------------------------------------------------------- 1 | # The default ``config.py`` 2 | # flake8: noqa 3 | 4 | 5 | def set_prefs(prefs): 6 | """This function is called before opening the project""" 7 | 8 | # Specify which files and folders to ignore in the project. 9 | # Changes to ignored resources are not added to the history and 10 | # VCSs. Also they are not returned in `Project.get_files()`. 11 | # Note that ``?`` and ``*`` match all characters but slashes. 12 | # '*.pyc': matches 'test.pyc' and 'pkg/test.pyc' 13 | # 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc' 14 | # '.svn': matches 'pkg/.svn' and all of its children 15 | # 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o' 16 | # 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o' 17 | prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject', 18 | '.hg', '.svn', '_svn', '.git', '.tox'] 19 | 20 | # Specifies which files should be considered python files. It is 21 | # useful when you have scripts inside your project. Only files 22 | # ending with ``.py`` are considered to be python files by 23 | # default. 24 | # prefs['python_files'] = ['*.py'] 25 | 26 | # Custom source folders: By default rope searches the project 27 | # for finding source folders (folders that should be searched 28 | # for finding modules). You can add paths to that list. Note 29 | # that rope guesses project source folders correctly most of the 30 | # time; use this if you have any problems. 31 | # The folders should be relative to project root and use '/' for 32 | # separating folders regardless of the platform rope is running on. 33 | # 'src/my_source_folder' for instance. 
34 | # prefs.add('source_folders', 'src') 35 | 36 | # You can extend python path for looking up modules 37 | # prefs.add('python_path', '~/python/') 38 | 39 | # Should rope save object information or not. 40 | prefs['save_objectdb'] = True 41 | prefs['compress_objectdb'] = False 42 | 43 | # If `True`, rope analyzes each module when it is being saved. 44 | prefs['automatic_soa'] = True 45 | # The depth of calls to follow in static object analysis 46 | prefs['soa_followed_calls'] = 0 47 | 48 | # If `False` when running modules or unit tests "dynamic object 49 | # analysis" is turned off. This makes them much faster. 50 | prefs['perform_doa'] = True 51 | 52 | # Rope can check the validity of its object DB when running. 53 | prefs['validate_objectdb'] = True 54 | 55 | # How many undos to hold? 56 | prefs['max_history_items'] = 32 57 | 58 | # Shows whether to save history across sessions. 59 | prefs['save_history'] = True 60 | prefs['compress_history'] = False 61 | 62 | # Set the number spaces used for indenting. According to 63 | # :PEP:`8`, it is best to use 4 spaces. Since most of rope's 64 | # unit-tests use 4 spaces it is more reliable, too. 65 | prefs['indent_size'] = 4 66 | 67 | # Builtin and c-extension modules that are allowed to be imported 68 | # and inspected by rope. 69 | prefs['extension_modules'] = [] 70 | 71 | # Add all standard c-extensions to extension_modules list. 72 | prefs['import_dynload_stdmods'] = True 73 | 74 | # If `True` modules with syntax errors are considered to be empty. 75 | # The default value is `False`; When `False` syntax errors raise 76 | # `rope.base.exceptions.ModuleSyntaxError` exception. 77 | prefs['ignore_syntax_errors'] = False 78 | 79 | # If `True`, rope ignores unresolvable imports. Otherwise, they 80 | # appear in the importing namespace. 81 | prefs['ignore_bad_imports'] = False 82 | 83 | # If `True`, rope will insert new module imports as 84 | # `from import ` by default. 85 | prefs['prefer_module_from_imports'] = False 86 | 87 | # If `True`, rope will transform a comma list of imports into 88 | # multiple separate import statements when organizing 89 | # imports. 90 | prefs['split_imports'] = False 91 | 92 | # If `True`, rope will remove all top-level import statements and 93 | # reinsert them at the top of the module when making changes. 94 | prefs['pull_imports_to_top'] = True 95 | 96 | # If `True`, rope will sort imports alphabetically by module name instead 97 | # of alphabetically by import statement, with from imports after normal 98 | # imports. 99 | prefs['sort_imports_alphabetically'] = False 100 | 101 | # Location of implementation of 102 | # rope.base.oi.type_hinting.interfaces.ITypeHintingFactory In general 103 | # case, you don't have to change this value, unless you're an rope expert. 104 | # Change this value to inject you own implementations of interfaces 105 | # listed in module rope.base.oi.type_hinting.providers.interfaces 106 | # For example, you can add you own providers for Django Models, or disable 107 | # the search type-hinting in a class hierarchy, etc. 108 | prefs['type_hinting_factory'] = ( 109 | 'rope.base.oi.type_hinting.factory.default_type_hinting_factory') 110 | 111 | 112 | def project_opened(project): 113 | """This function is called after opening the project""" 114 | # Do whatever you like here! 115 | -------------------------------------------------------------------------------- /calbert/lamb.py: -------------------------------------------------------------------------------- 1 | """Lamb optimizer. 
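LAMB rescales each parameter's Adam-style update by a per-layer trust ratio, ||weights|| / ||adam_step||, so that layers with large weights can take proportionally large steps, which keeps training stable at very large batch sizes.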
(from https://github.com/cybertronai/pytorch-lamb)""" 2 | 3 | import collections 4 | import math 5 | 6 | import torch 7 | from tensorboardX import SummaryWriter 8 | from torch.optim import Optimizer 9 | 10 | 11 | def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int): 12 | """Log a histogram of trust ratio scalars across layers.""" 13 | results = collections.defaultdict(list) 14 | for group in optimizer.param_groups: 15 | for p in group["params"]: 16 | state = optimizer.state[p] 17 | for i in ("weight_norm", "adam_norm", "trust_ratio"): 18 | if i in state: 19 | results[i].append(state[i]) 20 | 21 | for k, v in results.items(): 22 | event_writer.add_histogram(f"lamb/{k}", torch.tensor(v), token_count) 23 | 24 | 25 | class Lamb(Optimizer): 26 | r"""Implements Lamb algorithm. 27 | It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. 28 | Arguments: 29 | params (iterable): iterable of parameters to optimize or dicts defining 30 | parameter groups 31 | lr (float, optional): learning rate (default: 1e-3) 32 | betas (Tuple[float, float], optional): coefficients used for computing 33 | running averages of gradient and its square (default: (0.9, 0.999)) 34 | eps (float, optional): term added to the denominator to improve 35 | numerical stability (default: 1e-6) 36 | weight_decay (float, optional): weight decay (L2 penalty) (default: 0) 37 | adam (bool, optional): always use trust ratio = 1, which turns this into 38 | Adam. Useful for comparison purposes. 39 | .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes: 40 | https://arxiv.org/abs/1904.00962 41 | """ 42 | 43 | def __init__( 44 | self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False 45 | ): 46 | if not 0.0 <= lr: 47 | raise ValueError("Invalid learning rate: {}".format(lr)) 48 | if not 0.0 <= eps: 49 | raise ValueError("Invalid epsilon value: {}".format(eps)) 50 | if not 0.0 <= betas[0] < 1.0: 51 | raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) 52 | if not 0.0 <= betas[1] < 1.0: 53 | raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) 54 | defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) 55 | self.adam = adam 56 | super(Lamb, self).__init__(params, defaults) 57 | 58 | def step(self, closure=None): 59 | """Performs a single optimization step. 60 | Arguments: 61 | closure (callable, optional): A closure that reevaluates the model 62 | and returns the loss. 63 | """ 64 | loss = None 65 | if closure is not None: 66 | loss = closure() 67 | 68 | for group in self.param_groups: 69 | for p in group["params"]: 70 | if p.grad is None: 71 | continue 72 | grad = p.grad.data 73 | if grad.is_sparse: 74 | raise RuntimeError( 75 | "Lamb does not support sparse gradients, consider SparseAdam instead."
76 | ) 77 | 78 | state = self.state[p] 79 | 80 | # State initialization 81 | if len(state) == 0: 82 | state["step"] = 0 83 | # Exponential moving average of gradient values 84 | state["exp_avg"] = torch.zeros_like(p.data) 85 | # Exponential moving average of squared gradient values 86 | state["exp_avg_sq"] = torch.zeros_like(p.data) 87 | 88 | exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] 89 | beta1, beta2 = group["betas"] 90 | 91 | state["step"] += 1 92 | 93 | # Decay the first and second moment running average coefficient 94 | # m_t 95 | exp_avg.mul_(beta1).add_(1 - beta1, grad) 96 | # v_t 97 | exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) 98 | 99 | # Paper v3 does not use debiasing. 100 | # bias_correction1 = 1 - beta1 ** state['step'] 101 | # bias_correction2 = 1 - beta2 ** state['step'] 102 | # Apply bias to lr to avoid broadcast. 103 | step_size = group[ 104 | "lr" 105 | ] # * math.sqrt(bias_correction2) / bias_correction1 106 | 107 | weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10) 108 | 109 | adam_step = exp_avg / exp_avg_sq.sqrt().add(group["eps"]) 110 | if group["weight_decay"] != 0: 111 | adam_step.add_(group["weight_decay"], p.data) 112 | 113 | adam_norm = adam_step.pow(2).sum().sqrt() 114 | if weight_norm == 0 or adam_norm == 0: 115 | trust_ratio = 1 116 | else: 117 | trust_ratio = weight_norm / adam_norm 118 | state["weight_norm"] = weight_norm 119 | state["adam_norm"] = adam_norm 120 | state["trust_ratio"] = trust_ratio 121 | if self.adam: 122 | trust_ratio = 1 123 | 124 | p.data.add_(-step_size * trust_ratio, adam_step) 125 | 126 | return loss 127 | -------------------------------------------------------------------------------- /calbert/reporting.py: -------------------------------------------------------------------------------- 1 | import deepkit 2 | import torch 3 | import math 4 | import logging 5 | 6 | from fastai2.basics import Recorder, Callback, random 7 | from fastai2.distributed import rank_distrib, num_distrib 8 | from calbert.tokenizer import AlbertTokenizer 9 | from calbert.model import CalbertForMaskedLM 10 | 11 | log = logging.getLogger(__name__) 12 | 13 | 14 | class DeepkitCallback(Callback): 15 | "A `Callback` to report metrics to Deepkit" 16 | run_after = Recorder 17 | 18 | def __init__(self, args, cfg, tokenizer: AlbertTokenizer): 19 | super(DeepkitCallback).__init__() 20 | self.args = args 21 | self.cfg = cfg 22 | self.experiment: deepkit.Experiment = args.experiment 23 | self.gpus = num_distrib() if num_distrib() > 0 else 1 24 | self.tokenizer = tokenizer 25 | self.n_preds = 4 26 | 27 | def begin_fit(self): 28 | self.run = rank_distrib() == 0 29 | # FIXME: look into why it doesn't work 30 | # self.experiment.watch_torch_model(self.learn.model) 31 | if self.run: 32 | self.total_examples = len(self.dls.train_ds) 33 | self.total_batches = math.floor( 34 | self.total_examples / self.args.train_batch_size / self.gpus 35 | ) 36 | self.log_every_batches = self.total_batches / 25 37 | self.valid_dl = self.dls.valid.new( 38 | self.dls.valid_ds, 39 | bs=self.n_preds, 40 | rank=rank_distrib(), 41 | world_size=num_distrib(), 42 | ) 43 | 44 | def begin_epoch(self): 45 | self.experiment.iteration(self.epoch, total=self.args.epochs) 46 | 47 | def after_validate(self): 48 | if self.run: 49 | self._write_stats() 50 | 51 | def after_batch(self): 52 | if not self.learn.training: 53 | return 54 | 55 | if self.run: 56 | self.experiment.batch( 57 | self.learn.train_iter, 58 | size=self.args.train_batch_size, 59 | total=self.total_batches, 60 | ) 
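# Report both the exponentially-smoothed loss and the raw per-batch loss, so the dashboard shows the trend as well as the variance.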
61 | self.experiment.log_metric("train_loss", self.smooth_loss) 62 | self.experiment.log_metric("raw_loss", self.loss) 63 | if self.learn.train_iter % self.log_every_batches == 0: # log some insights 64 | b, _ = self.valid_dl.one_batch() 65 | with torch.no_grad(): 66 | model = ( 67 | self.learn.model.module 68 | if hasattr(self.learn.model, "module") 69 | else self.learn.model 70 | ) 71 | kls = model.__class__ 72 | model.__class__ = CalbertForMaskedLM 73 | 74 | try: 75 | sources = [ 76 | self.tokenizer.decode(x[0]).replace("", "") for x in b 77 | ] 78 | masks = b[:, 1] 79 | filt = masks != -100 80 | labels = [ 81 | self.tokenizer.convert_ids_to_tokens( 82 | masks[idx][filt[idx]], skip_special_tokens=False 83 | ) 84 | for idx, f in enumerate(filt) 85 | ] 86 | 87 | _, prediction_scores = model(b) 88 | if prediction_scores.size(0) == 0: 89 | return # weird bug? 90 | log.info(prediction_scores.size()) 91 | log.info(filt.size()) 92 | log.info(self.learn.training) 93 | log.info(self.learn.train_iter) 94 | predicteds = [ 95 | self.tokenizer.convert_ids_to_tokens( 96 | torch.argmax(pscore[filt[i]], dim=1), 97 | skip_special_tokens=False, 98 | ) 99 | for i, pscore in enumerate(prediction_scores) 100 | ] 101 | insight = [ 102 | { 103 | "text": source, 104 | "correct+predicted": list( 105 | zip(labels[idx], predicteds[idx]) 106 | ), 107 | } 108 | for idx, source in enumerate(sources) 109 | ] 110 | self.experiment.log_insight(insight, name="predictions") 111 | except Exception as e: 112 | log.error(f"Error during reporting: {e}") 113 | finally: 114 | model.__class__ = kls 115 | 116 | def after_epoch(self): 117 | if self.run: 118 | self.experiment.iteration(self.epoch + 1, total=self.args.epochs) 119 | name = f"model_{self.epoch}" 120 | self.learn.save(name) 121 | self.experiment.add_output_file( 122 | str(self.learn.path / "models" / f"{name}.pth") 123 | ) 124 | 125 | def after_fit(self): 126 | if self.run: 127 | self.learn.save("final") 128 | self.experiment.add_output_file( 129 | str(self.learn.path / "models" / "final.pth") 130 | ) 131 | self.run = True 132 | 133 | def _write_stats(self): 134 | metric_names = list(self.recorder.metric_names).copy() 135 | values = list(self.recorder.log).copy() 136 | 137 | del metric_names[-1] 138 | 139 | if len(metric_names) - len(values) == 1: 140 | del metric_names[1] # learn.validate() means there is no train_loss 141 | 142 | assert len(values) == len(metric_names) 143 | 144 | for n, s in zip(metric_names, values): 145 | if n not in ["epoch"]: 146 | self.experiment.log_metric(n, float(f"{s:.6f}")) 147 | -------------------------------------------------------------------------------- /calbert/dataset.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Tuple 3 | import itertools 4 | import re 5 | 6 | import torch 7 | from fastai2.basics import Transform, to_device, default_device 8 | from fastai2.text.data import TensorText 9 | from fastai2.data.core import TfmdDL, DataLoaders, Datasets 10 | from torch.utils.data import Dataset, TensorDataset, IterableDataset, DataLoader 11 | from tqdm import tqdm, trange 12 | from transformers import AlbertTokenizer 13 | from collections import namedtuple 14 | 15 | SentencePair = namedtuple("SentencePair", ["first", "second"]) 16 | 17 | IGNORE_INDEX = -100 # Pytorch CrossEntropyLoss defaults to ignoring -100 18 | 19 | 20 | punctuation = re.compile(r"[\.!\?]+") 21 | 22 | 23 | def sentence_pairs(filename, min_length=8, max_items=None): 24 | with 
open(filename, encoding="utf-8") as f: 25 | counter = 0 26 | for line in f: 27 | sentences = [ 28 | s.strip() 29 | for s in punctuation.split(line) 30 | if len(s) >= min_length and " " in s 31 | ] 32 | for a, b in itertools.zip_longest(sentences[:-1], sentences[1:]): 33 | if (not max_items) or (max_items and counter < max_items): 34 | counter += 1 35 | yield SentencePair(a + ".", b + ".") 36 | 37 | 38 | class Tokenize(Transform): 39 | order = 17 40 | 41 | def __init__(self, tokenizer: AlbertTokenizer, max_seq_len: int): 42 | self.tokenizer = tokenizer 43 | self.max_seq_len = max_seq_len 44 | 45 | def encodes(self, inp: SentencePair) -> TensorText: 46 | tokenized = self.tokenizer.batch_encode_plus( 47 | [inp], 48 | max_length=self.max_seq_len, 49 | add_special_tokens=True, 50 | pad_to_max_length=True, 51 | return_tensors="pt", 52 | ) 53 | return TensorText( 54 | torch.stack( 55 | [ 56 | tokenized["input_ids"].squeeze(), 57 | tokenized["attention_mask"].squeeze(), 58 | tokenized["token_type_ids"].squeeze(), 59 | ] 60 | ) 61 | ) 62 | 63 | def decodes(self, encoded: TensorText): 64 | enc = encoded if encoded.ndim == 1 else encoded[0] 65 | return self.tokenizer.decode( 66 | enc, skip_special_tokens=True, clean_up_tokenization_spaces=False 67 | ) 68 | 69 | 70 | class CalbertDataset(IterableDataset): 71 | def __init__(self, dataset_path: Path, max_items=None): 72 | super(CalbertDataset, self).__init__() 73 | self.path = dataset_path 74 | self.max_items = max_items 75 | 76 | def __iter__(self): 77 | return sentence_pairs(self.path, max_items=self.max_items) 78 | 79 | 80 | def mask_tokens( 81 | inputs: torch.Tensor, tok: AlbertTokenizer, ignore_index: int, probability: float, 82 | ) -> Tuple[torch.Tensor, torch.Tensor]: 83 | """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
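Concretely: every non-special token is selected for masking with `probability`; of the selected tokens, 80% are replaced by [MASK], 10% get a random vocabulary token (drawn below as half of the remaining 20%), and 10% keep their original id. The returned labels carry the original ids only at selected positions and `ignore_index` everywhere else, so the loss is computed on masked tokens only.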
""" 84 | special_tokens_mask = ( 85 | (inputs == tok.cls_token_id) 86 | | (inputs == tok.pad_token_id) 87 | | (inputs == tok.sep_token_id) 88 | ) 89 | 90 | labels = inputs.clone() 91 | # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) 92 | probability_matrix = torch.full(labels.shape, probability) 93 | probability_matrix.masked_fill_(special_tokens_mask, value=0.0) 94 | probability_matrix.masked_fill_(special_tokens_mask, value=0.0) 95 | masked_indices = torch.bernoulli(probability_matrix).bool() 96 | labels[~masked_indices] = ignore_index # We only compute loss on masked tokens 97 | 98 | # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) 99 | indices_replaced = ( 100 | torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices 101 | ) 102 | inputs[indices_replaced] = tok.mask_token_id 103 | 104 | # 10% of the time, we replace masked input tokens with random word 105 | indices_random = ( 106 | torch.bernoulli(torch.full(labels.shape, 0.5)).bool() 107 | & masked_indices 108 | & ~indices_replaced 109 | ) 110 | 111 | le = len(tok) 112 | random_words = torch.randint(le, labels.shape, dtype=torch.long) 113 | inputs[indices_random] = random_words[indices_random] 114 | 115 | # The rest of the time (10% of the time) we keep the masked input tokens unchanged 116 | return inputs, labels 117 | 118 | 119 | class Mask(Transform): 120 | order = 18 121 | 122 | def __init__(self, tok: AlbertTokenizer, probability: float): 123 | self.tok = tok 124 | self.probability = probability 125 | 126 | def encodes(self, example: TensorText): 127 | ids, attention_masks, token_type_ids = example 128 | masked_ids, labels = mask_tokens( 129 | ids, 130 | tok=self.tok, 131 | probability=self.probability, 132 | ignore_index=IGNORE_INDEX, # PyTorch CrossEntropyLoss defaults to ignoring -100 133 | ) 134 | return torch.stack([masked_ids, labels, attention_masks, token_type_ids]) 135 | 136 | 137 | class Ignore(Transform): 138 | def encodes(self, x): 139 | return 0 140 | 141 | 142 | def dataloaders( 143 | args, cfg, tokenizer: AlbertTokenizer, tds: CalbertDataset, vds: CalbertDataset, 144 | ) -> DataLoaders: 145 | tfms = [ 146 | Tokenize(tokenizer, max_seq_len=cfg.training.max_seq_length), 147 | Mask(tok=tokenizer, probability=cfg.training.masked_lm_prob), 148 | ] 149 | 150 | train_ds = Datasets(tds, tfms=[tfms, [Ignore()]]) 151 | valid_ds = Datasets(vds, tfms=[tfms, [Ignore()]]) 152 | 153 | return DataLoaders( 154 | TfmdDL( 155 | train_ds, 156 | batch_size=args.train_batch_size, 157 | num_workers=0, 158 | device=default_device(), 159 | pin_memory=False, 160 | ), 161 | TfmdDL( 162 | valid_ds, 163 | batch_size=args.eval_batch_size, 164 | num_workers=0, 165 | device=default_device(), 166 | pin_memory=False, 167 | ), 168 | ) 169 | -------------------------------------------------------------------------------- /calbert/training.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from collections import ChainMap 3 | from typing import Tuple, List 4 | import argparse 5 | import logging 6 | from functools import partial 7 | 8 | from fastprogress import fastprogress 9 | import deepkit 10 | import torch 11 | import torch.nn as nn 12 | from fastai2.basics import ( 13 | Learner, 14 | Transform, 15 | random, 16 | noop, 17 | to_device, 18 | default_device, 19 | ) 20 | from fastai2.callback import progress, schedule, fp16 21 | from 
fastai2.callback.all import SaveModelCallback, ReduceLROnPlateau 22 | from fastai2.distributed import ( 23 | rank_distrib, 24 | DistributedTrainer, 25 | distrib_ctx, 26 | num_distrib, 27 | ) 28 | from fastai2.metrics import accuracy, Perplexity 29 | from fastai2.data.core import TfmdDL, DataLoaders, Datasets 30 | from fastai2.text.data import TensorText 31 | from fastai2.optimizer import Lamb 32 | 33 | from transformers import ( 34 | AlbertConfig, 35 | AlbertForMaskedLM, 36 | ) 37 | from transformers.modeling_albert import AlbertMLMHead 38 | 39 | from calbert.reporting import DeepkitCallback 40 | from calbert.dataset import CalbertDataset, Tokenize, dataloaders as build_dataloaders 41 | from calbert.model import CalbertForMaskedLM 42 | from calbert.tokenizer import AlbertTokenizer, load as load_tokenizer 43 | from calbert.utils import normalize_path 44 | 45 | fastprogress.MAX_COLS = 80 46 | 47 | log = logging.getLogger(__name__) 48 | 49 | IGNORE_INDEX = -100 # Pytorch CrossEntropyLoss defaults to ignoring -100 50 | 51 | 52 | def arguments() -> argparse.ArgumentParser: 53 | parser = argparse.ArgumentParser(description="Train ALBERT") 54 | parser.add_argument( 55 | "--tokenizer-path", 56 | type=Path, 57 | required=True, 58 | help="The path to the sentencepiece *model* (ca.{uncased|cased}.VOCABSIZE.model)", 59 | ) 60 | parser.add_argument( 61 | "--train-path", required=True, type=Path, help="Where the train.txt file lives", 62 | ) 63 | parser.add_argument( 64 | "--valid-path", required=True, type=Path, help="Where the valid.txt file lives", 65 | ) 66 | parser.add_argument( 67 | "--export-path", 68 | default=None, 69 | type=Path, 70 | help="The optional output directory where to save the model in HuggingFace format", 71 | ) 72 | 73 | parser.add_argument( 74 | "--train-batch-size", 75 | default=128, 76 | type=int, 77 | help="Batch size across all GPUs/CPUs for training.", 78 | ) 79 | parser.add_argument( 80 | "--eval-batch-size", 81 | default=128, 82 | type=int, 83 | help="Batch size across all GPUs/CPUs for evaluation.", 84 | ) 85 | parser.add_argument( 86 | "--epochs", default=1, type=int, help="Number of epochs to train", 87 | ) 88 | 89 | parser.add_argument( 90 | "--max-items", 91 | default=None, 92 | type=int, 93 | help="Number of sentence pairs to use (defaults to all)", 94 | ) 95 | 96 | parser.add_argument( 97 | "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision", 98 | ) 99 | parser.add_argument( 100 | "--deepkit", 101 | action="store_true", 102 | help="Whether to log metrics and insights to Deepkit", 103 | ) 104 | parser.add_argument( 105 | "--gpu", default=None, type=int, 106 | ) 107 | return parser 108 | 109 | 110 | def albert_config(cfg, args) -> AlbertConfig: 111 | model_name = ( 112 | f"calbert-{cfg.model.name}-{'uncased' if cfg.vocab.lowercase else 'cased'}" 113 | ) 114 | 115 | return AlbertConfig(vocab_size=cfg.vocab.max_size, **dict(cfg.model)) 116 | 117 | 118 | def initialize_model(cfg, args, tokenizer: AlbertTokenizer) -> CalbertForMaskedLM: 119 | config = albert_config(cfg, args) 120 | model = CalbertForMaskedLM(config) 121 | 122 | model_to_resize = ( 123 | model.module if hasattr(model, "module") else model 124 | ) # Take care of distributed/parallel training 125 | model_to_resize.resize_token_embeddings(len(tokenizer)) 126 | return to_device(model, default_device()) 127 | 128 | 129 | def dataloaders(args, cfg, tokenizer: AlbertTokenizer, max_items=None) -> DataLoaders: 130 | train_ds = CalbertDataset(args.train_path, max_items=max_items,) 131 | 
valid_ds = CalbertDataset(args.valid_path, max_items=max_items,) 132 | 133 | return build_dataloaders(args, cfg, tokenizer, train_ds, valid_ds) 134 | 135 | 136 | def get_learner( 137 | args, 138 | cfg, 139 | dataloaders: DataLoaders, 140 | model: CalbertForMaskedLM, 141 | tokenizer: AlbertTokenizer, 142 | use_deepkit: bool = False, 143 | ) -> Learner: 144 | learner = Learner( 145 | dataloaders, 146 | model, 147 | loss_func=lambda out, _: out[0], 148 | opt_func=partial(Lamb, lr=0.1, wd=cfg.training.weight_decay), 149 | metrics=[Perplexity()], 150 | ) 151 | cbs = [] 152 | if use_deepkit: 153 | cbs.extend([DeepkitCallback(args, cfg, tokenizer)]) 154 | learner.add_cbs(cbs) 155 | return learner 156 | 157 | 158 | def set_config(experiment, key, val): 159 | if key not in ["_resolver_cache", "content", "flags"] and val is not None: 160 | if isinstance(val, int) or isinstance(val, float): 161 | experiment.set_config(key, val) 162 | else: 163 | experiment.set_config(key, str(val)) 164 | 165 | 166 | def train(args, cfg) -> Learner: 167 | if torch.cuda.is_available(): 168 | n_gpu = torch.cuda.device_count() 169 | if args.gpu is None: 170 | args.gpu = list(range(n_gpu))[0] 171 | torch.cuda.set_device(args.gpu) 172 | else: 173 | n_gpu = None 174 | args.gpu = -1 175 | 176 | use_deepkit = args.deepkit and rank_distrib() == 0 177 | if use_deepkit: 178 | experiment = deepkit.experiment() 179 | 180 | for key, val in vars(args).items(): 181 | set_config(experiment, key, val) 182 | 183 | for key, val in dict(cfg.vocab).items(): 184 | set_config(experiment, f"vocab.{key}", val) 185 | for key, val in dict(cfg.training).items(): 186 | set_config(experiment, f"training.{key}", val) 187 | for key, val in dict(cfg.model).items(): 188 | set_config(experiment, f"model.{key}", val) 189 | 190 | args.experiment = experiment 191 | 192 | run_tags = [ 193 | cfg.model.name, 194 | "uncased" if cfg.vocab.lowercase else "cased", 195 | f"sl{cfg.training.max_seq_length}", 196 | ] 197 | 198 | model_name = "-".join(run_tags[0:3]) 199 | 200 | args.tokenizer_path = normalize_path(args.tokenizer_path) 201 | args.train_path = normalize_path(args.train_path) 202 | args.valid_path = normalize_path(args.valid_path) 203 | 204 | tokenizer = load_tokenizer(cfg, args.tokenizer_path) 205 | 206 | model = initialize_model(cfg, args, tokenizer=tokenizer) 207 | 208 | dls = dataloaders(args, cfg, tokenizer=tokenizer, max_items=args.max_items) 209 | dls.to(default_device()) 210 | 211 | learn = get_learner(args, cfg, dataloaders=dls, model=model, tokenizer=tokenizer, use_deepkit=use_deepkit) 212 | 213 | if args.fp16: 214 | learn = learn.to_fp16() 215 | 216 | logging.basicConfig( 217 | format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", 218 | datefmt="%m/%d/%Y %H:%M:%S", 219 | level=logging.INFO, 220 | ) 221 | 222 | log.info(f"Model device is {model.device}, loader device is {dls[0].device}") 223 | 224 | if rank_distrib() == 0: 225 | log.info(f"Pretraining ALBERT: {args}") 226 | log.info(f"Configuration: {cfg.pretty()}") 227 | 228 | if args.max_items: 229 | log.info(f"Sentence pairs limited to {args.max_items}") 230 | else: 231 | log.info("Processing all sentence pairs") 232 | log.info( 233 | "GPUs: %s, 16-bit training: %s", torch.cuda.device_count(), args.fp16, 234 | ) 235 | 236 | if num_distrib() > 1: 237 | DistributedTrainer.fup = True 238 | 239 | with learn.distrib_ctx( 240 | cuda_id=args.gpu 241 | ): # distributed training requires "-m fastai2.launch" 242 | log.info(f"Training in distributed data parallel context on GPU {args.gpu}") 243 | 
learn.fit_one_cycle(args.epochs, lr_max=cfg.training.learning_rate) # 1cycle policy: warm the LR up to lr_max, then anneal it 244 | 245 | learn.model.eval() 246 | 247 | if args.export_path: 248 | args.export_path = normalize_path(args.export_path) 249 | args.export_path.mkdir(parents=True, exist_ok=True) 250 | model_to_save = model.module if hasattr(model, "module") else model 251 | model_to_save.__class__ = AlbertForMaskedLM 252 | torch.save(model_to_save.state_dict(), args.export_path / "pytorch_model.bin") 253 | model_to_save.config.to_json_file(args.export_path / "config.json") 254 | tokenizer.save_pretrained(args.export_path) 255 | if use_deepkit: 256 | for file in args.export_path.glob("*"): 257 | args.experiment.add_output_file(str(file)) 258 | 259 | return learn 260 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # calbert ![](https://github.com/codegram/calbert/workflows/Tests/badge.svg) 2 | 3 | A Catalan ALBERT (A Lite BERT), Google's take on self-supervised learning of language representations. 4 | 5 | It's trained on a corpus of **19.557.475 sentence pairs** (containing 729 million unique words) extracted from the Catalan subset of [Inria's OSCAR](https://traces1.inria.fr/oscar/) dataset. We use a validation set of 833.259 sentence pairs to evaluate the model. 6 | 7 | You can read the original [ALBERT paper here](https://arxiv.org/pdf/1909.11942.pdf). 8 | 9 | ## Pre-trained models 10 | 11 | They are available at HuggingFace's [Model Hub page](https://huggingface.co/codegram) 12 | 13 | | Model | Arch. | Training data | Play with it | Visualize it | 14 | | ----------------------------------- | -------------- | ---------------------- | ------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | 15 | | `codegram` / `calbert-tiny-uncased` | Tiny (uncased) | OSCAR (4.3 GB of text) | [Card on Model Hub](https://huggingface.co/codegram/calbert-tiny-uncased) | [Visualize in exBERT](https://huggingface.co/exbert/?model=codegram/calbert-tiny-uncased&modelKind=bidirectional&sentence=M%27agradaria%20força%20saber-ne%20més) | 16 | | `codegram` / `calbert-base-uncased` | Base (uncased) | OSCAR (4.3 GB of text) | [Card on Model Hub](https://huggingface.co/codegram/calbert-base-uncased) | [Visualize in exBERT](https://huggingface.co/exbert/?model=codegram/calbert-base-uncased&modelKind=bidirectional&sentence=M%27agradaria%20força%20saber-ne%20més) | 17 | 18 | ## How to use it? 19 | 20 | You just need the `transformers` library. Nothing else to clone or install. 21 | 22 | To choose which model version to use (`tiny`, or `base`), consider that smaller models are less powerful, but nimbler and less resource-hungry to run. 23 | 24 | ```bash 25 | pip install transformers 26 | ``` 27 | 28 | ```python 29 | from transformers import AutoModel, AutoTokenizer 30 | 31 | tokenizer = AutoTokenizer.from_pretrained("codegram/calbert-base-uncased") 32 | model = AutoModel.from_pretrained("codegram/calbert-base-uncased") 33 | 34 | model.eval() # disable dropout 35 | ``` 36 | 37 | Now onto the two main use cases. 38 | 39 | ### Predicting a missing word in a Catalan sentence 40 | 41 | This is the simplest use case, yet not the most useful. Still, here it is! Whichever word you want to mask, indicate it with the special token `[MASK]`. The model will output the most likely candidates for the masked word. 42 | 43 | ```python 44 | from transformers import pipeline 45 | 46 | calbert_fill_mask = pipeline("fill-mask", model="codegram/calbert-base-uncased", tokenizer="codegram/calbert-base-uncased") 47 | results = calbert_fill_mask("M'agrada [MASK] això") 48 | # results 49 | # [{'sequence': "[CLS] m'agrada molt aixo[SEP]", 'score': 0.614592969417572, 'token': 61}, 50 | # {'sequence': "[CLS] m'agrada moltíssim aixo[SEP]", 'score': 0.06058056280016899, 'token': 4867}, 51 | # {'sequence': "[CLS] m'agrada més aixo[SEP]", 'score': 0.017195818945765495, 'token': 43}, 52 | # {'sequence': "[CLS] m'agrada llegir aixo[SEP]", 'score': 0.016321714967489243, 'token': 684}, 53 | # {'sequence': "[CLS] m'agrada escriure aixo[SEP]", 'score': 0.012185849249362946, 'token': 1306}] 54 | ```
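Curious what the pipeline does under the hood? Here's a minimal sketch of the same prediction done by hand. It assumes the exported checkpoint keeps its masked-LM head; `AutoModelWithLMHead` is the class for that in the `transformers` version pinned here (2.8):

```python
import torch
from transformers import AutoModelWithLMHead, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("codegram/calbert-base-uncased")
model = AutoModelWithLMHead.from_pretrained("codegram/calbert-base-uncased")
model.eval()

inputs = tokenizer.encode("M'agrada [MASK] això", return_tensors="pt")
# Find the position of the [MASK] token in the encoded sequence
mask_position = (inputs == tokenizer.mask_token_id).nonzero()[0, 1]

with torch.no_grad():
    logits = model(inputs)[0]  # shape: (batch, seq_len, vocab_size)

# Top 5 candidates for the masked position, like the pipeline returns
probabilities = logits[0, mask_position].softmax(dim=-1)
values, indices = probabilities.topk(5)
print(tokenizer.convert_ids_to_tokens(indices.tolist()))
```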
55 | 56 | ### Extracting a feature vector from a Catalan sentence or document 57 | 58 | The extracted feature vector can be used, for example, to index documents as dense vectors in Elasticsearch and perform similarity searches. 59 | 60 | Another use case is _Natural Language Understanding_: using these vectors as abstract representations of documents/sentences that can be used as input to other downstream models such as classifiers. 61 | 62 | Here's how to extract the vectors from a sentence or document: 63 | 64 | ```python 65 | import torch 66 | # Tokenize in sub-words with SentencePiece 67 | tokenized_sentence = tokenizer.tokenize("M'és una mica igual") 68 | # ['▁m', "'", 'es', '▁una', '▁mica', '▁igual'] 69 | 70 | # Encode to vocabulary ids and add the special start and end tokens 71 | encoded_sentence = tokenizer.encode(tokenized_sentence) 72 | # [2, 109, 7, 71, 36, 371, 1103, 3] 73 | # NB: Can be done in one step: tokenizer.encode("M'és una mica igual") 74 | 75 | # Feed tokens to Calbert as a torch tensor (batch dim 1) 76 | encoded_sentence = torch.tensor(encoded_sentence).unsqueeze(0) 77 | embeddings, _ = model(encoded_sentence) 78 | embeddings.size() 79 | # torch.Size([1, 8, 768]) 80 | embeddings.detach() 81 | # tensor([[[-0.0261, 0.1166, -0.1075, ..., -0.0368, 0.0193, 0.0017], 82 | # [ 0.1289, -0.2252, 0.9881, ..., -0.1353, 0.3534, 0.0734], 83 | # [-0.0328, -1.2364, 0.9466, ..., 0.3455, 0.7010, -0.2085], 84 | # ..., 85 | # [ 0.0397, -1.0228, -0.2239, ..., 0.2932, 0.1248, 0.0813], 86 | # [-0.0261, 0.1165, -0.1074, ..., -0.0368, 0.0193, 0.0017], 87 | # [-0.1934, -0.2357, -0.2554, ..., 0.1831, 0.6085, 0.1421]]]) 88 | ``` 89 | 90 | ## Credits 91 | 92 | This is part of the applied research we do at [Codegram](https://codegram.com) (who is to thank for the time and the compute!). 93 | 94 | This would have been a ton of pain to build without [Huggingface](http://huggingface.co)'s powerful [transformers](http://github.com/huggingface/transformers) and [tokenizers](http://github.com/huggingface/tokenizers) libraries. Thank you for making NLP actually nice to work with! 95 | 96 | Also, thanks to Google Research for creating and open-sourcing [ALBERT](https://github.com/google-research/ALBERT) in the first place. 97 | 98 | ## What on earth is an ALBERT 99 | 100 | ALBERT is a Language Model, that is, a neural network that can learn sequences with a certain structure, such as sentences in natural language (but not only natural language!). 101 | 102 | But how do they learn language? Different language models are trained with different **pretext tasks**, namely challenges that you give them so that they can learn how language works. The idea is that in order to get reasonably good at this one task, they must indirectly learn the grammar of the language, and even its semantics and style. 103 | 104 | Traditional (also known as **causal**) language models are usually trained with the task of **predicting the next word** in a sequence, like this: 105 | 106 | - Input: "the dog was eating very [BLANK]" 107 | - Correct output: "quickly" 108 | 109 | However, ALBERT is of another family called **masked language models**. In this family, the pretext task they have to learn is similar, but instead of always predicting the last word in a sequence, some words in the sentence are randomly turned into blanks (or **masked**), like this: 110 | 111 | - Input: "the [BLANK] was eating very [BLANK]" 112 | - Correct output: "dog", "quickly" 113 | 114 | This task is a little more difficult, and more importantly, requires understanding the context surrounding a blank much better.
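To make this concrete, here's a toy sketch (plain PyTorch; the function and names are illustrative) of how a batch of token ids gets corrupted for the masked-LM task, following BERT's usual 80/10/10 recipe. A fuller version of this lives in `calbert/dataset.py` as `mask_tokens`:

```python
import torch

def toy_mask(ids: torch.Tensor, mask_id: int, vocab_size: int, p: float = 0.15):
    "Corrupt a batch of token ids the way masked-LM pretraining does."
    labels = ids.clone()
    picked = torch.bernoulli(torch.full(ids.shape, p)).bool()
    labels[~picked] = -100  # only the picked positions contribute to the loss

    corrupted = ids.clone()
    # 80% of the picked positions become [MASK]
    masked = torch.bernoulli(torch.full(ids.shape, 0.8)).bool() & picked
    corrupted[masked] = mask_id
    # half of the rest (10% overall) become a random token
    randomized = torch.bernoulli(torch.full(ids.shape, 0.5)).bool() & picked & ~masked
    corrupted[randomized] = torch.randint(vocab_size, ids.shape)[randomized]
    # the remaining 10% stay unchanged
    return corrupted, labels
```

During pretraining the model sees the corrupted ids, and the loss is computed only at positions where `labels` is not -100.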
115 | 116 | ### How are those pretext tasks anything more than a pointless waste of electricity 117 | 118 | Turns out, once a language model gets really, really good at this rather pointless pretext task, it can be easily repurposed for much more interesting tasks. 119 | 120 | Once a language model learns grammar and semantics, it can become a very good classifier of sentences, and even whole documents, for example. 121 | 122 | If you then teach it to classify tweets or documents into categories (or identify sentiment or toxicity, for example), it no longer sees just a bunch of confusing characters, but rather it's "reading" the document at a much more abstract level, so it can "make sense" of it much more readily. (Note the air quotes, this is not magic but it is probably the closest thing.) 123 | 124 | ### Why ALBERT in Catalan 125 | 126 | Because there are no language models in Catalan! And there's a lot of Catalan text to be processed (in Catalonia, at least). 127 | 128 | ## Setup 129 | 130 | For dependency management we use [Poetry](https://python-poetry.org) (and Docker of course). 131 | 132 | ```bash 133 | pip install -U poetry 134 | poetry install 135 | poetry shell 136 | ``` 137 | 138 | The production image to train the model is under `docker/`, and it's called `codegram/calbert`. It contains all the latest dependencies, but no code -- Deepkit will ship the code in every experiment (read on to learn more about Deepkit). 139 | 140 | ## Dataset and tokenizers 141 | 142 | All config lives under `config`. There you can control parameters related to training, tokenizing and everything else, and even choose which version of the model to train. 143 | 144 | All configuration is overridable, since it's [Hydra](https://hydra.cc) configuration. Check their docs. 145 | 146 | ### Getting the dataset 147 | 148 | A tiny subset of the dataset lives under `dist/data` so that you can train a small model and do quick experiments locally. 149 | 150 | To download the full dataset and automatically split it into training / validation, just run this command: 151 | 152 | ```bash 153 | python -m calbert download_data --out-dir dataset 154 | ``` 155 | 156 | ### Re-training the tokenizers 157 | 158 | The pretrained tokenizers are at `dist/tokenizer-{cased,uncased}`. They are trained on the full training set only (never on the validation set).
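If you just want to poke at a trained tokenizer, you can load the SentencePiece model directly. A minimal sketch using the `sentencepiece` package (already a dependency); the file name follows the `ca.{uncased|cased}.VOCABSIZE.model` convention:

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("dist/tokenizer-uncased/ca.uncased.30000.model")

# Note: the uncased model expects lowercased input
print(sp.EncodeAsPieces("m'agrada força saber-ne més"))
print(sp.EncodeAsIds("m'agrada força saber-ne més"))
```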
159 | 160 | If you want to re-train the tokenizer (by default uncased): 161 | 162 | ```bash 163 | python -m calbert train_tokenizer --input-file dataset/train.txt --out-dir tokenizer 164 | ``` 165 | 166 | To train the cased one, just override the appropriate Hydra configuration: 167 | 168 | ```bash 169 | python -m calbert train_tokenizer --input-file dataset/train.txt --out-dir tokenizer vocab.lowercase=False 170 | ``` 171 | 172 | ## Training and running experiments 173 | 174 | We use [Deepkit](https://deepkit.ai) to run and keep track of experiments. Download it for free for your platform of choice if you'd like to run locally, or check their docs to run against their free community server. 175 | 176 | ### Training a test model 177 | 178 | To make sure everything works, let's train a test model with the actual Docker image in Deepkit: 179 | 180 | ```bash 181 | deepkit run test.deepkit.yml 182 | ``` 183 | 184 | By default it will train in your local Deepkit instance, using your CPU. Read [their docs](https://deepkit.ai/documentation/getting-started) to learn how to customize your runs. 185 | 186 | ### Training on a cluster 187 | 188 | Configure a cluster in your local Deepkit with at least one machine with a GPU. 189 | 190 | ```bash 191 | deepkit run --cluster 192 | ``` 193 | 194 | ### Sharing the model with the world 195 | 196 | Once you have a trained model, you can export it to be used as a standard HuggingFace transformers model. 197 | 198 | For example, let's imagine you trained a `base-uncased` model and you want to export it. 199 | 200 | Download the `export` folder from the outputs in your Deepkit run, and run: 201 | 202 | ```bash 203 | mv export calbert-base-uncased 204 | transformers-cli login 205 | transformers-cli upload calbert-base-uncased 206 | ``` 207 | 208 | ### Running tests 209 | 210 | ```bash 211 | make test 212 | ``` 213 | -------------------------------------------------------------------------------- /poetry.lock: -------------------------------------------------------------------------------- 1 | [[package]] 2 | category = "dev" 3 | description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 4 | name = "appdirs" 5 | optional = false 6 | python-versions = "*" 7 | version = "1.4.4" 8 | 9 | [[package]] 10 | category = "main" 11 | description = "Disable App Nap on OS X 10.9" 12 | marker = "sys_platform == \"darwin\" or platform_system == \"Darwin\"" 13 | name = "appnope" 14 | optional = false 15 | python-versions = "*" 16 | version = "0.1.0" 17 | 18 | [[package]] 19 | category = "dev" 20 | description = "Atomic file writes." 
21 | marker = "sys_platform == \"win32\"" 22 | name = "atomicwrites" 23 | optional = false 24 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 25 | version = "1.4.0" 26 | 27 | [[package]] 28 | category = "dev" 29 | description = "Classes Without Boilerplate" 30 | name = "attrs" 31 | optional = false 32 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 33 | version = "19.3.0" 34 | 35 | [package.extras] 36 | azure-pipelines = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "pytest-azurepipelines"] 37 | dev = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "sphinx", "pre-commit"] 38 | docs = ["sphinx", "zope.interface"] 39 | tests = ["coverage", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"] 40 | 41 | [[package]] 42 | category = "main" 43 | description = "Specifications for callback functions passed in to an API" 44 | name = "backcall" 45 | optional = false 46 | python-versions = "*" 47 | version = "0.1.0" 48 | 49 | [[package]] 50 | category = "dev" 51 | description = "The uncompromising code formatter." 52 | name = "black" 53 | optional = false 54 | python-versions = ">=3.6" 55 | version = "19.10b0" 56 | 57 | [package.dependencies] 58 | appdirs = "*" 59 | attrs = ">=18.1.0" 60 | click = ">=6.5" 61 | pathspec = ">=0.6,<1" 62 | regex = "*" 63 | toml = ">=0.9.4" 64 | typed-ast = ">=1.4.0" 65 | 66 | [package.extras] 67 | d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] 68 | 69 | [[package]] 70 | category = "main" 71 | description = "The Blis BLAS-like linear algebra library, as a self-contained C-extension." 72 | name = "blis" 73 | optional = false 74 | python-versions = "*" 75 | version = "0.4.1" 76 | 77 | [package.dependencies] 78 | numpy = ">=1.15.0" 79 | 80 | [[package]] 81 | category = "main" 82 | description = "The AWS SDK for Python" 83 | name = "boto3" 84 | optional = false 85 | python-versions = "*" 86 | version = "1.13.23" 87 | 88 | [package.dependencies] 89 | botocore = ">=1.16.23,<1.17.0" 90 | jmespath = ">=0.7.1,<1.0.0" 91 | s3transfer = ">=0.3.0,<0.4.0" 92 | 93 | [[package]] 94 | category = "main" 95 | description = "Low-level, data-driven core of boto 3." 96 | name = "botocore" 97 | optional = false 98 | python-versions = "*" 99 | version = "1.16.23" 100 | 101 | [package.dependencies] 102 | docutils = ">=0.10,<0.16" 103 | jmespath = ">=0.7.1,<1.0.0" 104 | python-dateutil = ">=2.1,<3.0.0" 105 | 106 | [package.dependencies.urllib3] 107 | python = "<3.4.0 || >=3.5.0" 108 | version = ">=1.20,<1.26" 109 | 110 | [[package]] 111 | category = "main" 112 | description = "Super lightweight function registries for your library" 113 | name = "catalogue" 114 | optional = false 115 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" 116 | version = "1.0.0" 117 | 118 | [package.dependencies] 119 | [package.dependencies.importlib-metadata] 120 | python = "<3.8" 121 | version = ">=0.20" 122 | 123 | [[package]] 124 | category = "main" 125 | description = "Python package for providing Mozilla's CA Bundle." 
126 | name = "certifi" 127 | optional = false 128 | python-versions = "*" 129 | version = "2020.4.5.1" 130 | 131 | [[package]] 132 | category = "main" 133 | description = "Universal encoding detector for Python 2 and 3" 134 | name = "chardet" 135 | optional = false 136 | python-versions = "*" 137 | version = "3.0.4" 138 | 139 | [[package]] 140 | category = "main" 141 | description = "Composable command line interface toolkit" 142 | name = "click" 143 | optional = false 144 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 145 | version = "7.1.2" 146 | 147 | [[package]] 148 | category = "main" 149 | description = "Cross-platform colored terminal text." 150 | marker = "sys_platform == \"win32\"" 151 | name = "colorama" 152 | optional = false 153 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 154 | version = "0.4.3" 155 | 156 | [[package]] 157 | category = "main" 158 | description = "Log formatting with colors!" 159 | name = "colorlog" 160 | optional = false 161 | python-versions = "*" 162 | version = "4.1.0" 163 | 164 | [package.dependencies] 165 | colorama = "*" 166 | 167 | [[package]] 168 | category = "main" 169 | description = "Composable style cycles" 170 | name = "cycler" 171 | optional = false 172 | python-versions = "*" 173 | version = "0.10.0" 174 | 175 | [package.dependencies] 176 | six = "*" 177 | 178 | [[package]] 179 | category = "main" 180 | description = "Manage calls to calloc/free through Cython" 181 | name = "cymem" 182 | optional = false 183 | python-versions = "*" 184 | version = "2.0.3" 185 | 186 | [[package]] 187 | category = "main" 188 | description = "Decorators for Humans" 189 | name = "decorator" 190 | optional = false 191 | python-versions = ">=2.6, !=3.0.*, !=3.1.*" 192 | version = "4.4.2" 193 | 194 | [[package]] 195 | category = "main" 196 | description = "Python SDK for Deepkit" 197 | name = "deepkit" 198 | optional = false 199 | python-versions = "*" 200 | version = "1.0.5" 201 | 202 | [package.dependencies] 203 | Pillow = ">=4.0.0" 204 | PyYAML = ">=5.0.0" 205 | numpy = "*" 206 | psutil = ">=5.7.0" 207 | rx = ">=1.5" 208 | typedload = ">=1.20" 209 | websockets = ">=8.1" 210 | 211 | [package.extras] 212 | pytorch = ["torch"] 213 | 214 | [[package]] 215 | category = "main" 216 | description = "Docutils -- Python Documentation Utilities" 217 | name = "docutils" 218 | optional = false 219 | python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" 220 | version = "0.15.2" 221 | 222 | [[package]] 223 | category = "main" 224 | description = "Version 2 of the fastai library" 225 | name = "fastai2" 226 | optional = false 227 | python-versions = ">=3.6" 228 | version = "0.0.17" 229 | 230 | [package.dependencies] 231 | fastcore = "*" 232 | fastprogress = ">=0.1.22" 233 | matplotlib = "*" 234 | pandas = "*" 235 | pillow = "*" 236 | pyyaml = "*" 237 | requests = "*" 238 | scikit-learn = "*" 239 | scipy = "*" 240 | spacy = "*" 241 | torch = ">=1.3.0" 242 | torchvision = ">=0.5" 243 | 244 | [package.extras] 245 | dev = ["nbdev"] 246 | 247 | [[package]] 248 | category = "main" 249 | description = "Python supercharged for fastai development" 250 | name = "fastcore" 251 | optional = false 252 | python-versions = ">=3.6" 253 | version = "0.1.17" 254 | 255 | [package.dependencies] 256 | numpy = "*" 257 | 258 | [package.extras] 259 | dev = ["nbdev", "matplotlib", "pillow", "torch", "pandas"] 260 | 261 | [[package]] 262 | category = "main" 263 | description = "A nested progress with plotting options for fastai" 264 | name = "fastprogress" 
265 | optional = false 266 | python-versions = ">=3.6" 267 | version = "0.2.3" 268 | 269 | [[package]] 270 | category = "main" 271 | description = "A fast way to turn your python function into a script" 272 | name = "fastscript" 273 | optional = false 274 | python-versions = ">=3.6" 275 | version = "0.1.4" 276 | 277 | [[package]] 278 | category = "main" 279 | description = "A platform independent file lock." 280 | name = "filelock" 281 | optional = false 282 | python-versions = "*" 283 | version = "3.0.12" 284 | 285 | [[package]] 286 | category = "dev" 287 | description = "the modular source code checker: pep8 pyflakes and co" 288 | name = "flake8" 289 | optional = false 290 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" 291 | version = "3.8.2" 292 | 293 | [package.dependencies] 294 | mccabe = ">=0.6.0,<0.7.0" 295 | pycodestyle = ">=2.6.0a1,<2.7.0" 296 | pyflakes = ">=2.2.0,<2.3.0" 297 | 298 | [package.dependencies.importlib-metadata] 299 | python = "<3.8" 300 | version = "*" 301 | 302 | [[package]] 303 | category = "main" 304 | description = "Clean single-source support for Python 3 and 2" 305 | name = "future" 306 | optional = false 307 | python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" 308 | version = "0.18.2" 309 | 310 | [[package]] 311 | category = "main" 312 | description = "Enables colorlog for Hydra apps" 313 | name = "hydra-colorlog" 314 | optional = false 315 | python-versions = "*" 316 | version = "0.1.4" 317 | 318 | [package.dependencies] 319 | colorlog = "*" 320 | hydra-core = "*" 321 | 322 | [[package]] 323 | category = "main" 324 | description = "A framework for elegantly configuring complex applications" 325 | name = "hydra-core" 326 | optional = false 327 | python-versions = "*" 328 | version = "0.11.3" 329 | 330 | [package.dependencies] 331 | omegaconf = ">=1.4,<1.5" 332 | 333 | [package.extras] 334 | dev = ["black", "coverage", "flake8", "flake8-copyright", "nox", "pre-commit", "pytest", "setuptools", "towncrier", "twine"] 335 | 336 | [[package]] 337 | category = "main" 338 | description = "Internationalized Domain Names in Applications (IDNA)" 339 | name = "idna" 340 | optional = false 341 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 342 | version = "2.9" 343 | 344 | [[package]] 345 | category = "main" 346 | description = "Read metadata from Python packages" 347 | marker = "python_version < \"3.8\"" 348 | name = "importlib-metadata" 349 | optional = false 350 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" 351 | version = "1.6.0" 352 | 353 | [package.dependencies] 354 | zipp = ">=0.5" 355 | 356 | [package.extras] 357 | docs = ["sphinx", "rst.linker"] 358 | testing = ["packaging", "importlib-resources"] 359 | 360 | [[package]] 361 | category = "main" 362 | description = "IPython Kernel for Jupyter" 363 | name = "ipykernel" 364 | optional = false 365 | python-versions = ">=3.5" 366 | version = "5.3.0" 367 | 368 | [package.dependencies] 369 | appnope = "*" 370 | ipython = ">=5.0.0" 371 | jupyter-client = "*" 372 | tornado = ">=4.2" 373 | traitlets = ">=4.1.0" 374 | 375 | [package.extras] 376 | test = ["pytest (!=5.3.4)", "pytest-cov", "flaky", "nose"] 377 | 378 | [[package]] 379 | category = "main" 380 | description = "IPython: Productive Interactive Computing" 381 | name = "ipython" 382 | optional = false 383 | python-versions = ">=3.6" 384 | version = "7.15.0" 385 | 386 | [package.dependencies] 387 | appnope = "*" 388 | backcall = "*" 389 | colorama = "*" 390 | decorator = "*" 391 | jedi = ">=0.10" 392 | pexpect = 
"*" 393 | pickleshare = "*" 394 | prompt-toolkit = ">=2.0.0,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.1.0" 395 | pygments = "*" 396 | setuptools = ">=18.5" 397 | traitlets = ">=4.2" 398 | 399 | [package.extras] 400 | all = ["Sphinx (>=1.3)", "ipykernel", "ipyparallel", "ipywidgets", "nbconvert", "nbformat", "nose (>=0.10.1)", "notebook", "numpy (>=1.14)", "pygments", "qtconsole", "requests", "testpath"] 401 | doc = ["Sphinx (>=1.3)"] 402 | kernel = ["ipykernel"] 403 | nbconvert = ["nbconvert"] 404 | nbformat = ["nbformat"] 405 | notebook = ["notebook", "ipywidgets"] 406 | parallel = ["ipyparallel"] 407 | qtconsole = ["qtconsole"] 408 | test = ["nose (>=0.10.1)", "requests", "testpath", "pygments", "nbformat", "ipykernel", "numpy (>=1.14)"] 409 | 410 | [[package]] 411 | category = "main" 412 | description = "Vestigial utilities from IPython" 413 | name = "ipython-genutils" 414 | optional = false 415 | python-versions = "*" 416 | version = "0.2.0" 417 | 418 | [[package]] 419 | category = "main" 420 | description = "An autocompletion tool for Python that can be used for text editors." 421 | name = "jedi" 422 | optional = false 423 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 424 | version = "0.17.0" 425 | 426 | [package.dependencies] 427 | parso = ">=0.7.0" 428 | 429 | [package.extras] 430 | qa = ["flake8 (3.7.9)"] 431 | testing = ["colorama", "docopt", "pytest (>=3.9.0,<5.0.0)"] 432 | 433 | [[package]] 434 | category = "main" 435 | description = "JSON Matching Expressions" 436 | name = "jmespath" 437 | optional = false 438 | python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" 439 | version = "0.10.0" 440 | 441 | [[package]] 442 | category = "main" 443 | description = "Lightweight pipelining: using Python functions as pipeline jobs." 444 | name = "joblib" 445 | optional = false 446 | python-versions = ">=3.6" 447 | version = "0.15.1" 448 | 449 | [[package]] 450 | category = "main" 451 | description = "Jupyter protocol implementation and client libraries" 452 | name = "jupyter-client" 453 | optional = false 454 | python-versions = ">=3.5" 455 | version = "6.1.3" 456 | 457 | [package.dependencies] 458 | jupyter-core = ">=4.6.0" 459 | python-dateutil = ">=2.1" 460 | pyzmq = ">=13" 461 | tornado = ">=4.1" 462 | traitlets = "*" 463 | 464 | [package.extras] 465 | test = ["ipykernel", "ipython", "mock", "pytest"] 466 | 467 | [[package]] 468 | category = "main" 469 | description = "Jupyter core package. A base package on which Jupyter projects rely." 
470 | name = "jupyter-core" 471 | optional = false 472 | python-versions = "!=3.0,!=3.1,!=3.2,!=3.3,!=3.4,>=2.7" 473 | version = "4.6.3" 474 | 475 | [package.dependencies] 476 | pywin32 = ">=1.0" 477 | traitlets = "*" 478 | 479 | [[package]] 480 | category = "main" 481 | description = "A fast implementation of the Cassowary constraint solver" 482 | name = "kiwisolver" 483 | optional = false 484 | python-versions = ">=3.6" 485 | version = "1.2.0" 486 | 487 | [[package]] 488 | category = "main" 489 | description = "Python plotting package" 490 | name = "matplotlib" 491 | optional = false 492 | python-versions = ">=3.6" 493 | version = "3.2.1" 494 | 495 | [package.dependencies] 496 | cycler = ">=0.10" 497 | kiwisolver = ">=1.0.1" 498 | numpy = ">=1.11" 499 | pyparsing = ">=2.0.1,<2.0.4 || >2.0.4,<2.1.2 || >2.1.2,<2.1.6 || >2.1.6" 500 | python-dateutil = ">=2.1" 501 | 502 | [[package]] 503 | category = "dev" 504 | description = "McCabe checker, plugin for flake8" 505 | name = "mccabe" 506 | optional = false 507 | python-versions = "*" 508 | version = "0.6.1" 509 | 510 | [[package]] 511 | category = "dev" 512 | description = "More routines for operating on iterables, beyond itertools" 513 | name = "more-itertools" 514 | optional = false 515 | python-versions = ">=3.5" 516 | version = "8.3.0" 517 | 518 | [[package]] 519 | category = "main" 520 | description = "Cython bindings for MurmurHash" 521 | name = "murmurhash" 522 | optional = false 523 | python-versions = "*" 524 | version = "1.0.2" 525 | 526 | [[package]] 527 | category = "main" 528 | description = "NumPy is the fundamental package for array computing with Python." 529 | name = "numpy" 530 | optional = false 531 | python-versions = ">=3.5" 532 | version = "1.18.5" 533 | 534 | [[package]] 535 | category = "main" 536 | description = "A flexible configuration library" 537 | name = "omegaconf" 538 | optional = false 539 | python-versions = "*" 540 | version = "1.4.1" 541 | 542 | [package.dependencies] 543 | PyYAML = "*" 544 | six = "*" 545 | 546 | [package.extras] 547 | coverage = ["coveralls"] 548 | dev = ["black", "coveralls", "flake8", "pre-commit", "pytest", "nox", "towncrier", "twine"] 549 | dev27 = ["nox", "pre-commit", "pytest", "twine", "coveralls", "flake8"] 550 | lint = ["black", "flake8"] 551 | 552 | [[package]] 553 | category = "dev" 554 | description = "Core utilities for Python packages" 555 | name = "packaging" 556 | optional = false 557 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 558 | version = "20.4" 559 | 560 | [package.dependencies] 561 | pyparsing = ">=2.0.2" 562 | six = "*" 563 | 564 | [[package]] 565 | category = "main" 566 | description = "Powerful data structures for data analysis, time series, and statistics" 567 | name = "pandas" 568 | optional = false 569 | python-versions = ">=3.6.1" 570 | version = "1.0.4" 571 | 572 | [package.dependencies] 573 | numpy = ">=1.13.3" 574 | python-dateutil = ">=2.6.1" 575 | pytz = ">=2017.2" 576 | 577 | [package.extras] 578 | test = ["pytest (>=4.0.2)", "pytest-xdist", "hypothesis (>=3.58)"] 579 | 580 | [[package]] 581 | category = "main" 582 | description = "A Python Parser" 583 | name = "parso" 584 | optional = false 585 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 586 | version = "0.7.0" 587 | 588 | [package.extras] 589 | testing = ["docopt", "pytest (>=3.0.7)"] 590 | 591 | [[package]] 592 | category = "dev" 593 | description = "Utility library for gitignore style pattern matching of file paths." 
594 | name = "pathspec" 595 | optional = false 596 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 597 | version = "0.8.0" 598 | 599 | [[package]] 600 | category = "main" 601 | description = "Pexpect allows easy control of interactive console applications." 602 | marker = "sys_platform != \"win32\"" 603 | name = "pexpect" 604 | optional = false 605 | python-versions = "*" 606 | version = "4.8.0" 607 | 608 | [package.dependencies] 609 | ptyprocess = ">=0.5" 610 | 611 | [[package]] 612 | category = "main" 613 | description = "Tiny 'shelve'-like database with concurrency support" 614 | name = "pickleshare" 615 | optional = false 616 | python-versions = "*" 617 | version = "0.7.5" 618 | 619 | [[package]] 620 | category = "main" 621 | description = "Python Imaging Library (Fork)" 622 | name = "pillow" 623 | optional = false 624 | python-versions = ">=3.5" 625 | version = "7.1.2" 626 | 627 | [[package]] 628 | category = "main" 629 | description = "The smartest command line arguments parser in the world" 630 | name = "plac" 631 | optional = false 632 | python-versions = "*" 633 | version = "1.1.3" 634 | 635 | [[package]] 636 | category = "dev" 637 | description = "plugin and hook calling mechanisms for python" 638 | name = "pluggy" 639 | optional = false 640 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 641 | version = "0.13.1" 642 | 643 | [package.dependencies] 644 | [package.dependencies.importlib-metadata] 645 | python = "<3.8" 646 | version = ">=0.12" 647 | 648 | [package.extras] 649 | dev = ["pre-commit", "tox"] 650 | 651 | [[package]] 652 | category = "main" 653 | description = "Cython hash table that trusts the keys are pre-hashed" 654 | name = "preshed" 655 | optional = false 656 | python-versions = "*" 657 | version = "3.0.2" 658 | 659 | [package.dependencies] 660 | cymem = ">=2.0.2,<2.1.0" 661 | murmurhash = ">=0.28.0,<1.1.0" 662 | 663 | [[package]] 664 | category = "main" 665 | description = "Library for building powerful interactive command lines in Python" 666 | name = "prompt-toolkit" 667 | optional = false 668 | python-versions = ">=3.6.1" 669 | version = "3.0.5" 670 | 671 | [package.dependencies] 672 | wcwidth = "*" 673 | 674 | [[package]] 675 | category = "main" 676 | description = "Cross-platform lib for process and system monitoring in Python." 
677 | name = "psutil" 678 | optional = false 679 | python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 680 | version = "5.7.0" 681 | 682 | [package.extras] 683 | enum = ["enum34"] 684 | 685 | [[package]] 686 | category = "main" 687 | description = "Run a subprocess in a pseudo terminal" 688 | marker = "sys_platform != \"win32\"" 689 | name = "ptyprocess" 690 | optional = false 691 | python-versions = "*" 692 | version = "0.6.0" 693 | 694 | [[package]] 695 | category = "dev" 696 | description = "library with cross-python path, ini-parsing, io, code, log facilities" 697 | name = "py" 698 | optional = false 699 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 700 | version = "1.8.1" 701 | 702 | [[package]] 703 | category = "dev" 704 | description = "Python style guide checker" 705 | name = "pycodestyle" 706 | optional = false 707 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 708 | version = "2.6.0" 709 | 710 | [[package]] 711 | category = "dev" 712 | description = "passive checker of Python programs" 713 | name = "pyflakes" 714 | optional = false 715 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 716 | version = "2.2.0" 717 | 718 | [[package]] 719 | category = "main" 720 | description = "Pygments is a syntax highlighting package written in Python." 721 | name = "pygments" 722 | optional = false 723 | python-versions = ">=3.5" 724 | version = "2.6.1" 725 | 726 | [[package]] 727 | category = "main" 728 | description = "Python parsing module" 729 | name = "pyparsing" 730 | optional = false 731 | python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" 732 | version = "2.4.7" 733 | 734 | [[package]] 735 | category = "dev" 736 | description = "pytest: simple powerful testing with Python" 737 | name = "pytest" 738 | optional = false 739 | python-versions = ">=3.5" 740 | version = "5.4.3" 741 | 742 | [package.dependencies] 743 | atomicwrites = ">=1.0" 744 | attrs = ">=17.4.0" 745 | colorama = "*" 746 | more-itertools = ">=4.0.0" 747 | packaging = "*" 748 | pluggy = ">=0.12,<1.0" 749 | py = ">=1.5.0" 750 | wcwidth = "*" 751 | 752 | [package.dependencies.importlib-metadata] 753 | python = "<3.8" 754 | version = ">=0.12" 755 | 756 | [package.extras] 757 | checkqa-mypy = ["mypy (v0.761)"] 758 | testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] 759 | 760 | [[package]] 761 | category = "dev" 762 | description = "Concurrently execute test cases with multithread, multiprocess and gevent" 763 | name = "pytest-concurrent" 764 | optional = false 765 | python-versions = "*" 766 | version = "0.2.2" 767 | 768 | [package.dependencies] 769 | psutil = ">=5.2.2" 770 | pytest = ">=3.1.1" 771 | 772 | [[package]] 773 | category = "dev" 774 | description = "A testdox format reporter for pytest" 775 | name = "pytest-testdox" 776 | optional = false 777 | python-versions = "*" 778 | version = "1.2.1" 779 | 780 | [package.dependencies] 781 | pytest = ">=3.7.0" 782 | six = ">=1.11.0" 783 | 784 | [[package]] 785 | category = "main" 786 | description = "Extensions to the standard Python datetime module" 787 | name = "python-dateutil" 788 | optional = false 789 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" 790 | version = "2.8.1" 791 | 792 | [package.dependencies] 793 | six = ">=1.5" 794 | 795 | [[package]] 796 | category = "main" 797 | description = "World timezone definitions, modern and historical" 798 | name = "pytz" 799 | optional = false 800 | python-versions = "*" 801 | version = "2020.1" 802 | 803 | [[package]] 804 | 
category = "main" 805 | description = "Python for Window Extensions" 806 | marker = "sys_platform == \"win32\"" 807 | name = "pywin32" 808 | optional = false 809 | python-versions = "*" 810 | version = "227" 811 | 812 | [[package]] 813 | category = "main" 814 | description = "YAML parser and emitter for Python" 815 | name = "pyyaml" 816 | optional = false 817 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 818 | version = "5.3.1" 819 | 820 | [[package]] 821 | category = "main" 822 | description = "Python bindings for 0MQ" 823 | name = "pyzmq" 824 | optional = false 825 | python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*" 826 | version = "19.0.1" 827 | 828 | [[package]] 829 | category = "main" 830 | description = "Alternative regular expression module, to replace re." 831 | name = "regex" 832 | optional = false 833 | python-versions = "*" 834 | version = "2020.5.14" 835 | 836 | [[package]] 837 | category = "main" 838 | description = "Python HTTP for Humans." 839 | name = "requests" 840 | optional = false 841 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 842 | version = "2.23.0" 843 | 844 | [package.dependencies] 845 | certifi = ">=2017.4.17" 846 | chardet = ">=3.0.2,<4" 847 | idna = ">=2.5,<3" 848 | urllib3 = ">=1.21.1,<1.25.0 || >1.25.0,<1.25.1 || >1.25.1,<1.26" 849 | 850 | [package.extras] 851 | security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)"] 852 | socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7)", "win-inet-pton"] 853 | 854 | [[package]] 855 | category = "dev" 856 | description = "a python refactoring library..." 857 | name = "rope" 858 | optional = false 859 | python-versions = "*" 860 | version = "0.16.0" 861 | 862 | [package.extras] 863 | dev = ["pytest"] 864 | 865 | [[package]] 866 | category = "main" 867 | description = "Reactive Extensions (Rx) for Python" 868 | name = "rx" 869 | optional = false 870 | python-versions = ">=3.6.0" 871 | version = "3.1.0" 872 | 873 | [[package]] 874 | category = "main" 875 | description = "An Amazon S3 Transfer Manager" 876 | name = "s3transfer" 877 | optional = false 878 | python-versions = "*" 879 | version = "0.3.3" 880 | 881 | [package.dependencies] 882 | botocore = ">=1.12.36,<2.0a.0" 883 | 884 | [[package]] 885 | category = "main" 886 | description = "SacreMoses" 887 | name = "sacremoses" 888 | optional = false 889 | python-versions = "*" 890 | version = "0.0.43" 891 | 892 | [package.dependencies] 893 | click = "*" 894 | joblib = "*" 895 | regex = "*" 896 | six = "*" 897 | tqdm = "*" 898 | 899 | [[package]] 900 | category = "main" 901 | description = "A set of python modules for machine learning and data mining" 902 | name = "scikit-learn" 903 | optional = false 904 | python-versions = ">=3.6" 905 | version = "0.23.1" 906 | 907 | [package.dependencies] 908 | joblib = ">=0.11" 909 | numpy = ">=1.13.3" 910 | scipy = ">=0.19.1" 911 | threadpoolctl = ">=2.0.0" 912 | 913 | [package.extras] 914 | alldeps = ["numpy (>=1.13.3)", "scipy (>=0.19.1)"] 915 | 916 | [[package]] 917 | category = "main" 918 | description = "SciPy: Scientific Library for Python" 919 | name = "scipy" 920 | optional = false 921 | python-versions = ">=3.5" 922 | version = "1.4.1" 923 | 924 | [package.dependencies] 925 | numpy = ">=1.13.3" 926 | 927 | [[package]] 928 | category = "main" 929 | description = "SentencePiece python wrapper" 930 | name = "sentencepiece" 931 | optional = false 932 | python-versions = "*" 933 | version = "0.1.91" 934 | 935 | [[package]] 936 | category = "main" 937 | description = "Python 2 and 3 
compatibility utilities" 938 | name = "six" 939 | optional = false 940 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" 941 | version = "1.15.0" 942 | 943 | [[package]] 944 | category = "main" 945 | description = "Industrial-strength Natural Language Processing (NLP) in Python" 946 | name = "spacy" 947 | optional = false 948 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" 949 | version = "2.2.4" 950 | 951 | [package.dependencies] 952 | blis = ">=0.4.0,<0.5.0" 953 | catalogue = ">=0.0.7,<1.1.0" 954 | cymem = ">=2.0.2,<2.1.0" 955 | murmurhash = ">=0.28.0,<1.1.0" 956 | numpy = ">=1.15.0" 957 | plac = ">=0.9.6,<1.2.0" 958 | preshed = ">=3.0.2,<3.1.0" 959 | requests = ">=2.13.0,<3.0.0" 960 | setuptools = "*" 961 | srsly = ">=1.0.2,<1.1.0" 962 | thinc = "7.4.0" 963 | tqdm = ">=4.38.0,<5.0.0" 964 | wasabi = ">=0.4.0,<1.1.0" 965 | 966 | [package.extras] 967 | cuda = ["cupy (>=5.0.0b4)"] 968 | cuda100 = ["cupy-cuda100 (>=5.0.0b4)"] 969 | cuda80 = ["cupy-cuda80 (>=5.0.0b4)"] 970 | cuda90 = ["cupy-cuda90 (>=5.0.0b4)"] 971 | cuda91 = ["cupy-cuda91 (>=5.0.0b4)"] 972 | cuda92 = ["cupy-cuda92 (>=5.0.0b4)"] 973 | ja = ["fugashi (>=0.1.3)"] 974 | ko = ["natto-py (0.9.0)"] 975 | lookups = ["spacy-lookups-data (>=0.0.5,<0.2.0)"] 976 | th = ["pythainlp (>=2.0)"] 977 | 978 | [[package]] 979 | category = "main" 980 | description = "Modern high-performance serialization utilities for Python" 981 | name = "srsly" 982 | optional = false 983 | python-versions = "*" 984 | version = "1.0.2" 985 | 986 | [[package]] 987 | category = "main" 988 | description = "Practical Machine Learning for NLP" 989 | name = "thinc" 990 | optional = false 991 | python-versions = "*" 992 | version = "7.4.0" 993 | 994 | [package.dependencies] 995 | blis = ">=0.4.0,<0.5.0" 996 | catalogue = ">=0.0.7,<1.1.0" 997 | cymem = ">=2.0.2,<2.1.0" 998 | murmurhash = ">=0.28.0,<1.1.0" 999 | numpy = ">=1.7.0" 1000 | plac = ">=0.9.6,<1.2.0" 1001 | preshed = ">=1.0.1,<3.1.0" 1002 | srsly = ">=0.0.6,<1.1.0" 1003 | tqdm = ">=4.10.0,<5.0.0" 1004 | wasabi = ">=0.0.9,<1.1.0" 1005 | 1006 | [package.extras] 1007 | cuda = ["cupy (>=5.0.0b4)"] 1008 | cuda100 = ["cupy-cuda100 (>=5.0.0b4)"] 1009 | cuda101 = ["cupy-cuda101 (>=5.0.0b4)"] 1010 | cuda80 = ["cupy-cuda80 (>=5.0.0b4)"] 1011 | cuda90 = ["cupy-cuda90 (>=5.0.0b4)"] 1012 | cuda91 = ["cupy-cuda91 (>=5.0.0b4)"] 1013 | cuda92 = ["cupy-cuda92 (>=5.0.0b4)"] 1014 | 1015 | [[package]] 1016 | category = "main" 1017 | description = "threadpoolctl" 1018 | name = "threadpoolctl" 1019 | optional = false 1020 | python-versions = ">=3.5" 1021 | version = "2.1.0" 1022 | 1023 | [[package]] 1024 | category = "main" 1025 | description = "Fast and Customizable Tokenizers" 1026 | name = "tokenizers" 1027 | optional = false 1028 | python-versions = "*" 1029 | version = "0.5.2" 1030 | 1031 | [[package]] 1032 | category = "dev" 1033 | description = "Python Library for Tom's Obvious, Minimal Language" 1034 | name = "toml" 1035 | optional = false 1036 | python-versions = "*" 1037 | version = "0.10.1" 1038 | 1039 | [[package]] 1040 | category = "main" 1041 | description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" 1042 | name = "torch" 1043 | optional = false 1044 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.6.0" 1045 | version = "1.5.0" 1046 | 1047 | [package.dependencies] 1048 | future = "*" 1049 | numpy = "*" 1050 | 1051 | [[package]] 1052 | category = "main" 1053 | description = "image and video datasets and models for torch deep learning" 1054 | 
name = "torchvision" 1055 | optional = false 1056 | python-versions = "*" 1057 | version = "0.6.0" 1058 | 1059 | [package.dependencies] 1060 | numpy = "*" 1061 | pillow = ">=4.1.1" 1062 | torch = "1.5.0" 1063 | 1064 | [package.extras] 1065 | scipy = ["scipy"] 1066 | 1067 | [[package]] 1068 | category = "main" 1069 | description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 1070 | name = "tornado" 1071 | optional = false 1072 | python-versions = ">= 3.5" 1073 | version = "6.0.4" 1074 | 1075 | [[package]] 1076 | category = "main" 1077 | description = "Fast, Extensible Progress Meter" 1078 | name = "tqdm" 1079 | optional = false 1080 | python-versions = ">=2.6, !=3.0.*, !=3.1.*" 1081 | version = "4.46.1" 1082 | 1083 | [package.extras] 1084 | dev = ["py-make (>=0.1.0)", "twine", "argopt", "pydoc-markdown"] 1085 | 1086 | [[package]] 1087 | category = "main" 1088 | description = "Traitlets Python config system" 1089 | name = "traitlets" 1090 | optional = false 1091 | python-versions = "*" 1092 | version = "4.3.3" 1093 | 1094 | [package.dependencies] 1095 | decorator = "*" 1096 | ipython-genutils = "*" 1097 | six = "*" 1098 | 1099 | [package.extras] 1100 | test = ["pytest", "mock"] 1101 | 1102 | [[package]] 1103 | category = "main" 1104 | description = "State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch" 1105 | name = "transformers" 1106 | optional = false 1107 | python-versions = ">=3.6.0" 1108 | version = "2.8.0" 1109 | 1110 | [package.dependencies] 1111 | boto3 = "*" 1112 | filelock = "*" 1113 | numpy = "*" 1114 | regex = "!=2019.12.17" 1115 | requests = "*" 1116 | sacremoses = "*" 1117 | sentencepiece = "*" 1118 | tokenizers = "0.5.2" 1119 | tqdm = ">=4.27" 1120 | 1121 | [package.extras] 1122 | all = ["pydantic", "uvicorn", "fastapi", "starlette", "tensorflow", "torch"] 1123 | dev = ["pytest", "pytest-xdist", "black", "isort", "flake8", "mecab-python3", "scikit-learn", "tensorflow", "torch"] 1124 | docs = ["recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme"] 1125 | mecab = ["mecab-python3"] 1126 | quality = ["black", "isort", "flake8"] 1127 | serving = ["pydantic", "uvicorn", "fastapi", "starlette"] 1128 | sklearn = ["scikit-learn"] 1129 | testing = ["pytest", "pytest-xdist"] 1130 | tf = ["tensorflow"] 1131 | tf-cpu = ["tensorflow-cpu"] 1132 | torch = ["torch"] 1133 | 1134 | [[package]] 1135 | category = "dev" 1136 | description = "a fork of Python 2 and 3 ast modules with type comment support" 1137 | name = "typed-ast" 1138 | optional = false 1139 | python-versions = "*" 1140 | version = "1.4.1" 1141 | 1142 | [[package]] 1143 | category = "main" 1144 | description = "Load and dump data from json-like format into typed data structures" 1145 | name = "typedload" 1146 | optional = false 1147 | python-versions = "*" 1148 | version = "2.1" 1149 | 1150 | [[package]] 1151 | category = "main" 1152 | description = "HTTP library with thread-safe connection pooling, file post, and more." 
1153 | name = "urllib3" 1154 | optional = false 1155 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" 1156 | version = "1.25.9" 1157 | 1158 | [package.extras] 1159 | brotli = ["brotlipy (>=0.6.0)"] 1160 | secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "pyOpenSSL (>=0.14)", "ipaddress"] 1161 | socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7,<2.0)"] 1162 | 1163 | [[package]] 1164 | category = "main" 1165 | description = "A lightweight console printing and formatting toolkit" 1166 | name = "wasabi" 1167 | optional = false 1168 | python-versions = "*" 1169 | version = "0.6.0" 1170 | 1171 | [[package]] 1172 | category = "main" 1173 | description = "Measures the displayed width of unicode strings in a terminal" 1174 | name = "wcwidth" 1175 | optional = false 1176 | python-versions = "*" 1177 | version = "0.2.3" 1178 | 1179 | [[package]] 1180 | category = "main" 1181 | description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" 1182 | name = "websockets" 1183 | optional = false 1184 | python-versions = ">=3.6.1" 1185 | version = "8.1" 1186 | 1187 | [[package]] 1188 | category = "main" 1189 | description = "Backport of pathlib-compatible object wrapper for zip files" 1190 | marker = "python_version < \"3.8\"" 1191 | name = "zipp" 1192 | optional = false 1193 | python-versions = ">=3.6" 1194 | version = "3.1.0" 1195 | 1196 | [package.extras] 1197 | docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] 1198 | testing = ["jaraco.itertools", "func-timeout"] 1199 | 1200 | [metadata] 1201 | content-hash = "39ff7bebe8d3cc6d661a28ce55efe9d9424becf35df46fb9c1cd21eb86342530" 1202 | python-versions = "^3.7" 1203 | 1204 | [metadata.files] 1205 | appdirs = [ 1206 | {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, 1207 | {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, 1208 | ] 1209 | appnope = [ 1210 | {file = "appnope-0.1.0-py2.py3-none-any.whl", hash = "sha256:5b26757dc6f79a3b7dc9fab95359328d5747fcb2409d331ea66d0272b90ab2a0"}, 1211 | {file = "appnope-0.1.0.tar.gz", hash = "sha256:8b995ffe925347a2138d7ac0fe77155e4311a0ea6d6da4f5128fe4b3cbe5ed71"}, 1212 | ] 1213 | atomicwrites = [ 1214 | {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, 1215 | {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, 1216 | ] 1217 | attrs = [ 1218 | {file = "attrs-19.3.0-py2.py3-none-any.whl", hash = "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c"}, 1219 | {file = "attrs-19.3.0.tar.gz", hash = "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72"}, 1220 | ] 1221 | backcall = [ 1222 | {file = "backcall-0.1.0.tar.gz", hash = "sha256:38ecd85be2c1e78f77fd91700c76e14667dc21e2713b63876c0eb901196e01e4"}, 1223 | {file = "backcall-0.1.0.zip", hash = "sha256:bbbf4b1e5cd2bdb08f915895b51081c041bac22394fdfcfdfbe9f14b77c08bf2"}, 1224 | ] 1225 | black = [ 1226 | {file = "black-19.10b0-py36-none-any.whl", hash = "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b"}, 1227 | {file = "black-19.10b0.tar.gz", hash = "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539"}, 1228 | ] 1229 | blis = [ 1230 | {file = 
"blis-0.4.1-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:135450caabc8aea9bb9250329ebdf7189982d9b57d5c92789b2ba2fe52c247a7"}, 1231 | {file = "blis-0.4.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:26b16d6005bb2671699831b5cc699905215d1abde1ec5c1d04de7dcd9eb29f75"}, 1232 | {file = "blis-0.4.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d1d59faebc1c94f8f4f77154ef4b9d6d40364b111cf8fde48ee3b524c85f1075"}, 1233 | {file = "blis-0.4.1-cp35-cp35m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:38fe877a4b52e762f5e137a412e3c256545a696a12ae8c40d67b8815d2bb5097"}, 1234 | {file = "blis-0.4.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:1402d9cbb0fbc21b749dd5b87d7ee14249e74a0ca38be6ecc56b3b356fca2f21"}, 1235 | {file = "blis-0.4.1-cp35-cp35m-win_amd64.whl", hash = "sha256:8aeaf6954351593a1e412f80e398aa51df588d3c0de74b9f3323b694c603381b"}, 1236 | {file = "blis-0.4.1-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:3347a4b1b7d3ae14476aac9a6f7bf8ebf464863f4ebf4aea228874a7694ea240"}, 1237 | {file = "blis-0.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:77a6486b9794af01bcdfd1bc6e067c93add4b93292e6f95bf6e5ce7f98bf0163"}, 1238 | {file = "blis-0.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f0b0dad4d6268d9dba0a65a9db12dd7a2d8686b648399e4aa1aec7550697e99e"}, 1239 | {file = "blis-0.4.1-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:4fb89c47ee06b58a4410a16fd5794847517262c9d2a342643475b477dfeff0a4"}, 1240 | {file = "blis-0.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:03c368c9716ca814c436550a5f1e02ccf74850e613602519e3941d212e5aa177"}, 1241 | {file = "blis-0.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ddd732c5274d1082fa92e2c42317587d5ebabce7741ca98120f69bd45d004b99"}, 1242 | {file = "blis-0.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ede123065f3cacb109967755b3d83d4ca0de90643a9058129a6ab2d4051954f"}, 1243 | {file = "blis-0.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:00473602629ba69fe6565108e21957e918cb48b59f5bf2f6bfb6e04de42500cb"}, 1244 | {file = "blis-0.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:856142a11e37fd2c47c5006a3197e157bb8469a491a73d2d442223dd3279df84"}, 1245 | {file = "blis-0.4.1.tar.gz", hash = "sha256:d69257d317e86f34a7f230a2fd1f021fd2a1b944137f40d8cdbb23bd334cd0c4"}, 1246 | ] 1247 | boto3 = [ 1248 | {file = "boto3-1.13.23-py2.py3-none-any.whl", hash = "sha256:e974e7a3bbdbd6a73ffc07bea5fa0c0744a5a8b87dcca94702597176e3de465e"}, 1249 | {file = "boto3-1.13.23.tar.gz", hash = "sha256:bcaa88b2f81b88741c47da52f3414c876236700441df87b6198f860e6a200d6f"}, 1250 | ] 1251 | botocore = [ 1252 | {file = "botocore-1.16.23-py2.py3-none-any.whl", hash = "sha256:7778957bdc9a25dd33bb4383ebd6d45a8570a2cbff03d1edf430fdacec2b7437"}, 1253 | {file = "botocore-1.16.23.tar.gz", hash = "sha256:5831068c9b49b4c91b0733e0ec784a7733d8732359d73c67a07a0b0868433cae"}, 1254 | ] 1255 | catalogue = [ 1256 | {file = "catalogue-1.0.0-py2.py3-none-any.whl", hash = "sha256:584d78e7f4c3c6e2fd498eb56dfc8ef1f4ff738480237de2ccd26cbe2cf47172"}, 1257 | {file = "catalogue-1.0.0.tar.gz", hash = "sha256:d74d1d856c6b36a37bf14aa6dbbc27d0582667b7ab979a6108e61a575e8723f5"}, 1258 | ] 1259 | certifi = [ 1260 | {file = "certifi-2020.4.5.1-py2.py3-none-any.whl", hash = 
"sha256:1d987a998c75633c40847cc966fcf5904906c920a7f17ef374f5aa4282abd304"}, 1261 | {file = "certifi-2020.4.5.1.tar.gz", hash = "sha256:51fcb31174be6e6664c5f69e3e1691a2d72a1a12e90f872cbdb1567eb47b6519"}, 1262 | ] 1263 | chardet = [ 1264 | {file = "chardet-3.0.4-py2.py3-none-any.whl", hash = "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"}, 1265 | {file = "chardet-3.0.4.tar.gz", hash = "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae"}, 1266 | ] 1267 | click = [ 1268 | {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, 1269 | {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, 1270 | ] 1271 | colorama = [ 1272 | {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"}, 1273 | {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, 1274 | ] 1275 | colorlog = [ 1276 | {file = "colorlog-4.1.0-py2.py3-none-any.whl", hash = "sha256:732c191ebbe9a353ec160d043d02c64ddef9028de8caae4cfa8bd49b6afed53e"}, 1277 | {file = "colorlog-4.1.0.tar.gz", hash = "sha256:30aaef5ab2a1873dec5da38fd6ba568fa761c9fa10b40241027fa3edea47f3d2"}, 1278 | ] 1279 | cycler = [ 1280 | {file = "cycler-0.10.0-py2.py3-none-any.whl", hash = "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d"}, 1281 | {file = "cycler-0.10.0.tar.gz", hash = "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8"}, 1282 | ] 1283 | cymem = [ 1284 | {file = "cymem-2.0.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:f4f19af4bca81f11922508a9dcf30ce1d2aee4972af9f81ce8e5331a6f46f5e1"}, 1285 | {file = "cymem-2.0.3-cp35-cp35m-win_amd64.whl", hash = "sha256:cd21ec48ee70878d46c486e2f7ae94b32bfc6b37c4d27876c5a5a00c4eb75c3c"}, 1286 | {file = "cymem-2.0.3-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:6f4cb689a9552e9e13dccc89203c8ab09f210a7ffb92ce27c384a4a0be27b527"}, 1287 | {file = "cymem-2.0.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:7236252bed70f37b898933dcf8aa875d0829664a245a272516f27b30439df71c"}, 1288 | {file = "cymem-2.0.3-cp36-cp36m-win_amd64.whl", hash = "sha256:719f04a11ca709fc2b47868070d79fccff77e5d502ff32de2f4baa73cb16166f"}, 1289 | {file = "cymem-2.0.3-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:d7505c500d994f11662e5595f5002251f572acc189f18944619352e2636f5181"}, 1290 | {file = "cymem-2.0.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:c288a1bbdf58c360457443e5297e74844e1961e5e7001dbcb3a5297a41911a11"}, 1291 | {file = "cymem-2.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:7f5ddceb12b73f7fd2e4398266401b6f887003740ccd18c989a2af04500b5f2b"}, 1292 | {file = "cymem-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:622c20a57701d02f01a47e856dea248e112638f28c8249dbe3ed95a9702e3d74"}, 1293 | {file = "cymem-2.0.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:85b9364e099426bd7f445a7705aad87bf6dbb71d79e3802dd8ca14e181d38a33"}, 1294 | {file = "cymem-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:dd24848fbd75b17bab06408da6c029ba7cc615bd9e4a1f755fb3a090025fb922"}, 1295 | {file = "cymem-2.0.3.tar.gz", hash = "sha256:5083b2ab5fe13ced094a82e0df465e2dbbd9b1c013288888035e24fd6eb4ed01"}, 1296 | ] 1297 | decorator = [ 1298 | {file = "decorator-4.4.2-py2.py3-none-any.whl", hash = "sha256:41fa54c2a0cc4ba648be4fd43cff00aedf5b9465c9bf18d64325bc225f08f760"}, 1299 | {file = 
"decorator-4.4.2.tar.gz", hash = "sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7"}, 1300 | ] 1301 | deepkit = [ 1302 | {file = "deepkit-1.0.5-py3-none-any.whl", hash = "sha256:75bff1d053829fb7a31fc795a194c7059cdeb23f4a84bd63f2369360f88f5353"}, 1303 | {file = "deepkit-1.0.5.tar.gz", hash = "sha256:8c93321356449266beff2ec6ac5def9d73705e489b289b9d643ea3a67e4729d9"}, 1304 | ] 1305 | docutils = [ 1306 | {file = "docutils-0.15.2-py2-none-any.whl", hash = "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827"}, 1307 | {file = "docutils-0.15.2-py3-none-any.whl", hash = "sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0"}, 1308 | {file = "docutils-0.15.2.tar.gz", hash = "sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99"}, 1309 | ] 1310 | fastai2 = [ 1311 | {file = "fastai2-0.0.17-py3-none-any.whl", hash = "sha256:d0a31cea5bab265101b30506eb77694044a8cb6924990261232e06f7b982a1be"}, 1312 | {file = "fastai2-0.0.17.tar.gz", hash = "sha256:887a4dd660fe68ba52d220bbfcdd0944b798c5144c406015054fea5746036e71"}, 1313 | ] 1314 | fastcore = [ 1315 | {file = "fastcore-0.1.17-py3-none-any.whl", hash = "sha256:7340de5e1597ea79b20d0136a3dc9975a989fc9a8cf4a1e7e1c1d2319f1e5b54"}, 1316 | {file = "fastcore-0.1.17.tar.gz", hash = "sha256:d1ca794593322c5c5e046f431ff0a5e12a89af2a60418e2ebce5787ec95cb84d"}, 1317 | ] 1318 | fastprogress = [ 1319 | {file = "fastprogress-0.2.3-py3-none-any.whl", hash = "sha256:8b4d7a6af31bafbe1f17a8e5c29befe514a6d84a920c2d215cb5fac016c8e661"}, 1320 | {file = "fastprogress-0.2.3.tar.gz", hash = "sha256:0d3db1d44bf9538e4d4f94c5eb8676017256bdcca94fa0c49b4546b72c1fefed"}, 1321 | ] 1322 | fastscript = [ 1323 | {file = "fastscript-0.1.4-py3-none-any.whl", hash = "sha256:c495d13b827963981568a7f48cb5291cafef7f3cbc6adbb6f0cb1e4db26f4ca3"}, 1324 | {file = "fastscript-0.1.4.tar.gz", hash = "sha256:a0fd479b17ac063d4005d72140280b426f66ee503fb39dc5e4c2550fb28ba19a"}, 1325 | ] 1326 | filelock = [ 1327 | {file = "filelock-3.0.12-py3-none-any.whl", hash = "sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836"}, 1328 | {file = "filelock-3.0.12.tar.gz", hash = "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59"}, 1329 | ] 1330 | flake8 = [ 1331 | {file = "flake8-3.8.2-py2.py3-none-any.whl", hash = "sha256:ccaa799ef9893cebe69fdfefed76865aeaefbb94cb8545617b2298786a4de9a5"}, 1332 | {file = "flake8-3.8.2.tar.gz", hash = "sha256:c69ac1668e434d37a2d2880b3ca9aafd54b3a10a3ac1ab101d22f29e29cf8634"}, 1333 | ] 1334 | future = [ 1335 | {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, 1336 | ] 1337 | hydra-colorlog = [ 1338 | {file = "hydra-colorlog-0.1.4.tar.gz", hash = "sha256:79dfefb02eb1ae435ebdb897edeb89a85e332bf3a6336eb4ecbf0cf9f530a826"}, 1339 | {file = "hydra_colorlog-0.1.4-py3-none-any.whl", hash = "sha256:6b03fb0011bbf91bf8236b8db4de707297ce3fc84a9975da03f673c5ca19ca09"}, 1340 | ] 1341 | hydra-core = [ 1342 | {file = "hydra-core-0.11.3.tar.gz", hash = "sha256:173d2688b65ba1345f4f43b6bc5a3d92485b3cebfccca2ca33d5f8a37ec6fdc6"}, 1343 | {file = "hydra_core-0.11.3-py3-none-any.whl", hash = "sha256:43ef27b4a32a70cbf566b10bd198def5c5d03cab212d4ea49e744aeb0c14a6b7"}, 1344 | ] 1345 | idna = [ 1346 | {file = "idna-2.9-py2.py3-none-any.whl", hash = "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa"}, 1347 | {file = "idna-2.9.tar.gz", hash = 
"sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb"}, 1348 | ] 1349 | importlib-metadata = [ 1350 | {file = "importlib_metadata-1.6.0-py2.py3-none-any.whl", hash = "sha256:2a688cbaa90e0cc587f1df48bdc97a6eadccdcd9c35fb3f976a09e3b5016d90f"}, 1351 | {file = "importlib_metadata-1.6.0.tar.gz", hash = "sha256:34513a8a0c4962bc66d35b359558fd8a5e10cd472d37aec5f66858addef32c1e"}, 1352 | ] 1353 | ipykernel = [ 1354 | {file = "ipykernel-5.3.0-py3-none-any.whl", hash = "sha256:a8362e3ae365023ca458effe93b026b8cdadc0b73ff3031472128dd8a2cf0289"}, 1355 | {file = "ipykernel-5.3.0.tar.gz", hash = "sha256:731adb3f2c4ebcaff52e10a855ddc87670359a89c9c784d711e62d66fccdafae"}, 1356 | ] 1357 | ipython = [ 1358 | {file = "ipython-7.15.0-py3-none-any.whl", hash = "sha256:1b85d65632211bf5d3e6f1406f3393c8c429a47d7b947b9a87812aa5bce6595c"}, 1359 | {file = "ipython-7.15.0.tar.gz", hash = "sha256:0ef1433879816a960cd3ae1ae1dc82c64732ca75cec8dab5a4e29783fb571d0e"}, 1360 | ] 1361 | ipython-genutils = [ 1362 | {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, 1363 | {file = "ipython_genutils-0.2.0.tar.gz", hash = "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"}, 1364 | ] 1365 | jedi = [ 1366 | {file = "jedi-0.17.0-py2.py3-none-any.whl", hash = "sha256:cd60c93b71944d628ccac47df9a60fec53150de53d42dc10a7fc4b5ba6aae798"}, 1367 | {file = "jedi-0.17.0.tar.gz", hash = "sha256:df40c97641cb943661d2db4c33c2e1ff75d491189423249e989bcea4464f3030"}, 1368 | ] 1369 | jmespath = [ 1370 | {file = "jmespath-0.10.0-py2.py3-none-any.whl", hash = "sha256:cdf6525904cc597730141d61b36f2e4b8ecc257c420fa2f4549bac2c2d0cb72f"}, 1371 | {file = "jmespath-0.10.0.tar.gz", hash = "sha256:b85d0567b8666149a93172712e68920734333c0ce7e89b78b3e987f71e5ed4f9"}, 1372 | ] 1373 | joblib = [ 1374 | {file = "joblib-0.15.1-py3-none-any.whl", hash = "sha256:6825784ffda353cc8a1be573118085789e5b5d29401856b35b756645ab5aecb5"}, 1375 | {file = "joblib-0.15.1.tar.gz", hash = "sha256:61e49189c84b3c5d99a969d314853f4d1d263316cc694bec17548ebaa9c47b6e"}, 1376 | ] 1377 | jupyter-client = [ 1378 | {file = "jupyter_client-6.1.3-py3-none-any.whl", hash = "sha256:cde8e83aab3ec1c614f221ae54713a9a46d3bf28292609d2db1b439bef5a8c8e"}, 1379 | {file = "jupyter_client-6.1.3.tar.gz", hash = "sha256:3a32fa4d0b16d1c626b30c3002a62dfd86d6863ed39eaba3f537fade197bb756"}, 1380 | ] 1381 | jupyter-core = [ 1382 | {file = "jupyter_core-4.6.3-py2.py3-none-any.whl", hash = "sha256:a4ee613c060fe5697d913416fc9d553599c05e4492d58fac1192c9a6844abb21"}, 1383 | {file = "jupyter_core-4.6.3.tar.gz", hash = "sha256:394fd5dd787e7c8861741880bdf8a00ce39f95de5d18e579c74b882522219e7e"}, 1384 | ] 1385 | kiwisolver = [ 1386 | {file = "kiwisolver-1.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:443c2320520eda0a5b930b2725b26f6175ca4453c61f739fef7a5847bd262f74"}, 1387 | {file = "kiwisolver-1.2.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:efcf3397ae1e3c3a4a0a0636542bcad5adad3b1dd3e8e629d0b6e201347176c8"}, 1388 | {file = "kiwisolver-1.2.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fccefc0d36a38c57b7bd233a9b485e2f1eb71903ca7ad7adacad6c28a56d62d2"}, 1389 | {file = "kiwisolver-1.2.0-cp36-none-win32.whl", hash = "sha256:60a78858580761fe611d22127868f3dc9f98871e6fdf0a15cc4203ed9ba6179b"}, 1390 | {file = "kiwisolver-1.2.0-cp36-none-win_amd64.whl", hash = "sha256:556da0a5f60f6486ec4969abbc1dd83cf9b5c2deadc8288508e55c0f5f87d29c"}, 1391 | {file = 
"kiwisolver-1.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7cc095a4661bdd8a5742aaf7c10ea9fac142d76ff1770a0f84394038126d8fc7"}, 1392 | {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:c955791d80e464da3b471ab41eb65cf5a40c15ce9b001fdc5bbc241170de58ec"}, 1393 | {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:603162139684ee56bcd57acc74035fceed7dd8d732f38c0959c8bd157f913fec"}, 1394 | {file = "kiwisolver-1.2.0-cp37-none-win32.whl", hash = "sha256:03662cbd3e6729f341a97dd2690b271e51a67a68322affab12a5b011344b973c"}, 1395 | {file = "kiwisolver-1.2.0-cp37-none-win_amd64.whl", hash = "sha256:4eadb361baf3069f278b055e3bb53fa189cea2fd02cb2c353b7a99ebb4477ef1"}, 1396 | {file = "kiwisolver-1.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c31bc3c8e903d60a1ea31a754c72559398d91b5929fcb329b1c3a3d3f6e72113"}, 1397 | {file = "kiwisolver-1.2.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:d52b989dc23cdaa92582ceb4af8d5bcc94d74b2c3e64cd6785558ec6a879793e"}, 1398 | {file = "kiwisolver-1.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:e586b28354d7b6584d8973656a7954b1c69c93f708c0c07b77884f91640b7657"}, 1399 | {file = "kiwisolver-1.2.0-cp38-none-win32.whl", hash = "sha256:d069ef4b20b1e6b19f790d00097a5d5d2c50871b66d10075dab78938dc2ee2cf"}, 1400 | {file = "kiwisolver-1.2.0-cp38-none-win_amd64.whl", hash = "sha256:18d749f3e56c0480dccd1714230da0f328e6e4accf188dd4e6884bdd06bf02dd"}, 1401 | {file = "kiwisolver-1.2.0.tar.gz", hash = "sha256:247800260cd38160c362d211dcaf4ed0f7816afb5efe56544748b21d6ad6d17f"}, 1402 | ] 1403 | matplotlib = [ 1404 | {file = "matplotlib-3.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e06304686209331f99640642dee08781a9d55c6e32abb45ed54f021f46ccae47"}, 1405 | {file = "matplotlib-3.2.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ce378047902b7a05546b6485b14df77b2ff207a0054e60c10b5680132090c8ee"}, 1406 | {file = "matplotlib-3.2.1-cp36-cp36m-win32.whl", hash = "sha256:2466d4dddeb0f5666fd1e6736cc5287a4f9f7ae6c1a9e0779deff798b28e1d35"}, 1407 | {file = "matplotlib-3.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f4412241e32d0f8d3713b68d3ca6430190a5e8a7c070f1c07d7833d8c5264398"}, 1408 | {file = "matplotlib-3.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e20ba7fb37d4647ac38f3c6d8672dd8b62451ee16173a0711b37ba0ce42bf37d"}, 1409 | {file = "matplotlib-3.2.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:282b3fc8023c4365bad924d1bb442ddc565c2d1635f210b700722776da466ca3"}, 1410 | {file = "matplotlib-3.2.1-cp37-cp37m-win32.whl", hash = "sha256:c1cf735970b7cd424502719b44288b21089863aaaab099f55e0283a721aaf781"}, 1411 | {file = "matplotlib-3.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:56d3147714da5c7ac4bc452d041e70e0e0b07c763f604110bd4e2527f320b86d"}, 1412 | {file = "matplotlib-3.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:af14e77829c5b5d5be11858d042d6f2459878f8e296228c7ea13ec1fd308eb68"}, 1413 | {file = "matplotlib-3.2.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:aae7d107dc37b4bb72dcc45f70394e6df2e5e92ac4079761aacd0e2ad1d3b1f7"}, 1414 | {file = "matplotlib-3.2.1-cp38-cp38-win32.whl", hash = "sha256:d35891a86a4388b6965c2d527b9a9f9e657d9e110b0575ca8a24ba0d4e34b8fc"}, 1415 | {file = "matplotlib-3.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:4bb50ee4755271a2017b070984bcb788d483a8ce3132fab68393d1555b62d4ba"}, 1416 | {file = "matplotlib-3.2.1-pp373-pypy36_pp73-win32.whl", hash = "sha256:7a9baefad265907c6f0b037c8c35a10cf437f7708c27415a5513cf09ac6d6ddd"}, 1417 | {file = "matplotlib-3.2.1.tar.gz", hash = 
"sha256:ffe2f9cdcea1086fc414e82f42271ecf1976700b8edd16ca9d376189c6d93aee"}, 1418 | ] 1419 | mccabe = [ 1420 | {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, 1421 | {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, 1422 | ] 1423 | more-itertools = [ 1424 | {file = "more-itertools-8.3.0.tar.gz", hash = "sha256:558bb897a2232f5e4f8e2399089e35aecb746e1f9191b6584a151647e89267be"}, 1425 | {file = "more_itertools-8.3.0-py3-none-any.whl", hash = "sha256:7818f596b1e87be009031c7653d01acc46ed422e6656b394b0f765ce66ed4982"}, 1426 | ] 1427 | murmurhash = [ 1428 | {file = "murmurhash-1.0.2-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:717196a04cdc80cc3103a3da17b2415a8a5e1d0d578b7079259386bf153b3258"}, 1429 | {file = "murmurhash-1.0.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:a6c071b4b498bcea16a8dc8590cad81fa8d43821f34c74bc00f96499e2527073"}, 1430 | {file = "murmurhash-1.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:d696c394ebd164ca80b5871e2e9ad2f9fdbb81bd3c552c1d5f1e8ee694e6204a"}, 1431 | {file = "murmurhash-1.0.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:27b908fe4bdb426f4e4e4a8821acbe0302915b2945e035ec9d8ca513e2a74b1f"}, 1432 | {file = "murmurhash-1.0.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:33405103fa8cde15d72ee525a03d5cfe2c7e4901133819754810986e29627d68"}, 1433 | {file = "murmurhash-1.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:3af36a0dc9f13f6892d9b8b39a6a3ccf216cae5bce38adc7c2d145677987772f"}, 1434 | {file = "murmurhash-1.0.2-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:fe344face8d30a5a6aa26e5acf288aa2a8f0f32e05efdda3d314b4bf289ec2af"}, 1435 | {file = "murmurhash-1.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:386a9eed3cb27cb2cd4394b6521275ba04552642c2d9cab5c9fb42aa5a3325c0"}, 1436 | {file = "murmurhash-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b0afe329701b59d02e56bc6cee7325af83e3fee9c299c615fc1df3202b4f886f"}, 1437 | {file = "murmurhash-1.0.2-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:bf33490514d308bcc27ed240cb3eb114f1ec31af031535cd8f27659a7049bd52"}, 1438 | {file = "murmurhash-1.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:8a4ed95cd3456b43ea301679c7c39ade43fc18b844b37d0ba0ac0d6acbff8e0c"}, 1439 | {file = "murmurhash-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:ba766343bdbcb928039b8fff609e80ae7a5fd5ed7a4fc5af822224b63e0cbaff"}, 1440 | {file = "murmurhash-1.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cc97ea766ac545074bab0e5af3dbc48e0d05ba230ae5a404e284d39abe4b3baf"}, 1441 | {file = "murmurhash-1.0.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8b045a79e8b621b4b35b29f29e33e9e0964f3a276f7da4d5736142f322ad4842"}, 1442 | {file = "murmurhash-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:f468e4868f78c3ac202a66abfe2866414bca4ae7666a21ef0938c423de0f7d50"}, 1443 | {file = "murmurhash-1.0.2.tar.gz", hash = "sha256:c7a646f6b07b033642b4f52ae2e45efd8b80780b3b90e8092a0cec935fbf81e2"}, 1444 | ] 1445 | numpy = [ 1446 | {file = "numpy-1.18.5-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0"}, 1447 | {file = "numpy-1.18.5-cp35-cp35m-manylinux1_i686.whl", hash = 
"sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583"}, 1448 | {file = "numpy-1.18.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271"}, 1449 | {file = "numpy-1.18.5-cp35-cp35m-win32.whl", hash = "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3"}, 1450 | {file = "numpy-1.18.5-cp35-cp35m-win_amd64.whl", hash = "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1"}, 1451 | {file = "numpy-1.18.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc"}, 1452 | {file = "numpy-1.18.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675"}, 1453 | {file = "numpy-1.18.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8"}, 1454 | {file = "numpy-1.18.5-cp36-cp36m-win32.whl", hash = "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5"}, 1455 | {file = "numpy-1.18.5-cp36-cp36m-win_amd64.whl", hash = "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161"}, 1456 | {file = "numpy-1.18.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824"}, 1457 | {file = "numpy-1.18.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf"}, 1458 | {file = "numpy-1.18.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f"}, 1459 | {file = "numpy-1.18.5-cp37-cp37m-win32.whl", hash = "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f"}, 1460 | {file = "numpy-1.18.5-cp37-cp37m-win_amd64.whl", hash = "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233"}, 1461 | {file = "numpy-1.18.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b"}, 1462 | {file = "numpy-1.18.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7"}, 1463 | {file = "numpy-1.18.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb"}, 1464 | {file = "numpy-1.18.5-cp38-cp38-win32.whl", hash = "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a"}, 1465 | {file = "numpy-1.18.5-cp38-cp38-win_amd64.whl", hash = "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f"}, 1466 | {file = "numpy-1.18.5.zip", hash = "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b"}, 1467 | ] 1468 | omegaconf = [ 1469 | {file = "omegaconf-1.4.1-py3-none-any.whl", hash = "sha256:01c9f93c3578dce7d42ce06f3d1305437322234ccc1accfc71d344d5e17413a1"}, 1470 | {file = "omegaconf-1.4.1.tar.gz", hash = "sha256:5bb6c9391224c6277e06171b661904a68dcbcfbabd831eb552f4803a02eefddc"}, 1471 | ] 1472 | packaging = [ 1473 | {file = "packaging-20.4-py2.py3-none-any.whl", hash = "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"}, 1474 | {file = "packaging-20.4.tar.gz", hash = "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8"}, 1475 | ] 1476 | pandas = [ 1477 | {file = "pandas-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1f6fcf0404626ca0475715da045a878c7062ed39bc859afc4ccf0ba0a586a0aa"}, 1478 | {file = 
"pandas-1.0.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:bab51855f8b318ef39c2af2c11095f45a10b74cbab4e3c8199efcc5af314c648"}, 1479 | {file = "pandas-1.0.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2a8b6c28607e3f3c344fe3e9b3cd76d2bf9f59bc8c0f2e582e3728b80e1786dc"}, 1480 | {file = "pandas-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:034185bb615dc96d08fa13aacba8862949db19d5e7804d6ee242d086f07bcc46"}, 1481 | {file = "pandas-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:a647e44ba1b3344ebc5991c8aafeb7cca2b930010923657a273b41d86ae225c4"}, 1482 | {file = "pandas-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:698e26372dba93f3aeb09cd7da2bb6dd6ade248338cfe423792c07116297f8f4"}, 1483 | {file = "pandas-1.0.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2bc2ff52091a6ac481cc75d514f06227dc1b10887df1eb72d535475e7b825e31"}, 1484 | {file = "pandas-1.0.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:519678882fd0587410ece91e3ff7f73ad6ded60f6fcb8aa7bcc85c1dc20ecac6"}, 1485 | {file = "pandas-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:51e0abe6e9f5096d246232b461649b0aa627f46de8f6344597ca908f2240cbaa"}, 1486 | {file = "pandas-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:415e4d52fcfd68c3d8f1851cef4d947399232741cc994c8f6aa5e6a9f2e4b1d8"}, 1487 | {file = "pandas-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9b7f1933e3226cc16129cf2093338d63ace5c85db7c9588e3e1ac5c1937ad5"}, 1488 | {file = "pandas-1.0.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:982cda36d1773076a415ec62766b3c0a21cdbae84525135bdb8f460c489bb5dd"}, 1489 | {file = "pandas-1.0.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:29b4cfee5df2bc885607b8f016e901e63df7ffc8f00209000471778f46cc6678"}, 1490 | {file = "pandas-1.0.4-cp38-cp38-win32.whl", hash = "sha256:1fc963ba33c299973e92d45466e576d11f28611f3549469aec4a35658ef9f4cc"}, 1491 | {file = "pandas-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:83af85c8e539a7876d23b78433d90f6a0e8aa913e37320785cf3888c946ee874"}, 1492 | {file = "pandas-1.0.4.tar.gz", hash = "sha256:b35d625282baa7b51e82e52622c300a1ca9f786711b2af7cbe64f1e6831f4126"}, 1493 | ] 1494 | parso = [ 1495 | {file = "parso-0.7.0-py2.py3-none-any.whl", hash = "sha256:158c140fc04112dc45bca311633ae5033c2c2a7b732fa33d0955bad8152a8dd0"}, 1496 | {file = "parso-0.7.0.tar.gz", hash = "sha256:908e9fae2144a076d72ae4e25539143d40b8e3eafbaeae03c1bfe226f4cdf12c"}, 1497 | ] 1498 | pathspec = [ 1499 | {file = "pathspec-0.8.0-py2.py3-none-any.whl", hash = "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0"}, 1500 | {file = "pathspec-0.8.0.tar.gz", hash = "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061"}, 1501 | ] 1502 | pexpect = [ 1503 | {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, 1504 | {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"}, 1505 | ] 1506 | pickleshare = [ 1507 | {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, 1508 | {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, 1509 | ] 1510 | pillow = [ 1511 | {file = "Pillow-7.1.2-cp35-cp35m-macosx_10_10_intel.whl", hash = "sha256:ae2b270f9a0b8822b98655cb3a59cdb1bd54a34807c6c56b76dd2e786c3b7db3"}, 1512 | {file = "Pillow-7.1.2-cp35-cp35m-manylinux1_i686.whl", hash = 
"sha256:d23e2aa9b969cf9c26edfb4b56307792b8b374202810bd949effd1c6e11ebd6d"}, 1513 | {file = "Pillow-7.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:b532bcc2f008e96fd9241177ec580829dee817b090532f43e54074ecffdcd97f"}, 1514 | {file = "Pillow-7.1.2-cp35-cp35m-win32.whl", hash = "sha256:12e4bad6bddd8546a2f9771485c7e3d2b546b458ae8ff79621214119ac244523"}, 1515 | {file = "Pillow-7.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:9744350687459234867cbebfe9df8f35ef9e1538f3e729adbd8fde0761adb705"}, 1516 | {file = "Pillow-7.1.2-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:f54be399340aa602066adb63a86a6a5d4f395adfdd9da2b9a0162ea808c7b276"}, 1517 | {file = "Pillow-7.1.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:1f694e28c169655c50bb89a3fa07f3b854d71eb47f50783621de813979ba87f3"}, 1518 | {file = "Pillow-7.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f784aad988f12c80aacfa5b381ec21fd3f38f851720f652b9f33facc5101cf4d"}, 1519 | {file = "Pillow-7.1.2-cp36-cp36m-win32.whl", hash = "sha256:b37bb3bd35edf53125b0ff257822afa6962649995cbdfde2791ddb62b239f891"}, 1520 | {file = "Pillow-7.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b67a6c47ed963c709ed24566daa3f95a18f07d3831334da570c71da53d97d088"}, 1521 | {file = "Pillow-7.1.2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:eaa83729eab9c60884f362ada982d3a06beaa6cc8b084cf9f76cae7739481dfa"}, 1522 | {file = "Pillow-7.1.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f46e0e024346e1474083c729d50de909974237c72daca05393ee32389dabe457"}, 1523 | {file = "Pillow-7.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:0e2a3bceb0fd4e0cb17192ae506d5f082b309ffe5fc370a5667959c9b2f85fa3"}, 1524 | {file = "Pillow-7.1.2-cp37-cp37m-win32.whl", hash = "sha256:ccc9ad2460eb5bee5642eaf75a0438d7f8887d484490d5117b98edd7f33118b7"}, 1525 | {file = "Pillow-7.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b943e71c2065ade6fef223358e56c167fc6ce31c50bc7a02dd5c17ee4338e8ac"}, 1526 | {file = "Pillow-7.1.2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:04766c4930c174b46fd72d450674612ab44cca977ebbcc2dde722c6933290107"}, 1527 | {file = "Pillow-7.1.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:f455efb7a98557412dc6f8e463c1faf1f1911ec2432059fa3e582b6000fc90e2"}, 1528 | {file = "Pillow-7.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ee94fce8d003ac9fd206496f2707efe9eadcb278d94c271f129ab36aa7181344"}, 1529 | {file = "Pillow-7.1.2-cp38-cp38-win32.whl", hash = "sha256:4b02b9c27fad2054932e89f39703646d0c543f21d3cc5b8e05434215121c28cd"}, 1530 | {file = "Pillow-7.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:3d25dd8d688f7318dca6d8cd4f962a360ee40346c15893ae3b95c061cdbc4079"}, 1531 | {file = "Pillow-7.1.2-pp373-pypy36_pp73-win32.whl", hash = "sha256:0f01e63c34f0e1e2580cc0b24e86a5ccbbfa8830909a52ee17624c4193224cd9"}, 1532 | {file = "Pillow-7.1.2-py3.8-macosx-10.9-x86_64.egg", hash = "sha256:70e3e0d99a0dcda66283a185f80697a9b08806963c6149c8e6c5f452b2aa59c0"}, 1533 | {file = "Pillow-7.1.2.tar.gz", hash = "sha256:a0b49960110bc6ff5fead46013bcb8825d101026d466f3a4de3476defe0fb0dd"}, 1534 | ] 1535 | plac = [ 1536 | {file = "plac-1.1.3-py2.py3-none-any.whl", hash = "sha256:487e553017d419f35add346c4c09707e52fa53f7e7181ce1098ca27620e9ceee"}, 1537 | {file = "plac-1.1.3.tar.gz", hash = "sha256:398cb947c60c4c25e275e1f1dadf027e7096858fb260b8ece3b33bcff90d985f"}, 1538 | ] 1539 | pluggy = [ 1540 | {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, 1541 | {file = "pluggy-0.13.1.tar.gz", hash = 
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, 1542 | ] 1543 | preshed = [ 1544 | {file = "preshed-3.0.2-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:448d9df12e63fe4a3024f6153ee6703bb95d2be0ce887b5eda7ddc41acfba825"}, 1545 | {file = "preshed-3.0.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:633358f1fb0ec5dd6dbe4971c328d08809e5a8dbefdf13a802ae0a7cb45306c7"}, 1546 | {file = "preshed-3.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:7ea588a78aaf310ae2c293071a8571b07ae434819be05fe510442b6df3f8fbf7"}, 1547 | {file = "preshed-3.0.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:8a9a8222a697a513f25a94733e7a17cc298ecd8fd56b606a1d8fa0ac342c2830"}, 1548 | {file = "preshed-3.0.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:253970beae87ab672a6afb543908761795eea3cb7b0d784e2ea51e265752059e"}, 1549 | {file = "preshed-3.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:88427346b220293439db77c82913791fa13edc6ac73d8159610699a3ca17aae9"}, 1550 | {file = "preshed-3.0.2-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:6518bbd5fb8adbc3231e75ae78d96a7bdd5405a3b23a09d5e62a2e4fc833724e"}, 1551 | {file = "preshed-3.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:1be3cb59211282e906a11443464fe3e19f6561e2fcd06410e4adc6d45354cf82"}, 1552 | {file = "preshed-3.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:ece5e850f667eaa3367d5c56dda9e3aa6ac1c0bb2117d2f466a26db5f26bbe4b"}, 1553 | {file = "preshed-3.0.2-cp37-cp37m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:1ef72a120e49356058b3c0590d7b5e91f2747b44e006eef6579be6131223cab0"}, 1554 | {file = "preshed-3.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:7e80ffc1fb79496d4feafe0eaf71ee5e532b91daf6cec235d7f9c4c12657a58c"}, 1555 | {file = "preshed-3.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:0c15ae62f2595ca479decc3452967484dae57b510278800f5deb9115238cc818"}, 1556 | {file = "preshed-3.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e37058d91bd7f0f5a7a9c83d22a83dc581ab5f79688a87be81f200993145a250"}, 1557 | {file = "preshed-3.0.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:b4ae6c7c44aa3ff7bd717791bb6b619ecb273b7cb128c986f2dc65f6e0e6ddd4"}, 1558 | {file = "preshed-3.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:190345724eb3f7aeaeb2a758740d698bd6c017c2cdf07c71c16b34820973d114"}, 1559 | {file = "preshed-3.0.2.tar.gz", hash = "sha256:61d73468c97c1d6d5a048de0b01d5a6fd052123358aca4823cdb277e436436cb"}, 1560 | ] 1561 | prompt-toolkit = [ 1562 | {file = "prompt_toolkit-3.0.5-py3-none-any.whl", hash = "sha256:df7e9e63aea609b1da3a65641ceaf5bc7d05e0a04de5bd45d05dbeffbabf9e04"}, 1563 | {file = "prompt_toolkit-3.0.5.tar.gz", hash = "sha256:563d1a4140b63ff9dd587bda9557cffb2fe73650205ab6f4383092fb882e7dc8"}, 1564 | ] 1565 | psutil = [ 1566 | {file = "psutil-5.7.0-cp27-none-win32.whl", hash = "sha256:298af2f14b635c3c7118fd9183843f4e73e681bb6f01e12284d4d70d48a60953"}, 1567 | {file = "psutil-5.7.0-cp27-none-win_amd64.whl", hash = "sha256:75e22717d4dbc7ca529ec5063000b2b294fc9a367f9c9ede1f65846c7955fd38"}, 1568 | {file = "psutil-5.7.0-cp35-cp35m-win32.whl", hash = "sha256:f344ca230dd8e8d5eee16827596f1c22ec0876127c28e800d7ae20ed44c4b310"}, 1569 | {file = "psutil-5.7.0-cp35-cp35m-win_amd64.whl", hash = "sha256:e2d0c5b07c6fe5a87fa27b7855017edb0d52ee73b71e6ee368fae268605cc3f5"}, 1570 | {file = 
"psutil-5.7.0-cp36-cp36m-win32.whl", hash = "sha256:a02f4ac50d4a23253b68233b07e7cdb567bd025b982d5cf0ee78296990c22d9e"}, 1571 | {file = "psutil-5.7.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1413f4158eb50e110777c4f15d7c759521703bd6beb58926f1d562da40180058"}, 1572 | {file = "psutil-5.7.0-cp37-cp37m-win32.whl", hash = "sha256:d008ddc00c6906ec80040d26dc2d3e3962109e40ad07fd8a12d0284ce5e0e4f8"}, 1573 | {file = "psutil-5.7.0-cp37-cp37m-win_amd64.whl", hash = "sha256:73f35ab66c6c7a9ce82ba44b1e9b1050be2a80cd4dcc3352cc108656b115c74f"}, 1574 | {file = "psutil-5.7.0-cp38-cp38-win32.whl", hash = "sha256:60b86f327c198561f101a92be1995f9ae0399736b6eced8f24af41ec64fb88d4"}, 1575 | {file = "psutil-5.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:d84029b190c8a66a946e28b4d3934d2ca1528ec94764b180f7d6ea57b0e75e26"}, 1576 | {file = "psutil-5.7.0.tar.gz", hash = "sha256:685ec16ca14d079455892f25bd124df26ff9137664af445563c1bd36629b5e0e"}, 1577 | ] 1578 | ptyprocess = [ 1579 | {file = "ptyprocess-0.6.0-py2.py3-none-any.whl", hash = "sha256:d7cc528d76e76342423ca640335bd3633420dc1366f258cb31d05e865ef5ca1f"}, 1580 | {file = "ptyprocess-0.6.0.tar.gz", hash = "sha256:923f299cc5ad920c68f2bc0bc98b75b9f838b93b599941a6b63ddbc2476394c0"}, 1581 | ] 1582 | py = [ 1583 | {file = "py-1.8.1-py2.py3-none-any.whl", hash = "sha256:c20fdd83a5dbc0af9efd622bee9a5564e278f6380fffcacc43ba6f43db2813b0"}, 1584 | {file = "py-1.8.1.tar.gz", hash = "sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa"}, 1585 | ] 1586 | pycodestyle = [ 1587 | {file = "pycodestyle-2.6.0-py2.py3-none-any.whl", hash = "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367"}, 1588 | {file = "pycodestyle-2.6.0.tar.gz", hash = "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"}, 1589 | ] 1590 | pyflakes = [ 1591 | {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, 1592 | {file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, 1593 | ] 1594 | pygments = [ 1595 | {file = "Pygments-2.6.1-py3-none-any.whl", hash = "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324"}, 1596 | {file = "Pygments-2.6.1.tar.gz", hash = "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44"}, 1597 | ] 1598 | pyparsing = [ 1599 | {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, 1600 | {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, 1601 | ] 1602 | pytest = [ 1603 | {file = "pytest-5.4.3-py3-none-any.whl", hash = "sha256:5c0db86b698e8f170ba4582a492248919255fcd4c79b1ee64ace34301fb589a1"}, 1604 | {file = "pytest-5.4.3.tar.gz", hash = "sha256:7979331bfcba207414f5e1263b5a0f8f521d0f457318836a7355531ed1a4c7d8"}, 1605 | ] 1606 | pytest-concurrent = [ 1607 | {file = "pytest_concurrent-0.2.2-py3-none-any.whl", hash = "sha256:8b012417c2f0a1f433e4fe2131d9d47c6852ace8087143b62e383d3abc917274"}, 1608 | ] 1609 | pytest-testdox = [ 1610 | {file = "pytest-testdox-1.2.1.tar.gz", hash = "sha256:ff7117fe8544afbcac63952a9b3479f0b3ff92d8c43e380cb329f2803af8c3ac"}, 1611 | {file = "pytest_testdox-1.2.1-py2.py3-none-any.whl", hash = "sha256:082352573dcb5df59054ee41bdd9f6f47d3f1b6624aea6586548ba0c640cbd9d"}, 1612 | ] 1613 | python-dateutil = [ 1614 | {file = "python-dateutil-2.8.1.tar.gz", hash = 
"sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"}, 1615 | {file = "python_dateutil-2.8.1-py2.py3-none-any.whl", hash = "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"}, 1616 | ] 1617 | pytz = [ 1618 | {file = "pytz-2020.1-py2.py3-none-any.whl", hash = "sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed"}, 1619 | {file = "pytz-2020.1.tar.gz", hash = "sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048"}, 1620 | ] 1621 | pywin32 = [ 1622 | {file = "pywin32-227-cp27-cp27m-win32.whl", hash = "sha256:371fcc39416d736401f0274dd64c2302728c9e034808e37381b5e1b22be4a6b0"}, 1623 | {file = "pywin32-227-cp27-cp27m-win_amd64.whl", hash = "sha256:4cdad3e84191194ea6d0dd1b1b9bdda574ff563177d2adf2b4efec2a244fa116"}, 1624 | {file = "pywin32-227-cp35-cp35m-win32.whl", hash = "sha256:f4c5be1a293bae0076d93c88f37ee8da68136744588bc5e2be2f299a34ceb7aa"}, 1625 | {file = "pywin32-227-cp35-cp35m-win_amd64.whl", hash = "sha256:a929a4af626e530383a579431b70e512e736e9588106715215bf685a3ea508d4"}, 1626 | {file = "pywin32-227-cp36-cp36m-win32.whl", hash = "sha256:300a2db938e98c3e7e2093e4491439e62287d0d493fe07cce110db070b54c0be"}, 1627 | {file = "pywin32-227-cp36-cp36m-win_amd64.whl", hash = "sha256:9b31e009564fb95db160f154e2aa195ed66bcc4c058ed72850d047141b36f3a2"}, 1628 | {file = "pywin32-227-cp37-cp37m-win32.whl", hash = "sha256:47a3c7551376a865dd8d095a98deba954a98f326c6fe3c72d8726ca6e6b15507"}, 1629 | {file = "pywin32-227-cp37-cp37m-win_amd64.whl", hash = "sha256:31f88a89139cb2adc40f8f0e65ee56a8c585f629974f9e07622ba80199057511"}, 1630 | {file = "pywin32-227-cp38-cp38-win32.whl", hash = "sha256:7f18199fbf29ca99dff10e1f09451582ae9e372a892ff03a28528a24d55875bc"}, 1631 | {file = "pywin32-227-cp38-cp38-win_amd64.whl", hash = "sha256:7c1ae32c489dc012930787f06244426f8356e129184a02c25aef163917ce158e"}, 1632 | {file = "pywin32-227-cp39-cp39-win32.whl", hash = "sha256:c054c52ba46e7eb6b7d7dfae4dbd987a1bb48ee86debe3f245a2884ece46e295"}, 1633 | {file = "pywin32-227-cp39-cp39-win_amd64.whl", hash = "sha256:f27cec5e7f588c3d1051651830ecc00294f90728d19c3bf6916e6dba93ea357c"}, 1634 | ] 1635 | pyyaml = [ 1636 | {file = "PyYAML-5.3.1-cp27-cp27m-win32.whl", hash = "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f"}, 1637 | {file = "PyYAML-5.3.1-cp27-cp27m-win_amd64.whl", hash = "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76"}, 1638 | {file = "PyYAML-5.3.1-cp35-cp35m-win32.whl", hash = "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2"}, 1639 | {file = "PyYAML-5.3.1-cp35-cp35m-win_amd64.whl", hash = "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c"}, 1640 | {file = "PyYAML-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2"}, 1641 | {file = "PyYAML-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648"}, 1642 | {file = "PyYAML-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"}, 1643 | {file = "PyYAML-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf"}, 1644 | {file = "PyYAML-5.3.1-cp38-cp38-win32.whl", hash = "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97"}, 1645 | {file = "PyYAML-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee"}, 1646 
| {file = "PyYAML-5.3.1.tar.gz", hash = "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"}, 1647 | ] 1648 | pyzmq = [ 1649 | {file = "pyzmq-19.0.1-cp27-cp27m-macosx_10_9_intel.whl", hash = "sha256:58688a2dfa044fad608a8e70ba8d019d0b872ec2acd75b7b5e37da8905605891"}, 1650 | {file = "pyzmq-19.0.1-cp27-cp27m-win32.whl", hash = "sha256:87c78f6936e2654397ca2979c1d323ee4a889eef536cc77a938c6b5be33351a7"}, 1651 | {file = "pyzmq-19.0.1-cp27-cp27m-win_amd64.whl", hash = "sha256:97b6255ae77328d0e80593681826a0479cb7bac0ba8251b4dd882f5145a2293a"}, 1652 | {file = "pyzmq-19.0.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:15b4cb21118f4589c4db8be4ac12b21c8b4d0d42b3ee435d47f686c32fe2e91f"}, 1653 | {file = "pyzmq-19.0.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:931339ac2000d12fe212e64f98ce291e81a7ec6c73b125f17cf08415b753c087"}, 1654 | {file = "pyzmq-19.0.1-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:2a88b8fabd9cc35bd59194a7723f3122166811ece8b74018147a4ed8489e6421"}, 1655 | {file = "pyzmq-19.0.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:bafd651b557dd81d89bd5f9c678872f3e7b7255c1c751b78d520df2caac80230"}, 1656 | {file = "pyzmq-19.0.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:8952f6ba6ae598e792703f3134af5a01af8f5c7cf07e9a148f05a12b02412cea"}, 1657 | {file = "pyzmq-19.0.1-cp35-cp35m-win32.whl", hash = "sha256:54aa24fd60c4262286fc64ca632f9e747c7cc3a3a1144827490e1dc9b8a3a960"}, 1658 | {file = "pyzmq-19.0.1-cp35-cp35m-win_amd64.whl", hash = "sha256:dcbc3f30c11c60d709c30a213dc56e88ac016fe76ac6768e64717bd976072566"}, 1659 | {file = "pyzmq-19.0.1-cp36-cp36m-macosx_10_9_intel.whl", hash = "sha256:6ca519309703e95d55965735a667809bbb65f52beda2fdb6312385d3e7a6d234"}, 1660 | {file = "pyzmq-19.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:4ee0bfd82077a3ff11c985369529b12853a4064320523f8e5079b630f9551448"}, 1661 | {file = "pyzmq-19.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ba6f24431b569aec674ede49cad197cad59571c12deed6ad8e3c596da8288217"}, 1662 | {file = "pyzmq-19.0.1-cp36-cp36m-win32.whl", hash = "sha256:956775444d01331c7eb412c5fb9bb62130dfaac77e09f32764ea1865234e2ca9"}, 1663 | {file = "pyzmq-19.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b08780e3a55215873b3b8e6e7ca8987f14c902a24b6ac081b344fd430d6ca7cd"}, 1664 | {file = "pyzmq-19.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:21f7d91f3536f480cb2c10d0756bfa717927090b7fb863e6323f766e5461ee1c"}, 1665 | {file = "pyzmq-19.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:bfff5ffff051f5aa47ba3b379d87bd051c3196b0c8a603e8b7ed68a6b4f217ec"}, 1666 | {file = "pyzmq-19.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:07fb8fe6826a229dada876956590135871de60dbc7de5a18c3bcce2ed1f03c98"}, 1667 | {file = "pyzmq-19.0.1-cp37-cp37m-win32.whl", hash = "sha256:342fb8a1dddc569bc361387782e8088071593e7eaf3e3ecf7d6bd4976edff112"}, 1668 | {file = "pyzmq-19.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:faee2604f279d31312bc455f3d024f160b6168b9c1dde22bf62d8c88a4deca8e"}, 1669 | {file = "pyzmq-19.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5b9d21fc56c8aacd2e6d14738021a9d64f3f69b30578a99325a728e38a349f85"}, 1670 | {file = "pyzmq-19.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:af0c02cf49f4f9eedf38edb4f3b6bb621d83026e7e5d76eb5526cc5333782fd6"}, 1671 | {file = "pyzmq-19.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5f1f2eb22aab606f808163eb1d537ac9a0ba4283fbeb7a62eb48d9103cf015c2"}, 1672 | {file = "pyzmq-19.0.1-cp38-cp38-win32.whl", hash = 
"sha256:f9d7e742fb0196992477415bb34366c12e9bb9a0699b8b3f221ff93b213d7bec"}, 1673 | {file = "pyzmq-19.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:5b99c2ae8089ef50223c28bac57510c163bfdff158c9e90764f812b94e69a0e6"}, 1674 | {file = "pyzmq-19.0.1-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:cf5d689ba9513b9753959164cf500079383bc18859f58bf8ce06d8d4bef2b054"}, 1675 | {file = "pyzmq-19.0.1-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:aaa8b40b676576fd7806839a5de8e6d5d1b74981e6376d862af6c117af2a3c10"}, 1676 | {file = "pyzmq-19.0.1.tar.gz", hash = "sha256:13a5638ab24d628a6ade8f794195e1a1acd573496c3b85af2f1183603b7bf5e0"}, 1677 | ] 1678 | regex = [ 1679 | {file = "regex-2020.5.14-cp27-cp27m-win32.whl", hash = "sha256:e565569fc28e3ba3e475ec344d87ed3cd8ba2d575335359749298a0899fe122e"}, 1680 | {file = "regex-2020.5.14-cp27-cp27m-win_amd64.whl", hash = "sha256:d466967ac8e45244b9dfe302bbe5e3337f8dc4dec8d7d10f5e950d83b140d33a"}, 1681 | {file = "regex-2020.5.14-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:27ff7325b297fb6e5ebb70d10437592433601c423f5acf86e5bc1ee2919b9561"}, 1682 | {file = "regex-2020.5.14-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ea55b80eb0d1c3f1d8d784264a6764f931e172480a2f1868f2536444c5f01e01"}, 1683 | {file = "regex-2020.5.14-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:c9bce6e006fbe771a02bda468ec40ffccbf954803b470a0345ad39c603402577"}, 1684 | {file = "regex-2020.5.14-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:d881c2e657c51d89f02ae4c21d9adbef76b8325fe4d5cf0e9ad62f850f3a98fd"}, 1685 | {file = "regex-2020.5.14-cp36-cp36m-win32.whl", hash = "sha256:99568f00f7bf820c620f01721485cad230f3fb28f57d8fbf4a7967ec2e446994"}, 1686 | {file = "regex-2020.5.14-cp36-cp36m-win_amd64.whl", hash = "sha256:70c14743320a68c5dac7fc5a0f685be63bc2024b062fe2aaccc4acc3d01b14a1"}, 1687 | {file = "regex-2020.5.14-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:a7c37f048ec3920783abab99f8f4036561a174f1314302ccfa4e9ad31cb00eb4"}, 1688 | {file = "regex-2020.5.14-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:89d76ce33d3266173f5be80bd4efcbd5196cafc34100fdab814f9b228dee0fa4"}, 1689 | {file = "regex-2020.5.14-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:51f17abbe973c7673a61863516bdc9c0ef467407a940f39501e786a07406699c"}, 1690 | {file = "regex-2020.5.14-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:ce5cc53aa9fbbf6712e92c7cf268274eaff30f6bd12a0754e8133d85a8fb0f5f"}, 1691 | {file = "regex-2020.5.14-cp37-cp37m-win32.whl", hash = "sha256:8044d1c085d49673aadb3d7dc20ef5cb5b030c7a4fa253a593dda2eab3059929"}, 1692 | {file = "regex-2020.5.14-cp37-cp37m-win_amd64.whl", hash = "sha256:c2062c7d470751b648f1cacc3f54460aebfc261285f14bc6da49c6943bd48bdd"}, 1693 | {file = "regex-2020.5.14-cp38-cp38-manylinux1_i686.whl", hash = "sha256:329ba35d711e3428db6b45a53b1b13a0a8ba07cbbcf10bbed291a7da45f106c3"}, 1694 | {file = "regex-2020.5.14-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:579ea215c81d18da550b62ff97ee187b99f1b135fd894a13451e00986a080cad"}, 1695 | {file = "regex-2020.5.14-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:3a9394197664e35566242686d84dfd264c07b20f93514e2e09d3c2b3ffdf78fe"}, 1696 | {file = "regex-2020.5.14-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ce367d21f33e23a84fb83a641b3834dd7dd8e9318ad8ff677fbfae5915a239f7"}, 1697 | {file = "regex-2020.5.14-cp38-cp38-win32.whl", hash = "sha256:1386e75c9d1574f6aa2e4eb5355374c8e55f9aac97e224a8a5a6abded0f9c927"}, 1698 | {file = "regex-2020.5.14-cp38-cp38-win_amd64.whl", hash = 
"sha256:7e61be8a2900897803c293247ef87366d5df86bf701083b6c43119c7c6c99108"}, 1699 | {file = "regex-2020.5.14.tar.gz", hash = "sha256:ce450ffbfec93821ab1fea94779a8440e10cf63819be6e176eb1973a6017aff5"}, 1700 | ] 1701 | requests = [ 1702 | {file = "requests-2.23.0-py2.py3-none-any.whl", hash = "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee"}, 1703 | {file = "requests-2.23.0.tar.gz", hash = "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6"}, 1704 | ] 1705 | rope = [ 1706 | {file = "rope-0.16.0-py2-none-any.whl", hash = "sha256:ae1fa2fd56f64f4cc9be46493ce54bed0dd12dee03980c61a4393d89d84029ad"}, 1707 | {file = "rope-0.16.0-py3-none-any.whl", hash = "sha256:52423a7eebb5306a6d63bdc91a7c657db51ac9babfb8341c9a1440831ecf3203"}, 1708 | {file = "rope-0.16.0.tar.gz", hash = "sha256:d2830142c2e046f5fc26a022fe680675b6f48f81c7fc1f03a950706e746e9dfe"}, 1709 | ] 1710 | rx = [ 1711 | {file = "Rx-3.1.0.tar.gz", hash = "sha256:aaf409848e24dd514926eb8467e2764762bfd258325717fca4628d32d8721252"}, 1712 | ] 1713 | s3transfer = [ 1714 | {file = "s3transfer-0.3.3-py2.py3-none-any.whl", hash = "sha256:2482b4259524933a022d59da830f51bd746db62f047d6eb213f2f8855dcb8a13"}, 1715 | {file = "s3transfer-0.3.3.tar.gz", hash = "sha256:921a37e2aefc64145e7b73d50c71bb4f26f46e4c9f414dc648c6245ff92cf7db"}, 1716 | ] 1717 | sacremoses = [ 1718 | {file = "sacremoses-0.0.43.tar.gz", hash = "sha256:123c1bf2664351fb05e16f87d3786dbe44a050cfd7b85161c09ad9a63a8e2948"}, 1719 | ] 1720 | scikit-learn = [ 1721 | {file = "scikit-learn-0.23.1.tar.gz", hash = "sha256:e3fec1c8831f8f93ad85581ca29ca1bb88e2da377fb097cf8322aa89c21bc9b8"}, 1722 | {file = "scikit_learn-0.23.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:058d213092de4384710137af1300ed0ff030b8c40459a6c6f73c31ccd274cc39"}, 1723 | {file = "scikit_learn-0.23.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:ebe853e6f318f9d8b3b74dd17e553720d35646eff675a69eeaed12fbbbb07daa"}, 1724 | {file = "scikit_learn-0.23.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e9879ba9e64ec3add41bf201e06034162f853652ef4849b361d73b0deb3153ad"}, 1725 | {file = "scikit_learn-0.23.1-cp36-cp36m-win32.whl", hash = "sha256:c2fa33d20408b513cf432505c80e6eb4bf4d71434f1ae36680765d4a2c2a16ec"}, 1726 | {file = "scikit_learn-0.23.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e585682e37f2faa81ad6cd4472fff646bf2fd0542147bec93697a905db8e6bd2"}, 1727 | {file = "scikit_learn-0.23.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:244ca85d6eba17a1e6e8a66ab2f584be6a7784b5f59297e3d7ff8c7983af627c"}, 1728 | {file = "scikit_learn-0.23.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:9e04c0811ea92931ee8490d638171b8cb2f21387efcfff526bbc8c2a3da60f1c"}, 1729 | {file = "scikit_learn-0.23.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:0e7b55f73b35537ecd0d19df29dd39aa9e076dba78f3507b8136c819d84611fd"}, 1730 | {file = "scikit_learn-0.23.1-cp37-cp37m-win32.whl", hash = "sha256:bded94236e16774385202cafd26190ce96db18e4dc21e99473848c61e4fdc400"}, 1731 | {file = "scikit_learn-0.23.1-cp37-cp37m-win_amd64.whl", hash = "sha256:04799686060ecbf8992f26a35be1d99e981894c8c7860c1365cda4200f954a16"}, 1732 | {file = "scikit_learn-0.23.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c3464e46ef8bd4f1bfa5c009648c6449412c8f7e9b3fc0c9e3d800139c48827"}, 1733 | {file = "scikit_learn-0.23.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:93f56abd316d131645559ec0ab4f45e3391c2ccdd4eadaa4912f4c1e0a6f2c96"}, 1734 | {file = "scikit_learn-0.23.1-cp38-cp38-manylinux1_x86_64.whl", hash = 
"sha256:3e6e92b495eee193a8fa12a230c9b7976ea0fc1263719338e35c986ea1e42cff"}, 1735 | {file = "scikit_learn-0.23.1-cp38-cp38-win32.whl", hash = "sha256:5bcea4d6ee431c814261117281363208408aa4e665633655895feb059021aca6"}, 1736 | {file = "scikit_learn-0.23.1-cp38-cp38-win_amd64.whl", hash = "sha256:16feae4361be6b299d4d08df5a30956b4bfc8eadf173fe9258f6d59630f851d4"}, 1737 | ] 1738 | scipy = [ 1739 | {file = "scipy-1.4.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:c5cac0c0387272ee0e789e94a570ac51deb01c796b37fb2aad1fb13f85e2f97d"}, 1740 | {file = "scipy-1.4.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:a144811318853a23d32a07bc7fd5561ff0cac5da643d96ed94a4ffe967d89672"}, 1741 | {file = "scipy-1.4.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:71eb180f22c49066f25d6df16f8709f215723317cc951d99e54dc88020ea57be"}, 1742 | {file = "scipy-1.4.1-cp35-cp35m-win32.whl", hash = "sha256:770254a280d741dd3436919d47e35712fb081a6ff8bafc0f319382b954b77802"}, 1743 | {file = "scipy-1.4.1-cp35-cp35m-win_amd64.whl", hash = "sha256:a1aae70d52d0b074d8121333bc807a485f9f1e6a69742010b33780df2e60cfe0"}, 1744 | {file = "scipy-1.4.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:bb517872058a1f087c4528e7429b4a44533a902644987e7b2fe35ecc223bc408"}, 1745 | {file = "scipy-1.4.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:dba8306f6da99e37ea08c08fef6e274b5bf8567bb094d1dbe86a20e532aca088"}, 1746 | {file = "scipy-1.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:386086e2972ed2db17cebf88610aab7d7f6e2c0ca30042dc9a89cf18dcc363fa"}, 1747 | {file = "scipy-1.4.1-cp36-cp36m-win32.whl", hash = "sha256:8d3bc3993b8e4be7eade6dcc6fd59a412d96d3a33fa42b0fa45dc9e24495ede9"}, 1748 | {file = "scipy-1.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:dc60bb302f48acf6da8ca4444cfa17d52c63c5415302a9ee77b3b21618090521"}, 1749 | {file = "scipy-1.4.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:787cc50cab3020a865640aba3485e9fbd161d4d3b0d03a967df1a2881320512d"}, 1750 | {file = "scipy-1.4.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0902a620a381f101e184a958459b36d3ee50f5effd186db76e131cbefcbb96f7"}, 1751 | {file = "scipy-1.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:00af72998a46c25bdb5824d2b729e7dabec0c765f9deb0b504f928591f5ff9d4"}, 1752 | {file = "scipy-1.4.1-cp37-cp37m-win32.whl", hash = "sha256:9508a7c628a165c2c835f2497837bf6ac80eb25291055f56c129df3c943cbaf8"}, 1753 | {file = "scipy-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2d6df9eb074af7f08866598e4ef068a2b310d98f87dc23bd1b90ec7bdcec802"}, 1754 | {file = "scipy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3092857f36b690a321a662fe5496cb816a7f4eecd875e1d36793d92d3f884073"}, 1755 | {file = "scipy-1.4.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:8a07760d5c7f3a92e440ad3aedcc98891e915ce857664282ae3c0220f3301eb6"}, 1756 | {file = "scipy-1.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1e3190466d669d658233e8a583b854f6386dd62d655539b77b3fa25bfb2abb70"}, 1757 | {file = "scipy-1.4.1-cp38-cp38-win32.whl", hash = "sha256:cc971a82ea1170e677443108703a2ec9ff0f70752258d0e9f5433d00dda01f59"}, 1758 | {file = "scipy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:2cce3f9847a1a51019e8c5b47620da93950e58ebc611f13e0d11f4980ca5fecb"}, 1759 | {file = "scipy-1.4.1.tar.gz", hash = "sha256:dee1bbf3a6c8f73b6b218cb28eed8dd13347ea2f87d572ce19b289d6fd3fbc59"}, 1760 | ] 1761 | sentencepiece = [ 1762 | {file = "sentencepiece-0.1.91-cp27-cp27m-macosx_10_6_x86_64.whl", hash = "sha256:f2f109514b28326d5c6d69b43ba6b08e6fedf8fc77416b9a9c16be55c9ac138d"}, 1763 | {file = 
"sentencepiece-0.1.91-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:b3643634e043fd7a5914d51d7dc60003dad0af976b8496df0406a487b0b83a8e"}, 1764 | {file = "sentencepiece-0.1.91-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:21a2e9f476f0c3e45da1e5da00b7ec5241bbddb524bd7b1b3d61b1bcbc05efa6"}, 1765 | {file = "sentencepiece-0.1.91-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:30224b1f77af9cef79ffe3a40ed0e536be44df75c066f771e9e769b48379bd98"}, 1766 | {file = "sentencepiece-0.1.91-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:679fcdd01e01d990950a46a814210445f2202260d092d3b69f9826739d8aed2b"}, 1767 | {file = "sentencepiece-0.1.91-cp35-cp35m-macosx_10_6_x86_64.whl", hash = "sha256:123ac26429025b3153f8bae53d044e9dd29539e888dcb9f39a4982e6daf9dbe9"}, 1768 | {file = "sentencepiece-0.1.91-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:53f4a91c8a6b55a75caad70d9f34bf331a576e9708c549729b06412c857aacb9"}, 1769 | {file = "sentencepiece-0.1.91-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:f1c0cf36fcff4a3ea8925babc3ed7f5f3d58628e062b24f9fad92c400bc9210c"}, 1770 | {file = "sentencepiece-0.1.91-cp36-cp36m-macosx_10_6_x86_64.whl", hash = "sha256:c0b01bb8ab3b62aba76d6b0851a1d0fcf5df5ef5616f114ea85917d8ab5f59db"}, 1771 | {file = "sentencepiece-0.1.91-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:70b4164c98dba43246a068c848fab5ac9966b5bcd731ee4cb9bcf6ae976389a4"}, 1772 | {file = "sentencepiece-0.1.91-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:b858805ac3c6d92e5454a89476b1f9e0505e1511276cd258d5a70776c70374f1"}, 1773 | {file = "sentencepiece-0.1.91-cp36-cp36m-win32.whl", hash = "sha256:8bbb9173168d53165ba00172a5db7734e4826c30e6ffc4b3f8a6098713f6111d"}, 1774 | {file = "sentencepiece-0.1.91-cp36-cp36m-win_amd64.whl", hash = "sha256:51c25d504beeef4c697b8f55e2baf7c6b31733a190ff9b983a6db57faa59d3d8"}, 1775 | {file = "sentencepiece-0.1.91-cp37-cp37m-macosx_10_6_x86_64.whl", hash = "sha256:a04218f1b93b5669f3cee1dc8d6c397428e2f6af8843a20ddb2b629f6a86e632"}, 1776 | {file = "sentencepiece-0.1.91-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:7fcda06cab76136346dd279268664c65f0b66bb2147ceb97e2cec25b426e8210"}, 1777 | {file = "sentencepiece-0.1.91-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:3b4175a9d883a3b27436d51cb82794e3a62d0955bb36cc96d5d7932095c13135"}, 1778 | {file = "sentencepiece-0.1.91-cp37-cp37m-win32.whl", hash = "sha256:f0b383de68604195fe806072e7c8837eb5156455dfdb18fd26a9a94df3f57d42"}, 1779 | {file = "sentencepiece-0.1.91-cp37-cp37m-win_amd64.whl", hash = "sha256:82d819eb1e997b39424d7422aa885d2ef514f754dd2935a4f4fcfdedeee955c6"}, 1780 | {file = "sentencepiece-0.1.91-cp38-cp38-macosx_10_6_x86_64.whl", hash = "sha256:f331fd58f438d5d5476d189e9a4944c84f7e4b027533809292b9c119b58e43b8"}, 1781 | {file = "sentencepiece-0.1.91-cp38-cp38-manylinux1_i686.whl", hash = "sha256:79ad2f82b412859c516b569fb89b5e5c0ceba74d71c8d4b99cc8a2c734f3c79d"}, 1782 | {file = "sentencepiece-0.1.91-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:796726d680d6c26f0f4ff213a0f6b1ef790b02071268adf5f449739c1c903f93"}, 1783 | {file = "sentencepiece-0.1.91-cp38-cp38-win32.whl", hash = "sha256:9bdff324279359598a516de8413fd2c62bc3c9c8f569f4431829599fbe57e417"}, 1784 | {file = "sentencepiece-0.1.91-cp38-cp38-win_amd64.whl", hash = "sha256:c2a004470d388272d6c17ca160ef73d6ea1ffcddc345771d817ece5c85d85dcb"}, 1785 | {file = "sentencepiece-0.1.91.tar.gz", hash = "sha256:f9700cf607ea064d9fad34c751fbf49953dcc56fe68c54b277481aa0aec5c18f"}, 1786 | ] 1787 | six = [ 1788 | {file = "six-1.15.0-py2.py3-none-any.whl", hash = 
"sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, 1789 | {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, 1790 | ] 1791 | spacy = [ 1792 | {file = "spacy-2.2.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fd740cb1b50cd86c648f64313be4734b0c2a2931d83761f46821061f42d791a3"}, 1793 | {file = "spacy-2.2.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:01202066f75c7f2cfeb9c167c3184b5b0a9d465604b0ca553bd9e788353c5905"}, 1794 | {file = "spacy-2.2.4-cp36-cp36m-win_amd64.whl", hash = "sha256:f75ba238066455f5b5498a987b4e2c84705d92138e02e890e0b0a1d1eb2d9462"}, 1795 | {file = "spacy-2.2.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ce3886e9bfb9071d2708d2cd7157ada93ab378bbb38cf079842181cd671fc6f9"}, 1796 | {file = "spacy-2.2.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:212314be762bd40dfbbeeba1c4742c242e4b6ea3f9340891f0ff282b2e723ed0"}, 1797 | {file = "spacy-2.2.4-cp37-cp37m-win_amd64.whl", hash = "sha256:c5e6f8155f6b54a8ef89637b3c7d553f0ddb5478c4dd568fde7392efbf8a26c8"}, 1798 | {file = "spacy-2.2.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7313b4fa921ed997d9719f99f5a375d672d2f4a908c7750033c4b37d9fa8547a"}, 1799 | {file = "spacy-2.2.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6c1618c05bf65ae4bc94608f2390130ca21112fb3d920d1a03727691e3e7fb1b"}, 1800 | {file = "spacy-2.2.4-cp38-cp38-win_amd64.whl", hash = "sha256:877d8e157a708c8b77c0dea61e526632f6d57f27be64087dac22a4581facea68"}, 1801 | {file = "spacy-2.2.4.tar.gz", hash = "sha256:f0f3a67c5841e6e35d62c98f40ebb3d132587d3aba4f4dccac5056c4e90ff5b9"}, 1802 | ] 1803 | srsly = [ 1804 | {file = "srsly-1.0.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:7c553a709fd56a37a07f969e849f55a0aeabaeb7677bebc588a640ab8ec134aa"}, 1805 | {file = "srsly-1.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:21cfb0e5dea2c4515b5c2daa78402d5782c6425b4f58af40d2e2cb45e4778d8c"}, 1806 | {file = "srsly-1.0.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:46213d8f094b348a9433c825ac1eba36a21aa25a8bae6f29c2f9f053e15be961"}, 1807 | {file = "srsly-1.0.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2179cf1e88c250e89e40227bd5848341011c170079b3d424987d067de6a73f42"}, 1808 | {file = "srsly-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b94d8a13c60e3298a9ba12b1b211026e8378c7d087efd7ce46a3f2d8d4678d94"}, 1809 | {file = "srsly-1.0.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8beff52c104a7ffe4a15513a05dc0497998cf83aa1ca39454489994d18c1c07"}, 1810 | {file = "srsly-1.0.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:95849d84e8929be248a180e672c8ce1ed98b1341263bc983efdf8427465584f1"}, 1811 | {file = "srsly-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:3f3975e8cb67194d26dd03508469b1303f8b994f30e7782f7eae25fef6dc4aad"}, 1812 | {file = "srsly-1.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d409beb7257208633c974c01f9dc3265562fb6802caee7de21880761ba87c3ed"}, 1813 | {file = "srsly-1.0.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:18bad26c34cf5a8853fbf018fd168a7bf2ea7ce661e66476c25dac711cb79c9b"}, 1814 | {file = "srsly-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:29434753a77481ec6129991f4116f983085cc8005c1ad963261124842e8c05fc"}, 1815 | {file = "srsly-1.0.2.tar.gz", hash = "sha256:59258b81d567df207f8a0a33c4b5fa232afccf1d927c8ce3ba5395bfd64c0ed8"}, 1816 | ] 1817 | thinc = [ 1818 | {file = "thinc-7.4.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9c40101f3148405cb291be2033758d011d348a5dea5d151811def8d1e466f25a"}, 1819 | {file = 
"thinc-7.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:ebb81b7ff8f852aae1b9c26dfb629344ab962e221ec87c83b2a7c4aec337477d"}, 1820 | {file = "thinc-7.4.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:23b77994be3376cd8efa85adfa1bcf0ffcb4cfd279f48a3ab842570f419334ca"}, 1821 | {file = "thinc-7.4.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2aa4cab69067f9dbe4ed7a1d937a4467edcc5f50d43996fba8c645f08ab1f387"}, 1822 | {file = "thinc-7.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:0522cc8b7a74e1de0902b55e1f141f889a088565f72ea0042a9c0f7f3ce83879"}, 1823 | {file = "thinc-7.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d1ee60d44ee840b75c0c0a3ade70908f05f414a65f20082483a5a5bfe82e9497"}, 1824 | {file = "thinc-7.4.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1375c11ed4f7c7178a5749e17b2f3bb1644c98ecc8874e402aceaeec63df6297"}, 1825 | {file = "thinc-7.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7bb69a8cace8d85a3f65d94176f381c5216df08d79a520b005653d0a23f523a8"}, 1826 | {file = "thinc-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f3c5786238991925694aba81fa305c1f2290a960fe5428a26b6f82134b260ad1"}, 1827 | {file = "thinc-7.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:a7332e323b76d63e1cfd2e6bc08a5527c5a6a0eba39197c56af8fe6eef62ef69"}, 1828 | {file = "thinc-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5ac162b010f21f8fcc3fd10766025fad3ec670f6b2e0a72284912332d1ae292a"}, 1829 | {file = "thinc-7.4.0.tar.gz", hash = "sha256:523e9be1bfaa3ed1d03d406ce451b6b4793a9719d5b83d2ea6b3398b96bc58b8"}, 1830 | ] 1831 | threadpoolctl = [ 1832 | {file = "threadpoolctl-2.1.0-py3-none-any.whl", hash = "sha256:38b74ca20ff3bb42caca8b00055111d74159ee95c4370882bbff2b93d24da725"}, 1833 | {file = "threadpoolctl-2.1.0.tar.gz", hash = "sha256:ddc57c96a38beb63db45d6c159b5ab07b6bced12c45a1f07b2b92f272aebfa6b"}, 1834 | ] 1835 | tokenizers = [ 1836 | {file = "tokenizers-0.5.2-cp35-cp35m-macosx_10_15_x86_64.whl", hash = "sha256:986d3502c794ffdee17acd18e6b002c9d8c7636a00c81277309202a3ad3fd778"}, 1837 | {file = "tokenizers-0.5.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:3090b03f192b4b8e18d43cae8d3ea8612f38b4d3d314553e0153f0662cac47c5"}, 1838 | {file = "tokenizers-0.5.2-cp35-cp35m-win_amd64.whl", hash = "sha256:6b27efdc51a225967e5237ca5c38c1bb7730e9904f666d0346df3a580f62fb54"}, 1839 | {file = "tokenizers-0.5.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:28551ed0b445e307c30f83ebaa181016f915ee12d2c037df00303128364bbc2b"}, 1840 | {file = "tokenizers-0.5.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:eea450fa68c4e7309f6cfd105b77d3f2f8c8c7c90e121abc7399cfbce5c650da"}, 1841 | {file = "tokenizers-0.5.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3e157fc74fd280183922684d7fc8fb8a22f7a09958132feefbdd4cc5f3d4cded"}, 1842 | {file = "tokenizers-0.5.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:7d9752cbc4d8276758b25f52a3ac25df7bbd3b8948df3f753470c9de2aa31ff0"}, 1843 | {file = "tokenizers-0.5.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:3c6a3123253d4150c69a3a5288f994b9384b4df2c8b0ae2be3adb47a2570f390"}, 1844 | {file = "tokenizers-0.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:f131788f76d4ceef2abc25c245b42807efc059753f7f76b32cbc6ead8a4c80f0"}, 1845 | {file = "tokenizers-0.5.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f51f774cf5f1c991490cd7580b411f67e62586e91ee0bfe51a8338850511cced"}, 1846 | {file = "tokenizers-0.5.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:423cfe1ee552a75e3a52584c935494e1895e5d2632e07a87d795869162327255"}, 1847 | {file = 
"tokenizers-0.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:e9fa4c6002478131b58255e5337512f8a5630c0f52b14f6d6905c8ac37b47a72"}, 1848 | {file = "tokenizers-0.5.2.tar.gz", hash = "sha256:b5a235f9c71d04d4925df6c4fa13b13f1d03f9b7ac302b89f8120790c4f742bc"}, 1849 | ] 1850 | toml = [ 1851 | {file = "toml-0.10.1-py2.py3-none-any.whl", hash = "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88"}, 1852 | {file = "toml-0.10.1.tar.gz", hash = "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f"}, 1853 | ] 1854 | torch = [ 1855 | {file = "torch-1.5.0-cp27-none-macosx_10_7_x86_64.whl", hash = "sha256:6fcfe5deaf0788bbe8639869d3c752ff5fe1bdedce11c7ed2d44379b1fbe6d6c"}, 1856 | {file = "torch-1.5.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:931b79aed9aba50bf314214be6efaaf7972ea9539a3d63f82622bc5860a1fd81"}, 1857 | {file = "torch-1.5.0-cp35-none-macosx_10_6_x86_64.whl", hash = "sha256:7f3d6af2d7e2576b9640aa684f0c18a773efffe8b37f9056272287345c1dcba5"}, 1858 | {file = "torch-1.5.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:dfaac4c5d27ac80705956743c34fb1ab5fb37e1646a6c8e45f05f7e739f6ea7c"}, 1859 | {file = "torch-1.5.0-cp36-none-macosx_10_9_x86_64.whl", hash = "sha256:402951484443bb49b5bc2129414ac6c644c07b8378e79922cf3645fd08cbfdc9"}, 1860 | {file = "torch-1.5.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:865d4bec21542647e0822e8b753e05d67eee874974a3937273f710edd99a7516"}, 1861 | {file = "torch-1.5.0-cp37-none-macosx_10_9_x86_64.whl", hash = "sha256:3cc72d36eaeda96488e3a29373f739b887338952417b3e1620871063bf5d14d2"}, 1862 | {file = "torch-1.5.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ecdc2ea4011e3ec04937b6b9e803ab671c3ac04e81b1df20354e01453e508b2f"}, 1863 | {file = "torch-1.5.0-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:cb4412c6b00117ab5e014d07dac45b87f1e918e31fbb849e7e39f1f9140fff59"}, 1864 | ] 1865 | torchvision = [ 1866 | {file = "torchvision-0.6.0-cp35-cp35m-macosx_10_6_x86_64.whl", hash = "sha256:0ea04a7e0f64599c158d36da01afd0cb3bc49033d2a145be4eb80c17c4c0482b"}, 1867 | {file = "torchvision-0.6.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:a9b08435fdadd89520a78f5a54d196c05878d1a15e37f760d43f72f10bae308f"}, 1868 | {file = "torchvision-0.6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8992f10a7860e0991766a788b546d5f11e3e7465e87a72eb9c78675dd2616400"}, 1869 | {file = "torchvision-0.6.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f43dae3b348afa5778439913ba1f3f176362ffc9e684ef01dc54dae7cf1b82e4"}, 1870 | {file = "torchvision-0.6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0fa9e4a8381e5e04d0da0acd93f1429347053497ec343fe6d625b1b7fb2ce36e"}, 1871 | {file = "torchvision-0.6.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:6eb4e0d7dc61030447b98d412162f222a95d848b3b0e484a81282c057af6dd25"}, 1872 | {file = "torchvision-0.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:691d68f3726b7392fe37db7184aef8a6b6f7cf6ff38fae769b287b3d6e1eb69a"}, 1873 | {file = "torchvision-0.6.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ea39bed9e9497a67c5f66e37d3d5a663a0284868ae8616de81f65c66d9ad802b"}, 1874 | ] 1875 | tornado = [ 1876 | {file = "tornado-6.0.4-cp35-cp35m-win32.whl", hash = "sha256:5217e601700f24e966ddab689f90b7ea4bd91ff3357c3600fa1045e26d68e55d"}, 1877 | {file = "tornado-6.0.4-cp35-cp35m-win_amd64.whl", hash = "sha256:c98232a3ac391f5faea6821b53db8db461157baa788f5d6222a193e9456e1740"}, 1878 | {file = "tornado-6.0.4-cp36-cp36m-win32.whl", hash = 
"sha256:5f6a07e62e799be5d2330e68d808c8ac41d4a259b9cea61da4101b83cb5dc673"}, 1879 | {file = "tornado-6.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c952975c8ba74f546ae6de2e226ab3cc3cc11ae47baf607459a6728585bb542a"}, 1880 | {file = "tornado-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:2c027eb2a393d964b22b5c154d1a23a5f8727db6fda837118a776b29e2b8ebc6"}, 1881 | {file = "tornado-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:5618f72e947533832cbc3dec54e1dffc1747a5cb17d1fd91577ed14fa0dc081b"}, 1882 | {file = "tornado-6.0.4-cp38-cp38-win32.whl", hash = "sha256:22aed82c2ea340c3771e3babc5ef220272f6fd06b5108a53b4976d0d722bcd52"}, 1883 | {file = "tornado-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:c58d56003daf1b616336781b26d184023ea4af13ae143d9dda65e31e534940b9"}, 1884 | {file = "tornado-6.0.4.tar.gz", hash = "sha256:0fe2d45ba43b00a41cd73f8be321a44936dc1aba233dee979f17a042b83eb6dc"}, 1885 | ] 1886 | tqdm = [ 1887 | {file = "tqdm-4.46.1-py2.py3-none-any.whl", hash = "sha256:07c06493f1403c1380b630ae3dcbe5ae62abcf369a93bbc052502279f189ab8c"}, 1888 | {file = "tqdm-4.46.1.tar.gz", hash = "sha256:cd140979c2bebd2311dfb14781d8f19bd5a9debb92dcab9f6ef899c987fcf71f"}, 1889 | ] 1890 | traitlets = [ 1891 | {file = "traitlets-4.3.3-py2.py3-none-any.whl", hash = "sha256:70b4c6a1d9019d7b4f6846832288f86998aa3b9207c6821f3578a6a6a467fe44"}, 1892 | {file = "traitlets-4.3.3.tar.gz", hash = "sha256:d023ee369ddd2763310e4c3eae1ff649689440d4ae59d7485eb4cfbbe3e359f7"}, 1893 | ] 1894 | transformers = [ 1895 | {file = "transformers-2.8.0-py3-none-any.whl", hash = "sha256:2b64cfe0033a47ba664837758cd9750196666ea1306e5c40ad5617353c3dc2fc"}, 1896 | {file = "transformers-2.8.0.tar.gz", hash = "sha256:b9f29cdfd39c28f29e0806c321270dea337d6174a7aa60daf9625bf83dbb12ee"}, 1897 | ] 1898 | typed-ast = [ 1899 | {file = "typed_ast-1.4.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3"}, 1900 | {file = "typed_ast-1.4.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb"}, 1901 | {file = "typed_ast-1.4.1-cp35-cp35m-win32.whl", hash = "sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919"}, 1902 | {file = "typed_ast-1.4.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01"}, 1903 | {file = "typed_ast-1.4.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75"}, 1904 | {file = "typed_ast-1.4.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652"}, 1905 | {file = "typed_ast-1.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7"}, 1906 | {file = "typed_ast-1.4.1-cp36-cp36m-win32.whl", hash = "sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1"}, 1907 | {file = "typed_ast-1.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa"}, 1908 | {file = "typed_ast-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614"}, 1909 | {file = "typed_ast-1.4.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41"}, 1910 | {file = "typed_ast-1.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b"}, 1911 | {file 
= "typed_ast-1.4.1-cp37-cp37m-win32.whl", hash = "sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe"}, 1912 | {file = "typed_ast-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355"}, 1913 | {file = "typed_ast-1.4.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6"}, 1914 | {file = "typed_ast-1.4.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907"}, 1915 | {file = "typed_ast-1.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d"}, 1916 | {file = "typed_ast-1.4.1-cp38-cp38-win32.whl", hash = "sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c"}, 1917 | {file = "typed_ast-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4"}, 1918 | {file = "typed_ast-1.4.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34"}, 1919 | {file = "typed_ast-1.4.1.tar.gz", hash = "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b"}, 1920 | ] 1921 | typedload = [ 1922 | {file = "typedload-2.1.tar.gz", hash = "sha256:3791d3b21025d21567088741561098d0801db40f120b2f080d18a32b891c6f6d"}, 1923 | ] 1924 | urllib3 = [ 1925 | {file = "urllib3-1.25.9-py2.py3-none-any.whl", hash = "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115"}, 1926 | {file = "urllib3-1.25.9.tar.gz", hash = "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527"}, 1927 | ] 1928 | wasabi = [ 1929 | {file = "wasabi-0.6.0-py3-none-any.whl", hash = "sha256:da1f100e0025fe1e50fd67fa5b0b05df902187d5c65c86dc110974ab856d1f05"}, 1930 | {file = "wasabi-0.6.0.tar.gz", hash = "sha256:b8dd3e963cd693fde1eb6bfbecf51790171aa3534fa299faf35cf269f2fd6063"}, 1931 | ] 1932 | wcwidth = [ 1933 | {file = "wcwidth-0.2.3-py2.py3-none-any.whl", hash = "sha256:980fbf4f3c196c0f329cdcd1e84c554d6a211f18e252e525a0cf4223154a41d6"}, 1934 | {file = "wcwidth-0.2.3.tar.gz", hash = "sha256:edbc2b718b4db6cdf393eefe3a420183947d6aa312505ce6754516f458ff8830"}, 1935 | ] 1936 | websockets = [ 1937 | {file = "websockets-8.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:3762791ab8b38948f0c4d281c8b2ddfa99b7e510e46bd8dfa942a5fff621068c"}, 1938 | {file = "websockets-8.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:3db87421956f1b0779a7564915875ba774295cc86e81bc671631379371af1170"}, 1939 | {file = "websockets-8.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4f9f7d28ce1d8f1295717c2c25b732c2bc0645db3215cf757551c392177d7cb8"}, 1940 | {file = "websockets-8.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:295359a2cc78736737dd88c343cd0747546b2174b5e1adc223824bcaf3e164cb"}, 1941 | {file = "websockets-8.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:1d3f1bf059d04a4e0eb4985a887d49195e15ebabc42364f4eb564b1d065793f5"}, 1942 | {file = "websockets-8.1-cp36-cp36m-win32.whl", hash = "sha256:2db62a9142e88535038a6bcfea70ef9447696ea77891aebb730a333a51ed559a"}, 1943 | {file = "websockets-8.1-cp36-cp36m-win_amd64.whl", hash = "sha256:0e4fb4de42701340bd2353bb2eee45314651caa6ccee80dbd5f5d5978888fed5"}, 1944 | {file = "websockets-8.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:9b248ba3dd8a03b1a10b19efe7d4f7fa41d158fdaa95e2cf65af5a7b95a4f989"}, 1945 | {file = "websockets-8.1-cp37-cp37m-manylinux1_i686.whl", hash = 
"sha256:ce85b06a10fc65e6143518b96d3dca27b081a740bae261c2fb20375801a9d56d"}, 1946 | {file = "websockets-8.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:965889d9f0e2a75edd81a07592d0ced54daa5b0785f57dc429c378edbcffe779"}, 1947 | {file = "websockets-8.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:751a556205d8245ff94aeef23546a1113b1dd4f6e4d102ded66c39b99c2ce6c8"}, 1948 | {file = "websockets-8.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:3ef56fcc7b1ff90de46ccd5a687bbd13a3180132268c4254fc0fa44ecf4fc422"}, 1949 | {file = "websockets-8.1-cp37-cp37m-win32.whl", hash = "sha256:7ff46d441db78241f4c6c27b3868c9ae71473fe03341340d2dfdbe8d79310acc"}, 1950 | {file = "websockets-8.1-cp37-cp37m-win_amd64.whl", hash = "sha256:20891f0dddade307ffddf593c733a3fdb6b83e6f9eef85908113e628fa5a8308"}, 1951 | {file = "websockets-8.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c1ec8db4fac31850286b7cd3b9c0e1b944204668b8eb721674916d4e28744092"}, 1952 | {file = "websockets-8.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:5c01fd846263a75bc8a2b9542606927cfad57e7282965d96b93c387622487485"}, 1953 | {file = "websockets-8.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:9bef37ee224e104a413f0780e29adb3e514a5b698aabe0d969a6ba426b8435d1"}, 1954 | {file = "websockets-8.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:d705f8aeecdf3262379644e4b55107a3b55860eb812b673b28d0fbc347a60c55"}, 1955 | {file = "websockets-8.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:c8a116feafdb1f84607cb3b14aa1418424ae71fee131642fc568d21423b51824"}, 1956 | {file = "websockets-8.1-cp38-cp38-win32.whl", hash = "sha256:e898a0863421650f0bebac8ba40840fc02258ef4714cb7e1fd76b6a6354bda36"}, 1957 | {file = "websockets-8.1-cp38-cp38-win_amd64.whl", hash = "sha256:f8a7bff6e8664afc4e6c28b983845c5bc14965030e3fb98789734d416af77c4b"}, 1958 | {file = "websockets-8.1.tar.gz", hash = "sha256:5c65d2da8c6bce0fca2528f69f44b2f977e06954c8512a952222cea50dad430f"}, 1959 | ] 1960 | zipp = [ 1961 | {file = "zipp-3.1.0-py3-none-any.whl", hash = "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b"}, 1962 | {file = "zipp-3.1.0.tar.gz", hash = "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"}, 1963 | ] 1964 | --------------------------------------------------------------------------------