├── input
│   └── .gitkeep
├── notebook
│   ├── .gitkeep
│   └── sample.ipynb
├── output
│   └── .gitkeep
├── utils
│   ├── __init__.py
│   ├── env.py
│   ├── logger.py
│   └── timing.py
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── experiments
│   └── exp000_sample
│       ├── exp
│       │   ├── 001.yaml
│       │   └── 000.yaml
│       ├── config.yaml
│       └── run.py
├── .jupyter-settings
│   └── @jupyterlab
│       ├── docmanager-extension
│       │   └── plugin.jupyterlab-settings
│       └── filebrowser-extension
│           └── widget.jupyterlab-settings
├── Dockerfile.cpu
├── Dockerfile
├── Makefile
├── compose.cpu.yaml
├── compose.yaml
├── tools
│   ├── check_submission.py
│   └── upload_model.py
├── LICENSE
├── README.md
└── .gitignore

/input/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/notebook/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/output/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
{
    "recommendations": [
        "charliermarsh.ruff"
    ]
}
--------------------------------------------------------------------------------
/experiments/exp000_sample/exp/001.yaml:
--------------------------------------------------------------------------------
defaults:
  - default@_here_ # set the default values first, then override them below

seed: 634
--------------------------------------------------------------------------------
/experiments/exp000_sample/exp/000.yaml:
--------------------------------------------------------------------------------
defaults:
  - default@_here_ # set the default values first, then override them below

seed: 0
folds: [0]
--------------------------------------------------------------------------------
/.jupyter-settings/@jupyterlab/docmanager-extension/plugin.jupyterlab-settings:
--------------------------------------------------------------------------------
{
    "autosave": true,
    "autosaveInterval": 600
}
--------------------------------------------------------------------------------
/Dockerfile.cpu:
--------------------------------------------------------------------------------
# https://github.com/Kaggle/docker-python
FROM gcr.io/kaggle-images/python:v160

RUN pip install --no-cache-dir \
    hydra-core
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# https://github.com/Kaggle/docker-python/releases
FROM gcr.io/kaggle-gpu-images/python:v160

RUN pip install --no-cache-dir \
    hydra-core
--------------------------------------------------------------------------------
/utils/env.py:
--------------------------------------------------------------------------------
from dataclasses import dataclass


@dataclass
class EnvConfig:
    input_dir: str = "/kaggle/input"
    output_dir: str = "/kaggle/working/output"
    exp_output_dir: str = "/kaggle/working/output/experiments"
--------------------------------------------------------------------------------
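Since `EnvConfig` is a plain dataclass, it can also be instantiated and overridden directly, for example when prototyping outside Hydra. A minimal sketch (the `./output` value is only an illustrative override, not part of the template):

```python
from pathlib import Path

from utils.env import EnvConfig

# Override a single field for a non-Kaggle environment; the other defaults are kept.
env = EnvConfig(output_dir="./output")
print(Path(env.output_dir) / "experiments")
```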
/experiments/exp000_sample/config.yaml:
--------------------------------------------------------------------------------
defaults:
  - _self_
  # the "default" configs are registered in the Python script
  - exp: default
  - env: default
  # prevent Hydra from automatically generating log files
  - override hydra/job_logging: none

hydra:
  output_subdir: null
  job:
    chdir: False
  run:
    dir: .
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
{
    "editor.formatOnPaste": true,
    "[python]": {
        "editor.formatOnSave": true,
        "editor.codeActionsOnSave": {
            "source.organizeImports": "explicit"
        },
        "editor.defaultFormatter": "charliermarsh.ruff"
    },
    "cSpell.words": [
        "kaggle"
    ]
}
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
CPU_FLAG :=
ifneq ($(CPU),)
CPU_FLAG := -f compose.cpu.yaml
endif

default: build

build:
	docker compose $(CPU_FLAG) build

bash:
	docker compose $(CPU_FLAG) run --rm kaggle bash

jupyter:
	docker compose $(CPU_FLAG) up

down:
	docker compose $(CPU_FLAG) down
--------------------------------------------------------------------------------
/compose.cpu.yaml:
--------------------------------------------------------------------------------
services:
  kaggle:
    container_name: 'kaggle-cpu'
    build:
      context: .
      dockerfile: Dockerfile.cpu
    volumes:
      - $PWD/.jupyter:/root/.jupyter # Jupyter configuration
      - $PWD/input:/kaggle/input
      - $PWD/.cache:/root/.cache
      - $PWD:/kaggle/working
    environment:
      - JUPYTERLAB_SETTINGS_DIR=/kaggle/working/.jupyter-settings # JupyterLab settings (persisted in the repo)
      - NETRC=/kaggle/working/.netrc # where the wandb key is stored
    working_dir: /kaggle/working
    ports:
      - 8889:8889
    command: jupyter lab --allow-root --ip=0.0.0.0 --port=8889 --no-browser --NotebookApp.token='' --config="./jupyter_lab_config.py"
    shm_size: '2gb'
    tty: true
--------------------------------------------------------------------------------
/compose.yaml:
--------------------------------------------------------------------------------
services:
  kaggle:
    build: .
    volumes:
      - $PWD/.jupyter:/root/.jupyter # Jupyter configuration
      - $PWD/input:/kaggle/input
      - $PWD/.cache:/root/.cache
      - $PWD:/kaggle/working
    environment:
      - JUPYTERLAB_SETTINGS_DIR=/kaggle/working/.jupyter-settings # JupyterLab settings (persisted in the repo)
      - NETRC=/kaggle/working/.netrc # where the wandb key is stored
    working_dir: /kaggle/working
    ports:
      - 8889:8889
    command: jupyter lab --allow-root --ip=0.0.0.0 --port=8889 --no-browser --NotebookApp.token=''
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [ gpu ]
    shm_size: '8gb'
--------------------------------------------------------------------------------
/utils/logger.py:
--------------------------------------------------------------------------------
import logging
import time
from logging import INFO, FileHandler, StreamHandler
from pathlib import Path


def get_logger(file_name: str, file_dir: Path | str) -> logging.Logger:
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(file_name)
    logger.setLevel(logging.INFO)

    stream_handler = StreamHandler()
    stream_handler.setLevel(INFO)
    logger.addHandler(stream_handler)

    log_path = Path(file_dir) / f"{time.strftime('%Y%m%d_%H%M%S')}.log"
    file_handler = FileHandler(log_path)
    file_handler.setLevel(INFO)
    formatter = logging.Formatter("[%(asctime)s : %(levelname)s - %(filename)s] %(message)s")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    logger.propagate = False
    return logger
--------------------------------------------------------------------------------
/.jupyter-settings/@jupyterlab/filebrowser-extension/widget.jupyterlab-settings:
--------------------------------------------------------------------------------
{
    "toolbar": [
        {
            "name": "new-launcher",
            "command": "launcher:create",
            "disabled": false,
            "rank": 1
        },
        {
            "name": "new-directory",
            "command": "filebrowser:create-new-directory",
            "disabled": false,
            "rank": 10
        },
        {
            "name": "uploader",
            "command": "",
            "disabled": false,
            "rank": 20
        },
        {
            "name": "refresh",
            "command": "filebrowser:refresh",
            "disabled": false,
            "rank": 30
        },
        {
            "name": "fileNameSearcher",
            "command": "",
            "disabled": false,
            "rank": 40
        }
    ]
}
--------------------------------------------------------------------------------
/utils/timing.py:
--------------------------------------------------------------------------------
import math
import os
import sys
import time
from contextlib import contextmanager

import psutil


@contextmanager
def trace(title):
    """Report memory usage and elapsed time of the enclosed block.

    Examples:
        >>> with trace("wait"):
        ...     time.sleep(2.0)
    """
    t0 = time.time()
    p = psutil.Process(os.getpid())
    m0 = p.memory_info().rss / 2.0**30
    yield
    m1 = p.memory_info().rss / 2.0**30
    delta = m1 - m0
    sign = "+" if delta >= 0 else "-"
    delta = math.fabs(delta)
    print(
        f"[{m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec] {title} ",
        file=sys.stderr,
    )


@contextmanager
def timer(name):
    """Report the elapsed time of the enclosed block.

    Examples:
        >>> with timer("wait"):
        ...     time.sleep(2.0)
    """
    t0 = time.time()
    yield
    elapsed_time = time.time() - t0
    print(f"[{name}] done in {elapsed_time:.1f} s")
--------------------------------------------------------------------------------
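A short usage sketch of the two helper modules above, assuming it runs from the repository root (the `./output` directory is created if it does not exist yet):

```python
import time
from pathlib import Path

from utils.logger import get_logger
from utils.timing import timer, trace

log_dir = Path("./output")
log_dir.mkdir(parents=True, exist_ok=True)  # get_logger writes a timestamped .log file here
logger = get_logger(__name__, log_dir)

with timer("sleep"), trace("sleep"):
    time.sleep(1.0)  # stand-in for real work
logger.info("done")
```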
/tools/check_submission.py:
--------------------------------------------------------------------------------
import datetime
import time
from datetime import timezone

from kaggle.api.kaggle_api_extended import KaggleApi

api = KaggleApi()
api.authenticate()

COMPETITION = "child-mind-institute-detect-sleep-states"
result_ = api.competition_submissions(COMPETITION)[0]
latest_ref = str(result_)  # ref of the latest submission
print(result_.url)
submit_time = result_.date

status = ""

while status != "complete":
    list_of_submission = api.competition_submissions(COMPETITION)
    for result in list_of_submission:
        if str(result.ref) == latest_ref:
            break
    status = result.status

    now = datetime.datetime.now(timezone.utc).replace(tzinfo=None)
    elapsed_time = int((now - submit_time).total_seconds() / 60) + 1
    if status == "complete":
        print("\r", f"run-time: {elapsed_time} min, LB: {result.publicScore}")
    else:
        print("\r", f"elapsed time: {elapsed_time} min", end="")
        time.sleep(60)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 unonao

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Experiment template for ML competitions

## Features
- Portable environment identical to Kaggle's, built with Docker
- Experiment management with Hydra
- Experiment scripts are kept in one folder per major version & experiment parameter settings are kept as one file per minor version
- Keeping an experiment script and its parameter settings together in the same folder makes each experiment easy to find and understand
- Configs are defined with dataclasses so that editor completion can be used

### Config management with Hydra
- Configs are defined with dataclasses instead of YAML plus dicts, which allows editor features such as completion while preventing typos
- Environment-dependent settings shared by every script are defined in EnvConfig in utils/env.py
- Settings that differ per script are managed as `exp/{minor_exp_name}.yaml` files placed in the folder that contains the run script (`{major_exp_name}`)
- They are overridden at run time with `exp={minor_exp_name}`
- An experiment is reproducible from the combination of `{major_exp_name}` and `{minor_exp_name}`

## Structure
```text
.
├── experiments
├── input
├── notebook
├── output
├── tools
├── utils
├── Dockerfile
├── Dockerfile.cpu
├── LICENSE
├── Makefile
├── README.md
├── compose.cpu.yaml
└── compose.yaml

```

## Environment setup with Docker

```sh
# build the image
make build

# enter a bash shell
make bash

# start JupyterLab
make jupyter

# to run on CPU, add CPU=1, CPU=True, etc. to the command
```

## How to run the scripts

```sh
# python experiments/{major_version_name}/run.py exp={minor_version_name}

python experiments/exp000_sample/run.py
python experiments/exp000_sample/run.py exp=001
```
--------------------------------------------------------------------------------
/notebook/sample.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "116a23a6-2c3d-4da1-9a03-e8977528b65c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/kaggle/working\n"
     ]
    }
   ],
   "source": [
    "%cd /kaggle/working"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "bb7750da",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Config(env=EnvConfig(input_dir='/kaggle/input', output_dir='/kaggle/working/output', exp_output_dir='/kaggle/working/output/experiments'), exp=ExpConfig(seed=7, learning_rate=0.001, batch_size=32))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from experiments.exp000_sample.run import Config\n",
    "\n",
    "cfg = Config()\n",
    "cfg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "8926e091",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "exp_name='notebook/tmp', output_path=PosixPath('/kaggle/working/output/notebook/tmp')\n"
     ]
    }
   ],
   "source": [
    "from pathlib import Path\n",
    "import os\n",
    "\n",
    "exp_name = \"notebook/tmp\"\n",
    "output_path = Path(cfg.env.output_dir) / exp_name\n",
    "print(f\"{exp_name=}, {output_path=}\")\n",
    "os.makedirs(output_path, exist_ok=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
--------------------------------------------------------------------------------
/experiments/exp000_sample/run.py:
--------------------------------------------------------------------------------
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path

import hydra
import wandb
from hydra.core.config_store import ConfigStore
from hydra.core.hydra_config import HydraConfig
from omegaconf import OmegaConf

from utils.env import EnvConfig
from utils.logger import get_logger
from utils.timing import trace

LOGGER = None
WANDB_PROJECT_NAME = "kaggle-template"


####################
# Config definitions
####################
@dataclass
class ExpConfig:
    debug: bool = False
    seed: int = 7
    learning_rate: float = 0.001
    batch_size: int = 32
    folds: list = field(default_factory=lambda: [0, 1, 2, 3, 4])


@dataclass
class Config:
    env: EnvConfig = field(default_factory=EnvConfig)
    exp: ExpConfig = field(default_factory=ExpConfig)


# register the defaults for hydra
cs = ConfigStore.instance()
cs.store(name="default", group="env", node=EnvConfig)
cs.store(name="default", group="exp", node=ExpConfig)


####################
# Experiment code
####################
def log_config(cfg: Config) -> None:
    LOGGER.info("Config: %s", cfg)


@hydra.main(version_base=None, config_path=".", config_name="config")
def main(
    cfg: Config,
) -> None:  # Duck typing: cfg is actually a DictConfig, but it can be handled as if it were the Config class
    print(cfg)

    exp_name = f"{Path(sys.argv[0]).parent.name}/{HydraConfig.get().runtime.choices.exp}"  # e.g. exp000_sample/default
    output_dir = Path(cfg.env.exp_output_dir) / exp_name
    os.makedirs(output_dir, exist_ok=True)
    print(f"output_dir: {output_dir}")

    with trace("sleep"):
        time.sleep(1.1)

    global LOGGER
    LOGGER = get_logger(__name__, output_dir)
    LOGGER.info("Start")

    log_config(cfg)

    wandb.init(
        project=WANDB_PROJECT_NAME,
        name=exp_name,
        notes=", ".join(HydraConfig.get().overrides.task),  # the overrides that were applied
        config=OmegaConf.to_container(cfg.exp, resolve=True),
        mode="disabled" if cfg.exp.debug else "online",
    )


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/tools/upload_model.py:
--------------------------------------------------------------------------------
import json
import shutil
from pathlib import Path
from typing import Any

import click
from kaggle.api.kaggle_api_extended import KaggleApi


def copy_files_with_exts(source_dir: Path, dest_dir: Path, exts: list):
    """
    source_dir: directory to start searching from
    dest_dir: destination directory to copy into
    exts: list of target extensions (e.g. ['.txt', '.jpg'])
    """

    # find the files under source_dir that match each extension
    for ext in exts:
        for source_path in source_dir.rglob(f"*{ext}"):
            # compute the relative path inside dest_dir
            relative_path = source_path.relative_to(source_dir)
            dest_path = dest_dir / relative_path

            # create the destination directory if necessary
            dest_path.parent.mkdir(parents=True, exist_ok=True)

            # copy the file
            shutil.copy2(source_path, dest_path)
            print(f"Copied {source_path} to {dest_path}")


@click.command()
@click.option("--title", "-t", default="kami-model")
@click.option("--dir", "-d", type=Path, default="./output/experiments")
@click.option(
    "--extensions",
    "-e",
    multiple=True,
    default=["best_model.pt", ".hydra/*.yaml"],
)
@click.option("--user_name", "-u", default="kami634")
@click.option("--new", "-n", is_flag=True)
def main(
    title: str,
    dir: Path,
    extensions: list[str] = [".pth", ".yaml"],
    user_name: str = "kami634",
    new: bool = False,
):
    """Collect the files under dir that match the given extensions, archive them, and upload them to Kaggle as a dataset.

    Args:
        title (str): title to use when uploading to Kaggle
        dir (Path): directory containing the files to upload
        extensions (list[str], optional): extensions of the files to upload.
        user_name (str, optional): Kaggle user name.
        new (bool, optional): whether to upload as a new dataset.
    """
    tmp_dir = Path("./tmp")
    tmp_dir.mkdir(parents=True, exist_ok=True)

    # copy the files that match the target extensions
    copy_files_with_exts(dir, tmp_dir, extensions)

    # create dataset-metadata.json
    dataset_metadata: dict[str, Any] = {}
    dataset_metadata["id"] = f"{user_name}/{title}"
    dataset_metadata["licenses"] = [{"name": "CC0-1.0"}]
    dataset_metadata["title"] = title
    with open(tmp_dir / "dataset-metadata.json", "w") as f:
        json.dump(dataset_metadata, f, indent=4)

    # authenticate with the API
    api = KaggleApi()
    api.authenticate()

    if new:
        api.dataset_create_new(
            folder=tmp_dir,
            dir_mode="tar",
            convert_to_csv=False,
            public=False,
        )
    else:
        api.dataset_create_version(
            folder=tmp_dir,
            version_notes="",
            dir_mode="tar",
            convert_to_csv=False,
        )

    # delete tmp dir
    shutil.rmtree(tmp_dir)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
/.virtual_documents
/.jupyter
/.ipython
/input
/output
/wandb
/.netrc


# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
--------------------------------------------------------------------------------
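For reference, a sketch of how the scripts above are typically invoked from inside the container; the dataset title below is a placeholder, not a fixed value:

```sh
# run an experiment with a minor config override
python experiments/exp000_sample/run.py exp=001

# poll the latest Kaggle submission until it is scored
python tools/check_submission.py

# upload model weights and Hydra configs as a (new) Kaggle dataset
python tools/upload_model.py --title my-model --dir ./output/experiments --new
```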