├── .env
├── .gitignore
├── Dockerfile
├── Pipfile
├── Pipfile.lock
├── README.md
├── docker-compose.yaml
├── mlflow-project
├── MLproject
├── README.rst
├── als.py
├── conda.yaml
├── etl_data.py
├── load_raw_data.py
├── main.py
├── spark-defaults.conf
└── train_keras.py
├── notebooks
├── 1-sklearn-example.ipynb
├── 2-keras-example.ipynb
├── 3-mlflow-models-and-model-registry.ipynb
├── 4-mlproject-example.ipynb
└── experiment_tracking_template.png
├── requirements.txt
└── src
├── 0_hello_world.py
├── 1_sklearn_example.py
├── 1_sklearn_optuna.py
├── 2_keras_example.py
└── 4_mlproject_example.py
/.env:
--------------------------------------------------------------------------------
1 | export MLFLOW_TRACKING_URI="http://mlflow:5000"
2 | export AWS_ACCESS_KEY_ID="minioadmin"
3 | export AWS_SECRET_ACCESS_KEY="minioadmin"
4 | # NOTE: duplicate unquoted AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY exports removed —
5 | # they re-set the same credentials and the later values silently overrode the ones above.
6 | export AWS_DEFAULT_REGION=eu-west-1
7 | export AWS_ID=""
8 | export AWS_ARN=""
9 | export MLFLOW_S3_ENDPOINT_URL=http://s3:9000
10 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.swp
2 | # Created by https://www.toptal.com/developers/gitignore/api/python,jupyternotebooks,macos,pycharm
3 | # Edit at https://www.toptal.com/developers/gitignore?templates=python,jupyternotebooks,macos,pycharm
4 |
5 | ### JupyterNotebooks ###
6 | # gitignore template for Jupyter Notebooks
7 | # website: http://jupyter.org/
8 |
9 | .ipynb_checkpoints
10 | */.ipynb_checkpoints/*
11 |
12 | # IPython
13 | profile_default/
14 | ipython_config.py
15 |
16 | # Remove previous ipynb_checkpoints
17 | # git rm -r .ipynb_checkpoints/
18 |
19 | ### macOS ###
20 | # General
21 | .DS_Store
22 | .AppleDouble
23 | .LSOverride
24 |
25 | # Icon must end with two \r
26 | Icon
27 |
28 |
29 | # Thumbnails
30 | ._*
31 |
32 | # Files that might appear in the root of a volume
33 | .DocumentRevisions-V100
34 | .fseventsd
35 | .Spotlight-V100
36 | .TemporaryItems
37 | .Trashes
38 | .VolumeIcon.icns
39 | .com.apple.timemachine.donotpresent
40 |
41 | # Directories potentially created on remote AFP share
42 | .AppleDB
43 | .AppleDesktop
44 | Network Trash Folder
45 | Temporary Items
46 | .apdisk
47 |
48 | ### PyCharm ###
49 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
50 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
51 |
52 | # User-specific stuff
53 | .idea/**/workspace.xml
54 | .idea/**/tasks.xml
55 | .idea/**/usage.statistics.xml
56 | .idea/**/dictionaries
57 | .idea/**/shelf
58 |
59 | # Generated files
60 | .idea/**/contentModel.xml
61 |
62 | # Sensitive or high-churn files
63 | .idea/**/dataSources/
64 | .idea/**/dataSources.ids
65 | .idea/**/dataSources.local.xml
66 | .idea/**/sqlDataSources.xml
67 | .idea/**/dynamic.xml
68 | .idea/**/uiDesigner.xml
69 | .idea/**/dbnavigator.xml
70 |
71 | # Gradle
72 | .idea/**/gradle.xml
73 | .idea/**/libraries
74 |
75 | # Gradle and Maven with auto-import
76 | # When using Gradle or Maven with auto-import, you should exclude module files,
77 | # since they will be recreated, and may cause churn. Uncomment if using
78 | # auto-import.
79 | # .idea/artifacts
80 | # .idea/compiler.xml
81 | # .idea/jarRepositories.xml
82 | # .idea/modules.xml
83 | # .idea/*.iml
84 | # .idea/modules
85 | # *.iml
86 | # *.ipr
87 |
88 | # CMake
89 | cmake-build-*/
90 |
91 | # Mongo Explorer plugin
92 | .idea/**/mongoSettings.xml
93 |
94 | # File-based project format
95 | *.iws
96 |
97 | # IntelliJ
98 | out/
99 |
100 | # mpeltonen/sbt-idea plugin
101 | .idea_modules/
102 |
103 | # JIRA plugin
104 | atlassian-ide-plugin.xml
105 |
106 | # Cursive Clojure plugin
107 | .idea/replstate.xml
108 |
109 | # Crashlytics plugin (for Android Studio and IntelliJ)
110 | com_crashlytics_export_strings.xml
111 | crashlytics.properties
112 | crashlytics-build.properties
113 | fabric.properties
114 |
115 | # Editor-based Rest Client
116 | .idea/httpRequests
117 |
118 | # Android studio 3.1+ serialized cache file
119 | .idea/caches/build_file_checksums.ser
120 |
121 | ### PyCharm Patch ###
122 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
123 |
124 | # *.iml
125 | # modules.xml
126 | # .idea/misc.xml
127 | # *.ipr
128 |
129 | # Sonarlint plugin
130 | # https://plugins.jetbrains.com/plugin/7973-sonarlint
131 | .idea/**/sonarlint/
132 |
133 | # SonarQube Plugin
134 | # https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
135 | .idea/**/sonarIssues.xml
136 |
137 | # Markdown Navigator plugin
138 | # https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
139 | .idea/**/markdown-navigator.xml
140 | .idea/**/markdown-navigator-enh.xml
141 | .idea/**/markdown-navigator/
142 |
143 | # Cache file creation bug
144 | # See https://youtrack.jetbrains.com/issue/JBR-2257
145 | .idea/$CACHE_FILE$
146 |
147 | # CodeStream plugin
148 | # https://plugins.jetbrains.com/plugin/12206-codestream
149 | .idea/codestream.xml
150 |
151 | ### Python ###
152 | # Byte-compiled / optimized / DLL files
153 | __pycache__/
154 | *.py[cod]
155 | *$py.class
156 |
157 | # C extensions
158 | *.so
159 |
160 | # Distribution / packaging
161 | .Python
162 | build/
163 | develop-eggs/
164 | dist/
165 | downloads/
166 | eggs/
167 | .eggs/
168 | parts/
169 | sdist/
170 | var/
171 | wheels/
172 | pip-wheel-metadata/
173 | share/python-wheels/
174 | *.egg-info/
175 | .installed.cfg
176 | *.egg
177 | MANIFEST
178 |
179 | # PyInstaller
180 | # Usually these files are written by a python script from a template
181 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
182 | *.manifest
183 | *.spec
184 |
185 | # Installer logs
186 | pip-log.txt
187 | pip-delete-this-directory.txt
188 |
189 | # Unit test / coverage reports
190 | htmlcov/
191 | .tox/
192 | .nox/
193 | .coverage
194 | .coverage.*
195 | .cache
196 | nosetests.xml
197 | coverage.xml
198 | *.cover
199 | *.py,cover
200 | .hypothesis/
201 | .pytest_cache/
202 | pytestdebug.log
203 |
204 | # Translations
205 | *.mo
206 | *.pot
207 |
208 | # Django stuff:
209 | *.log
210 | local_settings.py
211 | db.sqlite3
212 | db.sqlite3-journal
213 |
214 | # Flask stuff:
215 | instance/
216 | .webassets-cache
217 |
218 | # Scrapy stuff:
219 | .scrapy
220 |
221 | # Sphinx documentation
222 | docs/_build/
223 | doc/_build/
224 |
225 | # PyBuilder
226 | target/
227 |
228 | # Jupyter Notebook
229 |
230 | # IPython
231 |
232 | # pyenv
233 | .python-version
234 |
235 | # pipenv
236 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
237 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
238 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
239 | # install all needed dependencies.
240 | #Pipfile.lock
241 |
242 | # poetry
243 | #poetry.lock
244 |
245 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
246 | __pypackages__/
247 |
248 | # Celery stuff
249 | celerybeat-schedule
250 | celerybeat.pid
251 |
252 | # SageMath parsed files
253 | *.sage.py
254 |
255 | # Environments
256 | .env
257 | .env/
258 | .venv/
259 | env/
260 | venv/
261 | ENV/
262 | env.bak/
263 | venv.bak/
264 | pythonenv*
265 |
266 | # Spyder project settings
267 | .spyderproject
268 | .spyproject
269 |
270 | # Rope project settings
271 | .ropeproject
272 |
273 | # mkdocs documentation
274 | /site
275 |
276 | # mypy
277 | .mypy_cache/
278 | .dmypy.json
279 | dmypy.json
280 |
281 | # Pyre type checker
282 | .pyre/
283 |
284 | # pytype static type analyzer
285 | .pytype/
286 |
287 | # operating system-related files
288 | # (gitignore has no trailing-comment syntax: macOS file-properties cache / Windows thumbnail cache)
289 | *.DS_Store
290 | Thumbs.db
290 |
291 | # profiling data
292 | *.prof
293 |
294 |
295 | # End of https://www.toptal.com/developers/gitignore/api/python,jupyternotebooks,macos,pycharm
296 |
297 | # mlflow
298 | mlruns/
299 | mlflow.db
300 | models/
301 | notebooks/mlruns/
302 | notebooks/mlflow.db
303 | notebooks/models/
304 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9.7
2 |
3 | ENV PIP_DISABLE_PIP_VERSION_CHECK=1
4 | ENV PIP_NO_CACHE_DIR=1
5 | ENV NB_PREFIX /
6 |
7 | # Install OpenJDK-11
8 | RUN apt-get update && \
9 |     apt-get install -y --no-install-recommends openjdk-11-jre-headless && \
10 |     apt-get clean && rm -rf /var/lib/apt/lists/*
11 |
12 | RUN pip install --upgrade pip==21.1.1 pipenv==2020.11.15 python-dotenv
13 |
14 | WORKDIR /app
15 |
16 | COPY Pipfile Pipfile.lock ./
17 |
18 | RUN pipenv install --system --deploy
19 |
20 | COPY . .
21 |
22 | CMD ["sh","-c", "jupyter notebook --notebook-dir=/app --ip=0.0.0.0 --no-browser --allow-root --port=8888 --NotebookApp.token='' --NotebookApp.password='' --NotebookApp.allow_origin='*' --NotebookApp.base_url=${NB_PREFIX}"]
23 |
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | url = "https://pypi.org/simple"
3 | verify_ssl = true
4 | name = "pypi"
5 |
6 | [packages]
7 | numpy = "*"
8 | pandas = "*"
9 | jupyter = "*"
10 | mlflow = "*"
11 | scikit-learn = "*"  # was "sklearn", the deprecated PyPI alias shim; run `pipenv lock` to refresh Pipfile.lock
12 | keras = "*"
13 | boto3 = "*"
14 | tensorflow = "*"
15 | pyspark = "*"
16 |
17 | [dev-packages]
18 |
19 | [requires]
20 | python_version = "3.9"
21 |
--------------------------------------------------------------------------------
/Pipfile.lock:
--------------------------------------------------------------------------------
1 | {
2 | "_meta": {
3 | "hash": {
4 | "sha256": "aec1be4f3dbc949fe83e5b2b43cdbaf8f7ee4a1a1653fb62a8a854f66d85795b"
5 | },
6 | "pipfile-spec": 6,
7 | "requires": {
8 | "python_version": "3.9"
9 | },
10 | "sources": [
11 | {
12 | "name": "pypi",
13 | "url": "https://pypi.org/simple",
14 | "verify_ssl": true
15 | }
16 | ]
17 | },
18 | "default": {
19 | "absl-py": {
20 | "hashes": [
21 | "sha256:7675ddb70ffd0e37a2e69322f8f2cd37b19e03a27f43420bea5850597ded9504",
22 | "sha256:81409f8c5c1601f47d57eaa548a8516a967ab45a43ef75e5dfceab2ab4b69143"
23 | ],
24 | "version": "==0.14.0"
25 | },
26 | "alembic": {
27 | "hashes": [
28 | "sha256:791a5686953c4b366d3228c5377196db2f534475bb38d26f70eb69668efd9028"
29 | ],
30 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
31 | "version": "==1.4.1"
32 | },
33 | "appnope": {
34 | "hashes": [
35 | "sha256:93aa393e9d6c54c5cd570ccadd8edad61ea0c4b9ea7a01409020c9aa019eb442",
36 | "sha256:dd83cd4b5b460958838f6eb3000c660b1f9caf2a5b1de4264e941512f603258a"
37 | ],
38 | "markers": "sys_platform == 'darwin' and platform_system == 'Darwin'",
39 | "version": "==0.1.2"
40 | },
41 | "argon2-cffi": {
42 | "hashes": [
43 | "sha256:165cadae5ac1e26644f5ade3bd9c18d89963be51d9ea8817bd671006d7909057",
44 | "sha256:217b4f0f853ccbbb5045242946ad2e162e396064575860141b71a85eb47e475a",
45 | "sha256:245f64a203012b144b7b8c8ea6d468cb02b37caa5afee5ba4a10c80599334f6a",
46 | "sha256:4ad152c418f7eb640eac41ac815534e6aa61d1624530b8e7779114ecfbf327f8",
47 | "sha256:566ffb581bbd9db5562327aee71b2eda24a1c15b23a356740abe3c011bbe0dcb",
48 | "sha256:65213a9174320a1aee03fe826596e0620783966b49eb636955958b3074e87ff9",
49 | "sha256:bc513db2283c385ea4da31a2cd039c33380701f376f4edd12fe56db118a3b21a",
50 | "sha256:c7a7c8cc98ac418002090e4add5bebfff1b915ea1cb459c578cd8206fef10378",
51 | "sha256:e4d8f0ae1524b7b0372a3e574a2561cbdddb3fdb6c28b70a72868189bda19659",
52 | "sha256:f710b61103d1a1f692ca3ecbd1373e28aa5e545ac625ba067ff2feca1b2bb870",
53 | "sha256:fa7e7d1fc22514a32b1761fdfa1882b6baa5c36bb3ef557bdd69e6fc9ba14a41"
54 | ],
55 | "markers": "python_version >= '3.5'",
56 | "version": "==21.1.0"
57 | },
58 | "astunparse": {
59 | "hashes": [
60 | "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872",
61 | "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"
62 | ],
63 | "version": "==1.6.3"
64 | },
65 | "attrs": {
66 | "hashes": [
67 | "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1",
68 | "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"
69 | ],
70 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
71 | "version": "==21.2.0"
72 | },
73 | "backcall": {
74 | "hashes": [
75 | "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e",
76 | "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"
77 | ],
78 | "version": "==0.2.0"
79 | },
80 | "bleach": {
81 | "hashes": [
82 | "sha256:0900d8b37eba61a802ee40ac0061f8c2b5dee29c1927dd1d233e075ebf5a71da",
83 | "sha256:4d2651ab93271d1129ac9cbc679f524565cc8a1b791909c4a51eac4446a15994"
84 | ],
85 | "markers": "python_version >= '3.6'",
86 | "version": "==4.1.0"
87 | },
88 | "boto3": {
89 | "hashes": [
90 | "sha256:6c9f2b50827ba9233a143b71746b7f1caee0467cf5004d7b43d5a7d5f468827b",
91 | "sha256:f7ab057556461678dda5597ceb08b233a82b7bbfb29b47d18b2fa102bd5ad680"
92 | ],
93 | "index": "pypi",
94 | "version": "==1.18.50"
95 | },
96 | "botocore": {
97 | "hashes": [
98 | "sha256:06e4d529071accac2ef93de1f9827dce0b41b0b6d9f81bd81d31aa2342d72631",
99 | "sha256:3c24e20ad2b155c5a031db2ed060191df282369c5e59225ef20126cdfa6f082f"
100 | ],
101 | "markers": "python_version >= '3.6'",
102 | "version": "==1.21.50"
103 | },
104 | "cachetools": {
105 | "hashes": [
106 | "sha256:2cc0b89715337ab6dbba85b5b50effe2b0c74e035d83ee8ed637cf52f12ae001",
107 | "sha256:61b5ed1e22a0924aed1d23b478f37e8d52549ff8a961de2909c69bf950020cff"
108 | ],
109 | "markers": "python_version ~= '3.5'",
110 | "version": "==4.2.2"
111 | },
112 | "certifi": {
113 | "hashes": [
114 | "sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee",
115 | "sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8"
116 | ],
117 | "version": "==2021.5.30"
118 | },
119 | "cffi": {
120 | "hashes": [
121 | "sha256:06c54a68935738d206570b20da5ef2b6b6d92b38ef3ec45c5422c0ebaf338d4d",
122 | "sha256:0c0591bee64e438883b0c92a7bed78f6290d40bf02e54c5bf0978eaf36061771",
123 | "sha256:19ca0dbdeda3b2615421d54bef8985f72af6e0c47082a8d26122adac81a95872",
124 | "sha256:22b9c3c320171c108e903d61a3723b51e37aaa8c81255b5e7ce102775bd01e2c",
125 | "sha256:26bb2549b72708c833f5abe62b756176022a7b9a7f689b571e74c8478ead51dc",
126 | "sha256:33791e8a2dc2953f28b8d8d300dde42dd929ac28f974c4b4c6272cb2955cb762",
127 | "sha256:3c8d896becff2fa653dc4438b54a5a25a971d1f4110b32bd3068db3722c80202",
128 | "sha256:4373612d59c404baeb7cbd788a18b2b2a8331abcc84c3ba40051fcd18b17a4d5",
129 | "sha256:487d63e1454627c8e47dd230025780e91869cfba4c753a74fda196a1f6ad6548",
130 | "sha256:48916e459c54c4a70e52745639f1db524542140433599e13911b2f329834276a",
131 | "sha256:4922cd707b25e623b902c86188aca466d3620892db76c0bdd7b99a3d5e61d35f",
132 | "sha256:55af55e32ae468e9946f741a5d51f9896da6b9bf0bbdd326843fec05c730eb20",
133 | "sha256:57e555a9feb4a8460415f1aac331a2dc833b1115284f7ded7278b54afc5bd218",
134 | "sha256:5d4b68e216fc65e9fe4f524c177b54964af043dde734807586cf5435af84045c",
135 | "sha256:64fda793737bc4037521d4899be780534b9aea552eb673b9833b01f945904c2e",
136 | "sha256:6d6169cb3c6c2ad50db5b868db6491a790300ade1ed5d1da29289d73bbe40b56",
137 | "sha256:7bcac9a2b4fdbed2c16fa5681356d7121ecabf041f18d97ed5b8e0dd38a80224",
138 | "sha256:80b06212075346b5546b0417b9f2bf467fea3bfe7352f781ffc05a8ab24ba14a",
139 | "sha256:818014c754cd3dba7229c0f5884396264d51ffb87ec86e927ef0be140bfdb0d2",
140 | "sha256:8eb687582ed7cd8c4bdbff3df6c0da443eb89c3c72e6e5dcdd9c81729712791a",
141 | "sha256:99f27fefe34c37ba9875f224a8f36e31d744d8083e00f520f133cab79ad5e819",
142 | "sha256:9f3e33c28cd39d1b655ed1ba7247133b6f7fc16fa16887b120c0c670e35ce346",
143 | "sha256:a8661b2ce9694ca01c529bfa204dbb144b275a31685a075ce123f12331be790b",
144 | "sha256:a9da7010cec5a12193d1af9872a00888f396aba3dc79186604a09ea3ee7c029e",
145 | "sha256:aedb15f0a5a5949ecb129a82b72b19df97bbbca024081ed2ef88bd5c0a610534",
146 | "sha256:b315d709717a99f4b27b59b021e6207c64620790ca3e0bde636a6c7f14618abb",
147 | "sha256:ba6f2b3f452e150945d58f4badd92310449876c4c954836cfb1803bdd7b422f0",
148 | "sha256:c33d18eb6e6bc36f09d793c0dc58b0211fccc6ae5149b808da4a62660678b156",
149 | "sha256:c9a875ce9d7fe32887784274dd533c57909b7b1dcadcc128a2ac21331a9765dd",
150 | "sha256:c9e005e9bd57bc987764c32a1bee4364c44fdc11a3cc20a40b93b444984f2b87",
151 | "sha256:d2ad4d668a5c0645d281dcd17aff2be3212bc109b33814bbb15c4939f44181cc",
152 | "sha256:d950695ae4381ecd856bcaf2b1e866720e4ab9a1498cba61c602e56630ca7195",
153 | "sha256:e22dcb48709fc51a7b58a927391b23ab37eb3737a98ac4338e2448bef8559b33",
154 | "sha256:e8c6a99be100371dbb046880e7a282152aa5d6127ae01783e37662ef73850d8f",
155 | "sha256:e9dc245e3ac69c92ee4c167fbdd7428ec1956d4e754223124991ef29eb57a09d",
156 | "sha256:eb687a11f0a7a1839719edd80f41e459cc5366857ecbed383ff376c4e3cc6afd",
157 | "sha256:eb9e2a346c5238a30a746893f23a9535e700f8192a68c07c0258e7ece6ff3728",
158 | "sha256:ed38b924ce794e505647f7c331b22a693bee1538fdf46b0222c4717b42f744e7",
159 | "sha256:f0010c6f9d1a4011e429109fda55a225921e3206e7f62a0c22a35344bfd13cca",
160 | "sha256:f0c5d1acbfca6ebdd6b1e3eded8d261affb6ddcf2186205518f1428b8569bb99",
161 | "sha256:f10afb1004f102c7868ebfe91c28f4a712227fe4cb24974350ace1f90e1febbf",
162 | "sha256:f174135f5609428cc6e1b9090f9268f5c8935fddb1b25ccb8255a2d50de6789e",
163 | "sha256:f3ebe6e73c319340830a9b2825d32eb6d8475c1dac020b4f0aa774ee3b898d1c",
164 | "sha256:f627688813d0a4140153ff532537fbe4afea5a3dffce1f9deb7f91f848a832b5",
165 | "sha256:fd4305f86f53dfd8cd3522269ed7fc34856a8ee3709a5e28b2836b2db9d4cd69"
166 | ],
167 | "version": "==1.14.6"
168 | },
169 | "charset-normalizer": {
170 | "hashes": [
171 | "sha256:5d209c0a931f215cee683b6445e2d77677e7e75e159f78def0db09d68fafcaa6",
172 | "sha256:5ec46d183433dcbd0ab716f2d7f29d8dee50505b3fdb40c6b985c7c4f5a3591f"
173 | ],
174 | "markers": "python_version >= '3'",
175 | "version": "==2.0.6"
176 | },
177 | "clang": {
178 | "hashes": [
179 | "sha256:b9301dff507041b5019b30ae710b78b0552c1ca1d4441b8dfa93c2e85078a5f8",
180 | "sha256:ceccae97eda0225a5b44d42ffd61102e248325c2865ca53e4407746464a5333a"
181 | ],
182 | "version": "==5.0"
183 | },
184 | "click": {
185 | "hashes": [
186 | "sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a",
187 | "sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"
188 | ],
189 | "markers": "python_version >= '3.6'",
190 | "version": "==8.0.1"
191 | },
192 | "cloudpickle": {
193 | "hashes": [
194 | "sha256:5cd02f3b417a783ba84a4ec3e290ff7929009fe51f6405423cfccfadd43ba4a4",
195 | "sha256:6b2df9741d06f43839a3275c4e6632f7df6487a1f181f5f46a052d3c917c3d11"
196 | ],
197 | "markers": "python_version >= '3.6'",
198 | "version": "==2.0.0"
199 | },
200 | "databricks-cli": {
201 | "hashes": [
202 | "sha256:4d0fbe3cb9c86d7342d45357f1db2e01011c507659f21b7a8144709cf8fa9f96",
203 | "sha256:b6189302a21c59749898507abe4d21206d63f4408d1fc057ddfe1ab4f9d608cc"
204 | ],
205 | "version": "==0.15.0"
206 | },
207 | "debugpy": {
208 | "hashes": [
209 | "sha256:0c523fcbb6fb395403ee8508853767b74949335d5cdacc9f83d350670c2c0db2",
210 | "sha256:135a77ac1a8f6ea49a69928f088967d36842bc492d89b45941c6b19222cffa42",
211 | "sha256:2019ffcd08d7e643c644cd64bee0fd53c730cb8f15ff37e6a320b5afd3785bfa",
212 | "sha256:3e4de96c70f3398abd1777f048b47564d98a40df1f72d33b47ef5b9478e07206",
213 | "sha256:4d53fe5aecf03ba466aa7fa7474c2b2fe28b2a6c0d36688d1e29382bfe88dd5f",
214 | "sha256:5ded60b402f83df46dee3f25ae5851809937176afdafd3fdbaab60b633b77cad",
215 | "sha256:7c15014290150b76f0311debf7fbba2e934680572ea60750b0f048143e873b3e",
216 | "sha256:7e7210a3721fc54b52d8dc2f325e7c937ffcbba02b808e2e3215dcbf0c0b8349",
217 | "sha256:847926f78c1e33f7318a743837adb6a9b360a825b558fd21f9240ba518fe1bb1",
218 | "sha256:88b17d7c2130968f75bdc706a33f46a8a6bb90f09512ea3bd984659d446ee4f4",
219 | "sha256:8d488356cc66172f1ea29635fd148ad131f13fad0e368ae03cc5c0a402372756",
220 | "sha256:ab3f33499c597a2ce454b81088e7f9d56127686e003c4f7a1c97ad4b38a55404",
221 | "sha256:c0fd1a66e104752f86ca2faa6a0194dae61442a768f85369fc3d11bacff8120f",
222 | "sha256:c3d7db37b7eb234e49f50ba22b3b1637e8daadd68985d9cd35a6152aa10faa75",
223 | "sha256:c9665e58b80d839ae1b0815341c63d00cae557c018f198c0b6b7bc5de9eca144",
224 | "sha256:dbda8f877c3dec1559c01c63a1de63969e51a4907dc308f4824238bb776026fe",
225 | "sha256:f3dcc294f3b4d79fdd7ffe1350d5d1e3cc29acaec67dd1c43143a43305bbbc91",
226 | "sha256:f907941ad7a460646773eb3baae4c88836e9256b390dfbfae8d92a3d3b849a7d"
227 | ],
228 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
229 | "version": "==1.4.3"
230 | },
231 | "decorator": {
232 | "hashes": [
233 | "sha256:7b12e7c3c6ab203a29e157335e9122cb03de9ab7264b137594103fd4a683b374",
234 | "sha256:e59913af105b9860aa2c8d3272d9de5a56a4e608db9a2f167a8480b323d529a7"
235 | ],
236 | "markers": "python_version >= '3.5'",
237 | "version": "==5.1.0"
238 | },
239 | "defusedxml": {
240 | "hashes": [
241 | "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69",
242 | "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"
243 | ],
244 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
245 | "version": "==0.7.1"
246 | },
247 | "docker": {
248 | "hashes": [
249 | "sha256:21ec4998e90dff7a7aaaa098ca8d839c7de412b89e6f6c30908372d58fecf663",
250 | "sha256:9b17f0723d83c1f3418d2aa17bf90b24dbe97deda06208dd4262fa30a6ee87eb"
251 | ],
252 | "markers": "python_version >= '3.6'",
253 | "version": "==5.0.2"
254 | },
255 | "entrypoints": {
256 | "hashes": [
257 | "sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19",
258 | "sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451"
259 | ],
260 | "markers": "python_version >= '2.7'",
261 | "version": "==0.3"
262 | },
263 | "flask": {
264 | "hashes": [
265 | "sha256:1c4c257b1892aec1398784c63791cbaa43062f1f7aeb555c4da961b20ee68f55",
266 | "sha256:a6209ca15eb63fc9385f38e452704113d679511d9574d09b2cf9183ae7d20dc9"
267 | ],
268 | "markers": "python_version >= '3.6'",
269 | "version": "==2.0.1"
270 | },
271 | "flatbuffers": {
272 | "hashes": [
273 | "sha256:63bb9a722d5e373701913e226135b28a6f6ac200d5cc7b4d919fa38d73b44610",
274 | "sha256:9e9ef47fa92625c4721036e7c4124182668dc6021d9e7c73704edd395648deb9"
275 | ],
276 | "version": "==1.12"
277 | },
278 | "gast": {
279 | "hashes": [
280 | "sha256:40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1",
281 | "sha256:b7adcdd5adbebf1adf17378da5ba3f543684dbec47b1cda1f3997e573cd542c4"
282 | ],
283 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
284 | "version": "==0.4.0"
285 | },
286 | "gitdb": {
287 | "hashes": [
288 | "sha256:6c4cc71933456991da20917998acbe6cf4fb41eeaab7d6d67fbc05ecd4c865b0",
289 | "sha256:96bf5c08b157a666fec41129e6d327235284cca4c81e92109260f353ba138005"
290 | ],
291 | "markers": "python_version >= '3.4'",
292 | "version": "==4.0.7"
293 | },
294 | "gitpython": {
295 | "hashes": [
296 | "sha256:dc0a7f2f697657acc8d7f89033e8b1ea94dd90356b2983bca89dc8d2ab3cc647",
297 | "sha256:df83fdf5e684fef7c6ee2c02fc68a5ceb7e7e759d08b694088d0cacb4eba59e5"
298 | ],
299 | "markers": "python_version >= '3.7'",
300 | "version": "==3.1.24"
301 | },
302 | "google-auth": {
303 | "hashes": [
304 | "sha256:997516b42ecb5b63e8d80f5632c1a61dddf41d2a4c2748057837e06e00014258",
305 | "sha256:b7033be9028c188ee30200b204ea00ed82ea1162e8ac1df4aa6ded19a191d88e"
306 | ],
307 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
308 | "version": "==1.35.0"
309 | },
310 | "google-auth-oauthlib": {
311 | "hashes": [
312 | "sha256:3f2a6e802eebbb6fb736a370fbf3b055edcb6b52878bf2f26330b5e041316c73",
313 | "sha256:a90a072f6993f2c327067bf65270046384cda5a8ecb20b94ea9a687f1f233a7a"
314 | ],
315 | "markers": "python_version >= '3.6'",
316 | "version": "==0.4.6"
317 | },
318 | "google-pasta": {
319 | "hashes": [
320 | "sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954",
321 | "sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed",
322 | "sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e"
323 | ],
324 | "version": "==0.2.0"
325 | },
326 | "greenlet": {
327 | "hashes": [
328 | "sha256:04e1849c88aa56584d4a0a6e36af5ec7cc37993fdc1fda72b56aa1394a92ded3",
329 | "sha256:05e72db813c28906cdc59bd0da7c325d9b82aa0b0543014059c34c8c4ad20e16",
330 | "sha256:07e6d88242e09b399682b39f8dfa1e7e6eca66b305de1ff74ed9eb1a7d8e539c",
331 | "sha256:090126004c8ab9cd0787e2acf63d79e80ab41a18f57d6448225bbfcba475034f",
332 | "sha256:1796f2c283faab2b71c67e9b9aefb3f201fdfbee5cb55001f5ffce9125f63a45",
333 | "sha256:2f89d74b4f423e756a018832cd7a0a571e0a31b9ca59323b77ce5f15a437629b",
334 | "sha256:34e6675167a238bede724ee60fe0550709e95adaff6a36bcc97006c365290384",
335 | "sha256:3e594015a2349ec6dcceda9aca29da8dc89e85b56825b7d1f138a3f6bb79dd4c",
336 | "sha256:3f8fc59bc5d64fa41f58b0029794f474223693fd00016b29f4e176b3ee2cfd9f",
337 | "sha256:3fc6a447735749d651d8919da49aab03c434a300e9f0af1c886d560405840fd1",
338 | "sha256:40abb7fec4f6294225d2b5464bb6d9552050ded14a7516588d6f010e7e366dcc",
339 | "sha256:44556302c0ab376e37939fd0058e1f0db2e769580d340fb03b01678d1ff25f68",
340 | "sha256:476ba9435afaead4382fbab8f1882f75e3fb2285c35c9285abb3dd30237f9142",
341 | "sha256:4870b018ca685ff573edd56b93f00a122f279640732bb52ce3a62b73ee5c4a92",
342 | "sha256:4adaf53ace289ced90797d92d767d37e7cdc29f13bd3830c3f0a561277a4ae83",
343 | "sha256:4eae94de9924bbb4d24960185363e614b1b62ff797c23dc3c8a7c75bbb8d187e",
344 | "sha256:5317701c7ce167205c0569c10abc4bd01c7f4cf93f642c39f2ce975fa9b78a3c",
345 | "sha256:5c3b735ccf8fc8048664ee415f8af5a3a018cc92010a0d7195395059b4b39b7d",
346 | "sha256:5cde7ee190196cbdc078511f4df0be367af85636b84d8be32230f4871b960687",
347 | "sha256:655ab836324a473d4cd8cf231a2d6f283ed71ed77037679da554e38e606a7117",
348 | "sha256:6ce9d0784c3c79f3e5c5c9c9517bbb6c7e8aa12372a5ea95197b8a99402aa0e6",
349 | "sha256:6e0696525500bc8aa12eae654095d2260db4dc95d5c35af2b486eae1bf914ccd",
350 | "sha256:75ff270fd05125dce3303e9216ccddc541a9e072d4fc764a9276d44dee87242b",
351 | "sha256:8039f5fe8030c43cd1732d9a234fdcbf4916fcc32e21745ca62e75023e4d4649",
352 | "sha256:84488516639c3c5e5c0e52f311fff94ebc45b56788c2a3bfe9cf8e75670f4de3",
353 | "sha256:84782c80a433d87530ae3f4b9ed58d4a57317d9918dfcc6a59115fa2d8731f2c",
354 | "sha256:8ddb38fb6ad96c2ef7468ff73ba5c6876b63b664eebb2c919c224261ae5e8378",
355 | "sha256:98b491976ed656be9445b79bc57ed21decf08a01aaaf5fdabf07c98c108111f6",
356 | "sha256:990e0f5e64bcbc6bdbd03774ecb72496224d13b664aa03afd1f9b171a3269272",
357 | "sha256:9b02e6039eafd75e029d8c58b7b1f3e450ca563ef1fe21c7e3e40b9936c8d03e",
358 | "sha256:a11b6199a0b9dc868990456a2667167d0ba096c5224f6258e452bfbe5a9742c5",
359 | "sha256:a414f8e14aa7bacfe1578f17c11d977e637d25383b6210587c29210af995ef04",
360 | "sha256:a91ee268f059583176c2c8b012a9fce7e49ca6b333a12bbc2dd01fc1a9783885",
361 | "sha256:ac991947ca6533ada4ce7095f0e28fe25d5b2f3266ad5b983ed4201e61596acf",
362 | "sha256:b050dbb96216db273b56f0e5960959c2b4cb679fe1e58a0c3906fa0a60c00662",
363 | "sha256:b97a807437b81f90f85022a9dcfd527deea38368a3979ccb49d93c9198b2c722",
364 | "sha256:bad269e442f1b7ffa3fa8820b3c3aa66f02a9f9455b5ba2db5a6f9eea96f56de",
365 | "sha256:bf3725d79b1ceb19e83fb1aed44095518c0fcff88fba06a76c0891cfd1f36837",
366 | "sha256:c0f22774cd8294078bdf7392ac73cf00bfa1e5e0ed644bd064fdabc5f2a2f481",
367 | "sha256:c1862f9f1031b1dee3ff00f1027fcd098ffc82120f43041fe67804b464bbd8a7",
368 | "sha256:c8d4ed48eed7414ccb2aaaecbc733ed2a84c299714eae3f0f48db085342d5629",
369 | "sha256:cf31e894dabb077a35bbe6963285d4515a387ff657bd25b0530c7168e48f167f",
370 | "sha256:d15cb6f8706678dc47fb4e4f8b339937b04eda48a0af1cca95f180db552e7663",
371 | "sha256:dfcb5a4056e161307d103bc013478892cfd919f1262c2bb8703220adcb986362",
372 | "sha256:e02780da03f84a671bb4205c5968c120f18df081236d7b5462b380fd4f0b497b",
373 | "sha256:e2002a59453858c7f3404690ae80f10c924a39f45f6095f18a985a1234c37334",
374 | "sha256:e22a82d2b416d9227a500c6860cf13e74060cf10e7daf6695cbf4e6a94e0eee4",
375 | "sha256:e41f72f225192d5d4df81dad2974a8943b0f2d664a2a5cfccdf5a01506f5523c",
376 | "sha256:f253dad38605486a4590f9368ecbace95865fea0f2b66615d121ac91fd1a1563",
377 | "sha256:fddfb31aa2ac550b938d952bca8a87f1db0f8dc930ffa14ce05b5c08d27e7fd1"
378 | ],
379 | "markers": "python_version >= '3' and platform_machine == 'aarch64' or (platform_machine == 'ppc64le' or (platform_machine == 'x86_64' or (platform_machine == 'amd64' or (platform_machine == 'AMD64' or (platform_machine == 'win32' or platform_machine == 'WIN32')))))",
380 | "version": "==1.1.1"
381 | },
382 | "grpcio": {
383 | "hashes": [
384 | "sha256:056806e83eaa09d0af0e452dd353db8f7c90aa2dedcce1112a2d21592550f6b1",
385 | "sha256:07594e585a5ba25cf331ddb63095ca51010c34e328a822cb772ffbd5daa62cb5",
386 | "sha256:0abd56d90dff3ed566807520de1385126dded21e62d3490a34c180a91f94c1f4",
387 | "sha256:15c04d695833c739dbb25c88eaf6abd9a461ec0dbd32f44bc8769335a495cf5a",
388 | "sha256:1820845e7e6410240eff97742e9f76cd5bf10ca01d36a322e86c0bd5340ac25b",
389 | "sha256:1bcbeac764bbae329bc2cc9e95d0f4d3b0fb456b92cf12e7e06e3e860a4b31cf",
390 | "sha256:2410000eb57cf76b05b37d2aee270b686f0a7876710850a2bba92b4ed133e026",
391 | "sha256:2882b62f74de8c8a4f7b2be066f6230ecc46f4edc8f42db1fb7358200abe3b25",
392 | "sha256:297ee755d3c6cd7e7d3770f298f4d4d4b000665943ae6d2888f7407418a9a510",
393 | "sha256:39ce785f0cbd07966a9019386b7a054615b2da63da3c7727f371304d000a1890",
394 | "sha256:3a92e4df5330cd384984e04804104ae34f521345917813aa86fc0930101a3697",
395 | "sha256:3bbeee115b05b22f6a9fa9bc78f9ab8d9d6bb8c16fdfc60401fc8658beae1099",
396 | "sha256:4537bb9e35af62c5189493792a8c34d127275a6d175c8ad48b6314cacba4021e",
397 | "sha256:462178987f0e5c60d6d1b79e4e95803a4cd789db961d6b3f087245906bb5ae04",
398 | "sha256:5292a627b44b6d3065de4a364ead23bab3c9d7a7c05416a9de0c0624d0fe03f4",
399 | "sha256:5502832b7cec670a880764f51a335a19b10ff5ab2e940e1ded67f39b88aa02b1",
400 | "sha256:585847ed190ea9cb4d632eb0ebf58f1d299bbca5e03284bc3d0fa08bab6ea365",
401 | "sha256:59645b2d9f19b5ff30cb46ddbcaa09c398f9cd81e4e476b21c7c55ae1e942807",
402 | "sha256:5d4b30d068b022e412adcf9b14c0d9bcbc872e9745b91467edc0a4c700a8bba6",
403 | "sha256:7033199706526e7ee06a362e38476dfdf2ddbad625c19b67ed30411d1bb25a18",
404 | "sha256:7b07cbbd4eea56738e995fcbba3b60e41fd9aa9dac937fb7985c5dcbc7626260",
405 | "sha256:7da3f6f6b857399c9ad85bcbffc83189e547a0a1a777ab68f5385154f8bc1ed4",
406 | "sha256:83c1e731c2b76f26689ad88534cafefe105dcf385567bead08f5857cb308246b",
407 | "sha256:9674a9d3f23702e35a89e22504f41b467893cf704f627cc9cdd118cf1dcc8e26",
408 | "sha256:9ecd0fc34aa46eeac24f4d20e67bafaf72ca914f99690bf2898674905eaddaf9",
409 | "sha256:a0c4bdd1d646365d10ba1468bcf234ea5ad46e8ce2b115983e8563248614910a",
410 | "sha256:a144f6cecbb61aace12e5920840338a3d246123a41d795e316e2792e9775ad15",
411 | "sha256:a3cd7f945d3e3b82ebd2a4c9862eb9891a5ac87f84a7db336acbeafd86e6c402",
412 | "sha256:a614224719579044bd7950554d3b4c1793bb5715cbf0f0399b1f21d283c40ef6",
413 | "sha256:ace080a9c3c673c42adfd2116875a63fec9613797be01a6105acf7721ed0c693",
414 | "sha256:b2de4e7b5a930be04a4d05c9f5fce7e9191217ccdc174b026c2a7928770dca9f",
415 | "sha256:b6b68c444abbaf4a2b944a61cf35726ab9645f45d416bcc7cf4addc4b2f2d53d",
416 | "sha256:be3c6ac822edb509aeef41361ca9c8c5ee52cb9e4973e1977d2bb7d6a460fd97",
417 | "sha256:c07acd49541f5f6f9984fe0adf162d77bf70e0f58e77f9960c6f571314ff63a4",
418 | "sha256:c1e0a4c86d4cbd93059d5eeceed6e1c2e3e1494e1bf40be9b8ab14302c576162",
419 | "sha256:c8c5bc498f6506b6041c30afb7a55c57a9fd535d1a0ac7cdba9b5fd791a85633",
420 | "sha256:c95dd6e60e059ff770a2ac9f5a202b75dd64d76b0cd0c48f27d58907e43ed6a6",
421 | "sha256:ccd2f1cf11768d1f6fbe4e13e8b8fb0ccfe9914ceeff55a367d5571e82eeb543",
422 | "sha256:d0cc0393744ce3ce1b237ae773635cc928470ff46fb0d3f677e337a38e5ed4f6",
423 | "sha256:d539ebd05a2bbfbf897d41738d37d162d5c3d9f2b1f8ddf2c4f75e2c9cf59907",
424 | "sha256:d71aa430b2ac40e18e388504ac34cc91d49d811855ca507c463a21059bf364f0",
425 | "sha256:dcb5f324712a104aca4a459e524e535f205f36deb8005feb4f9d3ff0a22b5177",
426 | "sha256:e516124010ef60d5fc2e0de0f1f987599249dc55fd529001f17f776a4145767f",
427 | "sha256:fb64abf0d92134cb0ba4496a3b7ab918588eee42de20e5b3507fe6ee16db97ee"
428 | ],
429 | "version": "==1.41.0"
430 | },
431 | "gunicorn": {
432 | "hashes": [
433 | "sha256:9dcc4547dbb1cb284accfb15ab5667a0e5d1881cc443e0677b4882a4067a807e",
434 | "sha256:e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8"
435 | ],
436 | "markers": "platform_system != 'Windows'",
437 | "version": "==20.1.0"
438 | },
439 | "h5py": {
440 | "hashes": [
441 | "sha256:02c391fdb980762a1cc03a4bcaecd03dc463994a9a63a02264830114a96e111f",
442 | "sha256:1cd367f89a5441236bdbb795e9fb9a9e3424929c00b4a54254ca760437f83d69",
443 | "sha256:1cdfd1c5449ca1329d152f0b66830e93226ebce4f5e07dd8dc16bfc2b1a49d7b",
444 | "sha256:1e2516f190652beedcb8c7acfa1c6fa92d99b42331cbef5e5c7ec2d65b0fc3c2",
445 | "sha256:236ac8d943be30b617ab615c3d4a4bf4a438add2be87e54af3687ab721a18fac",
446 | "sha256:2e37352ddfcf9d77a2a47f7c8f7e125c6d20cc06c2995edeb7be222d4e152636",
447 | "sha256:80c623be10479e81b64fa713b7ed4c0bbe9f02e8e7d2a2e5382336087b615ce4",
448 | "sha256:ba71f6229d2013fbb606476ecc29c6223fc16b244d35fcd8566ad9dbaf910857",
449 | "sha256:cb74df83709d6d03d11e60b9480812f58da34f194beafa8c8314dbbeeedfe0a6",
450 | "sha256:dccb89358bc84abcd711363c3e138f9f4eccfdf866f2139a8e72308328765b2c",
451 | "sha256:e33f61d3eb862614c0f273a1f993a64dc2f093e1a3094932c50ada9d2db2170f",
452 | "sha256:f89a3dae38843ffa49d17a31a3509a8129e9b46ece602a0138e1ed79e685c361",
453 | "sha256:fea05349f63625a8fb808e57e42bb4c76930cf5d50ac58b678c52f913a48a89b"
454 | ],
455 | "markers": "python_version >= '3.6'",
456 | "version": "==3.1.0"
457 | },
458 | "idna": {
459 | "hashes": [
460 | "sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a",
461 | "sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3"
462 | ],
463 | "markers": "python_version >= '3'",
464 | "version": "==3.2"
465 | },
466 | "importlib-metadata": {
467 | "hashes": [
468 | "sha256:b618b6d2d5ffa2f16add5697cf57a46c76a56229b0ed1c438322e4e95645bd15",
469 | "sha256:f284b3e11256ad1e5d03ab86bb2ccd6f5339688ff17a4d797a0fe7df326f23b1"
470 | ],
471 | "markers": "python_version >= '3.6'",
472 | "version": "==4.8.1"
473 | },
474 | "ipykernel": {
475 | "hashes": [
476 | "sha256:a3f6c2dda2ecf63b37446808a70ed825fea04790779ca524889c596deae0def8",
477 | "sha256:df3355e5eec23126bc89767a676c5f0abfc7f4c3497d118c592b83b316e8c0cd"
478 | ],
479 | "markers": "python_version >= '3.7'",
480 | "version": "==6.4.1"
481 | },
482 | "ipython": {
483 | "hashes": [
484 | "sha256:2097be5c814d1b974aea57673176a924c4c8c9583890e7a5f082f547b9975b11",
485 | "sha256:f16148f9163e1e526f1008d7c8d966d9c15600ca20d1a754287cf96d00ba6f1d"
486 | ],
487 | "markers": "python_version >= '3.3'",
488 | "version": "==7.28.0"
489 | },
490 | "ipython-genutils": {
491 | "hashes": [
492 | "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8",
493 | "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"
494 | ],
495 | "version": "==0.2.0"
496 | },
497 | "ipywidgets": {
498 | "hashes": [
499 | "sha256:00974f7cb4d5f8d494c19810fedb9fa9b64bffd3cda7c2be23c133a1ad3c99c5",
500 | "sha256:d258f582f915c62ea91023299603be095de19afb5ee271698f88327b9fe9bf43"
501 | ],
502 | "version": "==7.6.5"
503 | },
504 | "itsdangerous": {
505 | "hashes": [
506 | "sha256:5174094b9637652bdb841a3029700391451bd092ba3db90600dea710ba28e97c",
507 | "sha256:9e724d68fc22902a1435351f84c3fb8623f303fffcc566a4cb952df8c572cff0"
508 | ],
509 | "markers": "python_version >= '3.6'",
510 | "version": "==2.0.1"
511 | },
512 | "jedi": {
513 | "hashes": [
514 | "sha256:18456d83f65f400ab0c2d3319e48520420ef43b23a086fdc05dff34132f0fb93",
515 | "sha256:92550a404bad8afed881a137ec9a461fed49eca661414be45059329614ed0707"
516 | ],
517 | "markers": "python_version >= '3.6'",
518 | "version": "==0.18.0"
519 | },
520 | "jinja2": {
521 | "hashes": [
522 | "sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4",
523 | "sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4"
524 | ],
525 | "markers": "python_version >= '3.6'",
526 | "version": "==3.0.1"
527 | },
528 | "jmespath": {
529 | "hashes": [
530 | "sha256:b85d0567b8666149a93172712e68920734333c0ce7e89b78b3e987f71e5ed4f9",
531 | "sha256:cdf6525904cc597730141d61b36f2e4b8ecc257c420fa2f4549bac2c2d0cb72f"
532 | ],
533 | "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
534 | "version": "==0.10.0"
535 | },
536 | "joblib": {
537 | "hashes": [
538 | "sha256:9c17567692206d2f3fb9ecf5e991084254fe631665c450b443761c4186a613f7",
539 | "sha256:feeb1ec69c4d45129954f1b7034954241eedfd6ba39b5e9e4b6883be3332d5e5"
540 | ],
541 | "markers": "python_version >= '3.6'",
542 | "version": "==1.0.1"
543 | },
544 | "jsonschema": {
545 | "hashes": [
546 | "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163",
547 | "sha256:c8a85b28d377cc7737e46e2d9f2b4f44ee3c0e1deac6bf46ddefc7187d30797a"
548 | ],
549 | "version": "==3.2.0"
550 | },
551 | "jupyter": {
552 | "hashes": [
553 | "sha256:3e1f86076bbb7c8c207829390305a2b1fe836d471ed54be66a3b8c41e7f46cc7",
554 | "sha256:5b290f93b98ffbc21c0c7e749f054b3267782166d72fa5e3ed1ed4eaf34a2b78",
555 | "sha256:d9dc4b3318f310e34c82951ea5d6683f67bed7def4b259fafbfe4f1beb1d8e5f"
556 | ],
557 | "index": "pypi",
558 | "version": "==1.0.0"
559 | },
560 | "jupyter-client": {
561 | "hashes": [
562 | "sha256:0b18f85dfc4b2c48ee65ccd7d57382d151ecb8469cedde0138b7294c1a747d46",
563 | "sha256:67aaadbbbb022cd481146924e5911b56863177456aaf5622049c96ae384edc06"
564 | ],
565 | "markers": "python_full_version >= '3.6.1'",
566 | "version": "==7.0.4"
567 | },
568 | "jupyter-console": {
569 | "hashes": [
570 | "sha256:242248e1685039cd8bff2c2ecb7ce6c1546eb50ee3b08519729e6e881aec19c7",
571 | "sha256:7799c4ea951e0e96ba8260575423cb323ea5a03fcf5503560fa3e15748869e27"
572 | ],
573 | "markers": "python_version >= '3.6'",
574 | "version": "==6.4.0"
575 | },
576 | "jupyter-core": {
577 | "hashes": [
578 | "sha256:8dd262ec8afae95bd512518eb003bc546b76adbf34bf99410e9accdf4be9aa3a",
579 | "sha256:ef210dcb4fca04de07f2ead4adf408776aca94d17151d6f750ad6ded0b91ea16"
580 | ],
581 | "markers": "python_version >= '3.6'",
582 | "version": "==4.8.1"
583 | },
584 | "jupyterlab-pygments": {
585 | "hashes": [
586 | "sha256:abfb880fd1561987efaefcb2d2ac75145d2a5d0139b1876d5be806e32f630008",
587 | "sha256:cfcda0873626150932f438eccf0f8bf22bfa92345b814890ab360d666b254146"
588 | ],
589 | "version": "==0.1.2"
590 | },
591 | "jupyterlab-widgets": {
592 | "hashes": [
593 | "sha256:7885092b2b96bf189c3a705cc3c412a4472ec5e8382d0b47219a66cccae73cfa",
594 | "sha256:f5d9efface8ec62941173ba1cffb2edd0ecddc801c11ae2931e30b50492eb8f7"
595 | ],
596 | "markers": "python_version >= '3.6'",
597 | "version": "==1.0.2"
598 | },
599 | "keras": {
600 | "hashes": [
601 | "sha256:504af5656a9829fe803ce48a8580ef16916e89906aceddad9e098614269437e7"
602 | ],
603 | "index": "pypi",
604 | "version": "==2.6.0"
605 | },
606 | "keras-preprocessing": {
607 | "hashes": [
608 | "sha256:7b82029b130ff61cc99b55f3bd27427df4838576838c5b2f65940e4fcec99a7b",
609 | "sha256:add82567c50c8bc648c14195bf544a5ce7c1f76761536956c3d2978970179ef3"
610 | ],
611 | "version": "==1.1.2"
612 | },
613 | "mako": {
614 | "hashes": [
615 | "sha256:169fa52af22a91900d852e937400e79f535496191c63712e3b9fda5a9bed6fc3",
616 | "sha256:6804ee66a7f6a6416910463b00d76a7b25194cd27f1918500c5bd7be2a088a23"
617 | ],
618 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
619 | "version": "==1.1.5"
620 | },
621 | "markdown": {
622 | "hashes": [
623 | "sha256:31b5b491868dcc87d6c24b7e3d19a0d730d59d3e46f4eea6430a321bed387a49",
624 | "sha256:96c3ba1261de2f7547b46a00ea8463832c921d3f9d6aba3f255a6f71386db20c"
625 | ],
626 | "markers": "python_version >= '3.6'",
627 | "version": "==3.3.4"
628 | },
629 | "markupsafe": {
630 | "hashes": [
631 | "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298",
632 | "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64",
633 | "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b",
634 | "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567",
635 | "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff",
636 | "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724",
637 | "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74",
638 | "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646",
639 | "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35",
640 | "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6",
641 | "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6",
642 | "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad",
643 | "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26",
644 | "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38",
645 | "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac",
646 | "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7",
647 | "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6",
648 | "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75",
649 | "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f",
650 | "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135",
651 | "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8",
652 | "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a",
653 | "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a",
654 | "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9",
655 | "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864",
656 | "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914",
657 | "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18",
658 | "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8",
659 | "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2",
660 | "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d",
661 | "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b",
662 | "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b",
663 | "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f",
664 | "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb",
665 | "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833",
666 | "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28",
667 | "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415",
668 | "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902",
669 | "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d",
670 | "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9",
671 | "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d",
672 | "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145",
673 | "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066",
674 | "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c",
675 | "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1",
676 | "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f",
677 | "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53",
678 | "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134",
679 | "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85",
680 | "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5",
681 | "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94",
682 | "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509",
683 | "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51",
684 | "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"
685 | ],
686 | "markers": "python_version >= '3.6'",
687 | "version": "==2.0.1"
688 | },
689 | "matplotlib-inline": {
690 | "hashes": [
691 | "sha256:a04bfba22e0d1395479f866853ec1ee28eea1485c1d69a6faf00dc3e24ff34ee",
692 | "sha256:aed605ba3b72462d64d475a21a9296f400a19c4f74a31b59103d2a99ffd5aa5c"
693 | ],
694 | "markers": "python_version >= '3.5'",
695 | "version": "==0.1.3"
696 | },
697 | "mistune": {
698 | "hashes": [
699 | "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e",
700 | "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"
701 | ],
702 | "version": "==0.8.4"
703 | },
704 | "mlflow": {
705 | "hashes": [
706 | "sha256:963c22532e82a93450674ab97d62f9e528ed0906b580fadb7c003e696197557c",
707 | "sha256:b15ff0c7e5e64f864a0b40c99b9a582227315eca2065d9f831db9aeb8f24637b"
708 | ],
709 | "index": "pypi",
710 | "version": "==1.20.2"
711 | },
712 | "nbclient": {
713 | "hashes": [
714 | "sha256:6c8ad36a28edad4562580847f9f1636fe5316a51a323ed85a24a4ad37d4aefce",
715 | "sha256:95a300c6fbe73721736cf13972a46d8d666f78794b832866ed7197a504269e11"
716 | ],
717 | "markers": "python_full_version >= '3.6.1'",
718 | "version": "==0.5.4"
719 | },
720 | "nbconvert": {
721 | "hashes": [
722 | "sha256:16ceecd0afaa8fd26c245fa32e2c52066c02f13aa73387fffafd84750baea863",
723 | "sha256:b1b9dc4f1ff6cafae0e6d91f42fb9046fdc32e6beb6d7e2fa2cd7191ad535240"
724 | ],
725 | "markers": "python_version >= '3.7'",
726 | "version": "==6.2.0"
727 | },
728 | "nbformat": {
729 | "hashes": [
730 | "sha256:b516788ad70771c6250977c1374fcca6edebe6126fd2adb5a69aa5c2356fd1c8",
731 | "sha256:eb8447edd7127d043361bc17f2f5a807626bc8e878c7709a1c647abda28a9171"
732 | ],
733 | "markers": "python_version >= '3.5'",
734 | "version": "==5.1.3"
735 | },
736 | "nest-asyncio": {
737 | "hashes": [
738 | "sha256:76d6e972265063fe92a90b9cc4fb82616e07d586b346ed9d2c89a4187acea39c",
739 | "sha256:afc5a1c515210a23c461932765691ad39e8eba6551c055ac8d5546e69250d0aa"
740 | ],
741 | "markers": "python_version >= '3.5'",
742 | "version": "==1.5.1"
743 | },
744 | "notebook": {
745 | "hashes": [
746 | "sha256:26b0095c568e307a310fd78818ad8ebade4f00462dada4c0e34cbad632b9085d",
747 | "sha256:33488bdcc5cbef23c3cfa12cd51b0b5459a211945b5053d17405980611818149"
748 | ],
749 | "markers": "python_version >= '3.6'",
750 | "version": "==6.4.4"
751 | },
752 | "numpy": {
753 | "hashes": [
754 | "sha256:012426a41bc9ab63bb158635aecccc7610e3eff5d31d1eb43bc099debc979d94",
755 | "sha256:06fab248a088e439402141ea04f0fffb203723148f6ee791e9c75b3e9e82f080",
756 | "sha256:0eef32ca3132a48e43f6a0f5a82cb508f22ce5a3d6f67a8329c81c8e226d3f6e",
757 | "sha256:1ded4fce9cfaaf24e7a0ab51b7a87be9038ea1ace7f34b841fe3b6894c721d1c",
758 | "sha256:2e55195bc1c6b705bfd8ad6f288b38b11b1af32f3c8289d6c50d47f950c12e76",
759 | "sha256:2ea52bd92ab9f768cc64a4c3ef8f4b2580a17af0a5436f6126b08efbd1838371",
760 | "sha256:36674959eed6957e61f11c912f71e78857a8d0604171dfd9ce9ad5cbf41c511c",
761 | "sha256:384ec0463d1c2671170901994aeb6dce126de0a95ccc3976c43b0038a37329c2",
762 | "sha256:39b70c19ec771805081578cc936bbe95336798b7edf4732ed102e7a43ec5c07a",
763 | "sha256:400580cbd3cff6ffa6293df2278c75aef2d58d8d93d3c5614cd67981dae68ceb",
764 | "sha256:43d4c81d5ffdff6bae58d66a3cd7f54a7acd9a0e7b18d97abb255defc09e3140",
765 | "sha256:50a4a0ad0111cc1b71fa32dedd05fa239f7fb5a43a40663269bb5dc7877cfd28",
766 | "sha256:603aa0706be710eea8884af807b1b3bc9fb2e49b9f4da439e76000f3b3c6ff0f",
767 | "sha256:6149a185cece5ee78d1d196938b2a8f9d09f5a5ebfbba66969302a778d5ddd1d",
768 | "sha256:759e4095edc3c1b3ac031f34d9459fa781777a93ccc633a472a5468587a190ff",
769 | "sha256:7fb43004bce0ca31d8f13a6eb5e943fa73371381e53f7074ed21a4cb786c32f8",
770 | "sha256:811daee36a58dc79cf3d8bdd4a490e4277d0e4b7d103a001a4e73ddb48e7e6aa",
771 | "sha256:8b5e972b43c8fc27d56550b4120fe6257fdc15f9301914380b27f74856299fea",
772 | "sha256:99abf4f353c3d1a0c7a5f27699482c987cf663b1eac20db59b8c7b061eabd7fc",
773 | "sha256:a0d53e51a6cb6f0d9082decb7a4cb6dfb33055308c4c44f53103c073f649af73",
774 | "sha256:a12ff4c8ddfee61f90a1633a4c4afd3f7bcb32b11c52026c92a12e1325922d0d",
775 | "sha256:a4646724fba402aa7504cd48b4b50e783296b5e10a524c7a6da62e4a8ac9698d",
776 | "sha256:a76f502430dd98d7546e1ea2250a7360c065a5fdea52b2dffe8ae7180909b6f4",
777 | "sha256:a9d17f2be3b427fbb2bce61e596cf555d6f8a56c222bd2ca148baeeb5e5c783c",
778 | "sha256:ab83f24d5c52d60dbc8cd0528759532736b56db58adaa7b5f1f76ad551416a1e",
779 | "sha256:aeb9ed923be74e659984e321f609b9ba54a48354bfd168d21a2b072ed1e833ea",
780 | "sha256:c843b3f50d1ab7361ca4f0b3639bf691569493a56808a0b0c54a051d260b7dbd",
781 | "sha256:cae865b1cae1ec2663d8ea56ef6ff185bad091a5e33ebbadd98de2cfa3fa668f",
782 | "sha256:cc6bd4fd593cb261332568485e20a0712883cf631f6f5e8e86a52caa8b2b50ff",
783 | "sha256:cf2402002d3d9f91c8b01e66fbb436a4ed01c6498fffed0e4c7566da1d40ee1e",
784 | "sha256:d051ec1c64b85ecc69531e1137bb9751c6830772ee5c1c426dbcfe98ef5788d7",
785 | "sha256:d6631f2e867676b13026e2846180e2c13c1e11289d67da08d71cacb2cd93d4aa",
786 | "sha256:dbd18bcf4889b720ba13a27ec2f2aac1981bd41203b3a3b27ba7a33f88ae4827",
787 | "sha256:df609c82f18c5b9f6cb97271f03315ff0dbe481a2a02e56aeb1b1a985ce38e60"
788 | ],
789 | "index": "pypi",
790 | "version": "==1.19.5"
791 | },
792 | "oauthlib": {
793 | "hashes": [
794 | "sha256:42bf6354c2ed8c6acb54d971fce6f88193d97297e18602a3a886603f9d7730cc",
795 | "sha256:8f0215fcc533dd8dd1bee6f4c412d4f0cd7297307d43ac61666389e3bc3198a3"
796 | ],
797 | "markers": "python_version >= '3.6'",
798 | "version": "==3.1.1"
799 | },
800 | "opt-einsum": {
801 | "hashes": [
802 | "sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147",
803 | "sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549"
804 | ],
805 | "markers": "python_version >= '3.5'",
806 | "version": "==3.3.0"
807 | },
808 | "packaging": {
809 | "hashes": [
810 | "sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7",
811 | "sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14"
812 | ],
813 | "markers": "python_version >= '3.6'",
814 | "version": "==21.0"
815 | },
816 | "pandas": {
817 | "hashes": [
818 | "sha256:272c8cb14aa9793eada6b1ebe81994616e647b5892a370c7135efb2924b701df",
819 | "sha256:3334a5a9eeaca953b9db1b2b165dcdc5180b5011f3bec3a57a3580c9c22eae68",
820 | "sha256:37d63e78e87eb3791da7be4100a65da0383670c2b59e493d9e73098d7a879226",
821 | "sha256:3f5020613c1d8e304840c34aeb171377dc755521bf5e69804991030c2a48aec3",
822 | "sha256:45649503e167d45360aa7c52f18d1591a6d5c70d2f3a26bc90a3297a30ce9a66",
823 | "sha256:49fd2889d8116d7acef0709e4c82b8560a8b22b0f77471391d12c27596e90267",
824 | "sha256:4def2ef2fb7fcd62f2aa51bacb817ee9029e5c8efe42fe527ba21f6a3ddf1a9f",
825 | "sha256:53e2fb11f86f6253bb1df26e3aeab3bf2e000aaa32a953ec394571bec5dc6fd6",
826 | "sha256:629138b7cf81a2e55aa29ce7b04c1cece20485271d1f6c469c6a0c03857db6a4",
827 | "sha256:68408a39a54ebadb9014ee5a4fae27b2fe524317bc80adf56c9ac59e8f8ea431",
828 | "sha256:7326b37de08d42dd3fff5b7ef7691d0fd0bf2428f4ba5a2bdc3b3247e9a52e4c",
829 | "sha256:7557b39c8e86eb0543a17a002ac1ea0f38911c3c17095bc9350d0a65b32d801c",
830 | "sha256:86b16b1b920c4cb27fdd65a2c20258bcd9c794be491290660722bb0ea765054d",
831 | "sha256:a800df4e101b721e94d04c355e611863cc31887f24c0b019572e26518cbbcab6",
832 | "sha256:a9f1b54d7efc9df05320b14a48fb18686f781aa66cc7b47bb62fabfc67a0985c",
833 | "sha256:c399200631db9bd9335d013ec7fce4edb98651035c249d532945c78ad453f23a",
834 | "sha256:e574c2637c9d27f322e911650b36e858c885702c5996eda8a5a60e35e6648cf2",
835 | "sha256:e9bc59855598cb57f68fdabd4897d3ed2bc3a3b3bef7b868a0153c4cd03f3207",
836 | "sha256:ebbed7312547a924df0cbe133ff1250eeb94cdff3c09a794dc991c5621c8c735",
837 | "sha256:ed2f29b4da6f6ae7c68f4b3708d9d9e59fa89b2f9e87c2b64ce055cbd39f729e",
838 | "sha256:f7d84f321674c2f0f31887ee6d5755c54ca1ea5e144d6d54b3bbf566dd9ea0cc"
839 | ],
840 | "index": "pypi",
841 | "version": "==1.3.3"
842 | },
843 | "pandocfilters": {
844 | "hashes": [
845 | "sha256:0b679503337d233b4339a817bfc8c50064e2eff681314376a47cb582305a7a38",
846 | "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f"
847 | ],
848 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
849 | "version": "==1.5.0"
850 | },
851 | "parso": {
852 | "hashes": [
853 | "sha256:12b83492c6239ce32ff5eed6d3639d6a536170723c6f3f1506869f1ace413398",
854 | "sha256:a8c4922db71e4fdb90e0d0bc6e50f9b273d3397925e5e60a717e719201778d22"
855 | ],
856 | "markers": "python_version >= '3.6'",
857 | "version": "==0.8.2"
858 | },
859 | "pexpect": {
860 | "hashes": [
861 | "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937",
862 | "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"
863 | ],
864 | "markers": "sys_platform != 'win32'",
865 | "version": "==4.8.0"
866 | },
867 | "pickleshare": {
868 | "hashes": [
869 | "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca",
870 | "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"
871 | ],
872 | "version": "==0.7.5"
873 | },
874 | "prometheus-client": {
875 | "hashes": [
876 | "sha256:3a8baade6cb80bcfe43297e33e7623f3118d660d41387593758e2fb1ea173a86",
877 | "sha256:b014bc76815eb1399da8ce5fc84b7717a3e63652b0c0f8804092c9363acab1b2"
878 | ],
879 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
880 | "version": "==0.11.0"
881 | },
882 | "prometheus-flask-exporter": {
883 | "hashes": [
884 | "sha256:fc487e385d95cb5efd045d6a315c4ecf68c42661e7bfde0526af75ed3c4f8c1b"
885 | ],
886 | "version": "==0.18.2"
887 | },
888 | "prompt-toolkit": {
889 | "hashes": [
890 | "sha256:6076e46efae19b1e0ca1ec003ed37a933dc94b4d20f486235d436e64771dcd5c",
891 | "sha256:eb71d5a6b72ce6db177af4a7d4d7085b99756bf656d98ffcc4fecd36850eea6c"
892 | ],
893 | "markers": "python_full_version >= '3.6.2'",
894 | "version": "==3.0.20"
895 | },
896 | "protobuf": {
897 | "hashes": [
898 | "sha256:0a59ea8da307118372750e2fdfe0961622e675b8dd35e05c42384d618189a938",
899 | "sha256:17181fc0814655812aac108e755bd5185d71aa8d81bd241cec6e232c84097918",
900 | "sha256:18b308946a592e245299391e53c01b5b8efc2794f49986e80f37d7b5e60a270f",
901 | "sha256:1f3ecec3038c2fb4dad952d3d6cb9ca301999903a09e43794fb348da48f7577f",
902 | "sha256:3b5b81bb665aac548b413480f4e0d8c38a74bc4dea57835f288a3ce74f63dfe9",
903 | "sha256:42c04e66ec5a38ad2171639dc9860c2f9594668f709ea3a4a192acf7346853a7",
904 | "sha256:5201333b7aa711965c5769b250f8565a9924e8e27f8b622bbc5e6847aeaab1b1",
905 | "sha256:568c049ff002a7523ed33fb612e6b97da002bf87ffb619a1fc3eadf2257a3b31",
906 | "sha256:5730de255c95b3403eedd1a568eb28203b913b6192ff5a3fdc3ff30f37107a38",
907 | "sha256:615099e52e9fbc9fde00177267a94ca820ecf4e80093e390753568b7d8cb3c1a",
908 | "sha256:7646c20605fbee57e77fdbc4a90175538281b152f46ba17019916593f8062c2a",
909 | "sha256:7e791a94db391ae22b3943fc88f6ba0e1f62b6ad58b33db7517df576c7834d23",
910 | "sha256:80b0a5157f3a53043daf8eb7cfa1220b27a5a63dd6655dbd8e1e6f7b5dcd6347",
911 | "sha256:877664b1b8d1e23553634f625e4e12aae4ff16cbbef473f8118c239d478f422a",
912 | "sha256:9072cb18fca8998b77f969fb74d25a11d7f4a39a8b1ddc3cf76cd5abda8499cb",
913 | "sha256:9147565f93e6699d7512747766598afe63205f226ac7b61f47954974c9aab852",
914 | "sha256:93c077fd83879cf48f327a2491c24da447a09da6a7ab3cc311a6f5a61fcb5de0",
915 | "sha256:d11465040cadcea8ecf5f0b131af5099a9696f9d0bef6f88148b372bacc1c52d",
916 | "sha256:f589346b5b3f702c1d30e2343c9897e6c35e7bd495c10a0e17d11ecb5ee5bd06",
917 | "sha256:f6138462643adce0ed6e49007a63b7fd7dc4fda1ef4e15a70fcebe76c1407a71",
918 | "sha256:f7c8193ec805324ff6024242b00f64a24b94d56b895f62bf28a9d72a228d4fca"
919 | ],
920 | "version": "==3.18.0"
921 | },
922 | "ptyprocess": {
923 | "hashes": [
924 | "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35",
925 | "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"
926 | ],
927 | "markers": "os_name != 'nt'",
928 | "version": "==0.7.0"
929 | },
930 | "py4j": {
931 | "hashes": [
932 | "sha256:36ec57f43ff8ced260a18aa9a4e46c3500a730cac8860e259cbaa546c2b9db2f",
933 | "sha256:859ba728a7bb43e9c2bf058832759fb97a598bb28cc12f34f5fc4abdec08ede6"
934 | ],
935 | "version": "==0.10.9"
936 | },
937 | "pyasn1": {
938 | "hashes": [
939 | "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359",
940 | "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576",
941 | "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf",
942 | "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7",
943 | "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d",
944 | "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00",
945 | "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8",
946 | "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86",
947 | "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12",
948 | "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776",
949 | "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba",
950 | "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2",
951 | "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"
952 | ],
953 | "version": "==0.4.8"
954 | },
955 | "pyasn1-modules": {
956 | "hashes": [
957 | "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8",
958 | "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199",
959 | "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811",
960 | "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed",
961 | "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4",
962 | "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e",
963 | "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74",
964 | "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb",
965 | "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45",
966 | "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd",
967 | "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0",
968 | "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d",
969 | "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"
970 | ],
971 | "version": "==0.2.8"
972 | },
973 | "pycparser": {
974 | "hashes": [
975 | "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0",
976 | "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"
977 | ],
978 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
979 | "version": "==2.20"
980 | },
981 | "pygments": {
982 | "hashes": [
983 | "sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380",
984 | "sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6"
985 | ],
986 | "markers": "python_version >= '3.5'",
987 | "version": "==2.10.0"
988 | },
989 | "pyparsing": {
990 | "hashes": [
991 | "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
992 | "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
993 | ],
994 | "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
995 | "version": "==2.4.7"
996 | },
997 | "pyrsistent": {
998 | "hashes": [
999 | "sha256:097b96f129dd36a8c9e33594e7ebb151b1515eb52cceb08474c10a5479e799f2",
1000 | "sha256:2aaf19dc8ce517a8653746d98e962ef480ff34b6bc563fc067be6401ffb457c7",
1001 | "sha256:404e1f1d254d314d55adb8d87f4f465c8693d6f902f67eb6ef5b4526dc58e6ea",
1002 | "sha256:48578680353f41dca1ca3dc48629fb77dfc745128b56fc01096b2530c13fd426",
1003 | "sha256:4916c10896721e472ee12c95cdc2891ce5890898d2f9907b1b4ae0f53588b710",
1004 | "sha256:527be2bfa8dc80f6f8ddd65242ba476a6c4fb4e3aedbf281dfbac1b1ed4165b1",
1005 | "sha256:58a70d93fb79dc585b21f9d72487b929a6fe58da0754fa4cb9f279bb92369396",
1006 | "sha256:5e4395bbf841693eaebaa5bb5c8f5cdbb1d139e07c975c682ec4e4f8126e03d2",
1007 | "sha256:6b5eed00e597b5b5773b4ca30bd48a5774ef1e96f2a45d105db5b4ebb4bca680",
1008 | "sha256:73ff61b1411e3fb0ba144b8f08d6749749775fe89688093e1efef9839d2dcc35",
1009 | "sha256:772e94c2c6864f2cd2ffbe58bb3bdefbe2a32afa0acb1a77e472aac831f83427",
1010 | "sha256:773c781216f8c2900b42a7b638d5b517bb134ae1acbebe4d1e8f1f41ea60eb4b",
1011 | "sha256:a0c772d791c38bbc77be659af29bb14c38ced151433592e326361610250c605b",
1012 | "sha256:b29b869cf58412ca5738d23691e96d8aff535e17390128a1a52717c9a109da4f",
1013 | "sha256:c1a9ff320fa699337e05edcaae79ef8c2880b52720bc031b219e5b5008ebbdef",
1014 | "sha256:cd3caef37a415fd0dae6148a1b6957a8c5f275a62cca02e18474608cb263640c",
1015 | "sha256:d5ec194c9c573aafaceebf05fc400656722793dac57f254cd4741f3c27ae57b4",
1016 | "sha256:da6e5e818d18459fa46fac0a4a4e543507fe1110e808101277c5a2b5bab0cd2d",
1017 | "sha256:e79d94ca58fcafef6395f6352383fa1a76922268fa02caa2272fff501c2fdc78",
1018 | "sha256:f3ef98d7b76da5eb19c37fda834d50262ff9167c65658d1d8f974d2e4d90676b",
1019 | "sha256:f4c8cabb46ff8e5d61f56a037974228e978f26bfefce4f61a4b1ac0ba7a2ab72"
1020 | ],
1021 | "markers": "python_version >= '3.6'",
1022 | "version": "==0.18.0"
1023 | },
1024 | "pyspark": {
1025 | "hashes": [
1026 | "sha256:5e25ebb18756e9715f4d26848cc7e558035025da74b4fc325a0ebc05ff538e65"
1027 | ],
1028 | "index": "pypi",
1029 | "version": "==3.1.2"
1030 | },
1031 | "python-dateutil": {
1032 | "hashes": [
1033 | "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86",
1034 | "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"
1035 | ],
1036 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
1037 | "version": "==2.8.2"
1038 | },
1039 | "python-editor": {
1040 | "hashes": [
1041 | "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d",
1042 | "sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b",
1043 | "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8",
1044 | "sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77",
1045 | "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522"
1046 | ],
1047 | "version": "==1.0.4"
1048 | },
1049 | "pytz": {
1050 | "hashes": [
1051 | "sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da",
1052 | "sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798"
1053 | ],
1054 | "version": "==2021.1"
1055 | },
1056 | "pyyaml": {
1057 | "hashes": [
1058 | "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf",
1059 | "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696",
1060 | "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393",
1061 | "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77",
1062 | "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922",
1063 | "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5",
1064 | "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8",
1065 | "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10",
1066 | "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc",
1067 | "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018",
1068 | "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e",
1069 | "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253",
1070 | "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347",
1071 | "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183",
1072 | "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541",
1073 | "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb",
1074 | "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185",
1075 | "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc",
1076 | "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db",
1077 | "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa",
1078 | "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46",
1079 | "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122",
1080 | "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b",
1081 | "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63",
1082 | "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df",
1083 | "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc",
1084 | "sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247",
1085 | "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6",
1086 | "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0"
1087 | ],
1088 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
1089 | "version": "==5.4.1"
1090 | },
1091 | "pyzmq": {
1092 | "hashes": [
1093 | "sha256:0ca6cd58f62a2751728016d40082008d3b3412a7f28ddfb4a2f0d3c130f69e74",
1094 | "sha256:1621e7a2af72cced1f6ec8ca8ca91d0f76ac236ab2e8828ac8fe909512d566cb",
1095 | "sha256:18cd854b423fce44951c3a4d3e686bac8f1243d954f579e120a1714096637cc0",
1096 | "sha256:2841997a0d85b998cbafecb4183caf51fd19c4357075dfd33eb7efea57e4c149",
1097 | "sha256:2b97502c16a5ec611cd52410bdfaab264997c627a46b0f98d3f666227fd1ea2d",
1098 | "sha256:3a4c9886d61d386b2b493377d980f502186cd71d501fffdba52bd2a0880cef4f",
1099 | "sha256:3c1895c95be92600233e476fe283f042e71cf8f0b938aabf21b7aafa62a8dac9",
1100 | "sha256:42abddebe2c6a35180ca549fadc7228d23c1e1f76167c5ebc8a936b5804ea2df",
1101 | "sha256:480b9931bfb08bf8b094edd4836271d4d6b44150da051547d8c7113bf947a8b0",
1102 | "sha256:67db33bea0a29d03e6eeec55a8190e033318cee3cbc732ba8fd939617cbf762d",
1103 | "sha256:6b217b8f9dfb6628f74b94bdaf9f7408708cb02167d644edca33f38746ca12dd",
1104 | "sha256:7661fc1d5cb73481cf710a1418a4e1e301ed7d5d924f91c67ba84b2a1b89defd",
1105 | "sha256:76c532fd68b93998aab92356be280deec5de8f8fe59cd28763d2cc8a58747b7f",
1106 | "sha256:79244b9e97948eaf38695f4b8e6fc63b14b78cc37f403c6642ba555517ac1268",
1107 | "sha256:7c58f598d9fcc52772b89a92d72bf8829c12d09746a6d2c724c5b30076c1f11d",
1108 | "sha256:7dc09198e4073e6015d9a8ea093fc348d4e59de49382476940c3dd9ae156fba8",
1109 | "sha256:80e043a89c6cadefd3a0712f8a1322038e819ebe9dbac7eca3bce1721bcb63bf",
1110 | "sha256:851977788b9caa8ed011f5f643d3ee8653af02c5fc723fa350db5125abf2be7b",
1111 | "sha256:8eddc033e716f8c91c6a2112f0a8ebc5e00532b4a6ae1eb0ccc48e027f9c671c",
1112 | "sha256:954e73c9cd4d6ae319f1c936ad159072b6d356a92dcbbabfd6e6204b9a79d356",
1113 | "sha256:ab888624ed68930442a3f3b0b921ad7439c51ba122dbc8c386e6487a658e4a4e",
1114 | "sha256:acebba1a23fb9d72b42471c3771b6f2f18dcd46df77482612054bd45c07dfa36",
1115 | "sha256:b4ebed0977f92320f6686c96e9e8dd29eed199eb8d066936bac991afc37cbb70",
1116 | "sha256:be4e0f229cf3a71f9ecd633566bd6f80d9fa6afaaff5489492be63fe459ef98c",
1117 | "sha256:c0f84360dcca3481e8674393bdf931f9f10470988f87311b19d23cda869bb6b7",
1118 | "sha256:c1e41b32d6f7f9c26bc731a8b529ff592f31fc8b6ef2be9fa74abd05c8a342d7",
1119 | "sha256:cf98fd7a6c8aaa08dbc699ffae33fd71175696d78028281bc7b832b26f00ca57",
1120 | "sha256:d072f7dfbdb184f0786d63bda26e8a0882041b1e393fbe98940395f7fab4c5e2",
1121 | "sha256:d3dcb5548ead4f1123851a5ced467791f6986d68c656bc63bfff1bf9e36671e2",
1122 | "sha256:d6157793719de168b199194f6b6173f0ccd3bf3499e6870fac17086072e39115",
1123 | "sha256:d728b08448e5ac3e4d886b165385a262883c34b84a7fe1166277fe675e1c197a",
1124 | "sha256:de8df0684398bd74ad160afdc2a118ca28384ac6f5e234eb0508858d8d2d9364",
1125 | "sha256:e6a02cf7271ee94674a44f4e62aa061d2d049001c844657740e156596298b70b",
1126 | "sha256:ea12133df25e3a6918718fbb9a510c6ee5d3fdd5a346320421aac3882f4feeea",
1127 | "sha256:f43b4a2e6218371dd4f41e547bd919ceeb6ebf4abf31a7a0669cd11cd91ea973",
1128 | "sha256:f762442bab706fd874064ca218b33a1d8e40d4938e96c24dafd9b12e28017f45",
1129 | "sha256:f89468059ebc519a7acde1ee50b779019535db8dcf9b8c162ef669257fef7a93"
1130 | ],
1131 | "markers": "python_version >= '3.6'",
1132 | "version": "==22.3.0"
1133 | },
1134 | "qtconsole": {
1135 | "hashes": [
1136 | "sha256:73994105b0369bb99f4164df4a131010f3c7b33a7b5169c37366358d8744675b",
1137 | "sha256:bbc34bca14f65535afcb401bc74b752bac955e5313001ba640383f7e5857dc49"
1138 | ],
1139 | "markers": "python_version >= '3.6'",
1140 | "version": "==5.1.1"
1141 | },
1142 | "qtpy": {
1143 | "hashes": [
1144 | "sha256:83c502973e9fdd7b648d8267a421229ea3d9a0651c22e4c65a4d9228479c39b6",
1145 | "sha256:d6e4ae3a41f1fcb19762b58f35ad6dd443b4bdc867a4cb81ef10ccd85403c92b"
1146 | ],
1147 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
1148 | "version": "==1.11.2"
1149 | },
1150 | "querystring-parser": {
1151 | "hashes": [
1152 | "sha256:644fce1cffe0530453b43a83a38094dbe422ccba8c9b2f2a1c00280e14ca8a62",
1153 | "sha256:d2fa90765eaf0de96c8b087872991a10238e89ba015ae59fedfed6bd61c242a0"
1154 | ],
1155 | "version": "==1.2.4"
1156 | },
1157 | "requests": {
1158 | "hashes": [
1159 | "sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24",
1160 | "sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"
1161 | ],
1162 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
1163 | "version": "==2.26.0"
1164 | },
1165 | "requests-oauthlib": {
1166 | "hashes": [
1167 | "sha256:7f71572defaecd16372f9006f33c2ec8c077c3cfa6f5911a9a90202beb513f3d",
1168 | "sha256:b4261601a71fd721a8bd6d7aa1cc1d6a8a93b4a9f5e96626f8e4d91e8beeaa6a",
1169 | "sha256:fa6c47b933f01060936d87ae9327fead68768b69c6c9ea2109c48be30f2d4dbc"
1170 | ],
1171 | "version": "==1.3.0"
1172 | },
1173 | "rsa": {
1174 | "hashes": [
1175 | "sha256:78f9a9bf4e7be0c5ded4583326e7461e3a3c5aae24073648b4bdfa797d78c9d2",
1176 | "sha256:9d689e6ca1b3038bc82bf8d23e944b6b6037bc02301a574935b2dd946e0353b9"
1177 | ],
1178 | "markers": "python_version >= '3.6'",
1179 | "version": "==4.7.2"
1180 | },
1181 | "s3transfer": {
1182 | "hashes": [
1183 | "sha256:50ed823e1dc5868ad40c8dc92072f757aa0e653a192845c94a3b676f4a62da4c",
1184 | "sha256:9c1dc369814391a6bda20ebbf4b70a0f34630592c9aa520856bf384916af2803"
1185 | ],
1186 | "markers": "python_version >= '3.6'",
1187 | "version": "==0.5.0"
1188 | },
1189 | "scikit-learn": {
1190 | "hashes": [
1191 | "sha256:121f78d6564000dc5e968394f45aac87981fcaaf2be40cfcd8f07b2baa1e1829",
1192 | "sha256:14bd46639b2149b3ed613adc095511313a0db62ba9fa31117bdcb5c23722e93b",
1193 | "sha256:190c178028f9073d9f61cd30a19c685993236b9b2df884f16608cbb3ff03800b",
1194 | "sha256:29559c207616604bbaa664bf98eed81b32d9f3d4c975065a206a5e2b268fe784",
1195 | "sha256:4cb5ccb2b63c617ead48c6d92001273ad1b0e8e2bd4a4857edb58749a88b6d82",
1196 | "sha256:555f4b4c10d3bef9e3cda63c3b45670a091fb50328fccd54948cd8a7cf887198",
1197 | "sha256:56ab58978c7aa181856a42f8f491be953b755105040aeb070ebd6b180896f146",
1198 | "sha256:663a6aaad92e5690b03d931f849016c9718beaa654e9a15f08bfcac750241036",
1199 | "sha256:6a056637f7f9876e4c9db9b5434d340e0c97e25f00c4c04458f0ff906e82488e",
1200 | "sha256:6d8bdacde73f5f484325179f466ce2011f79360e9a152100179c3dafb88f2a35",
1201 | "sha256:776800194e757cd212b47cd05907e0eb67a554ad333fe76776060dbb729e3427",
1202 | "sha256:83ab0d0447b8de8450c554952a8399791544605caf274fc3c904e247e1584ced",
1203 | "sha256:9d8caf7fa58791b6b26e912e44d5056818b7bb3142bfa7806f54bde47c189078",
1204 | "sha256:9f103cd6d7e15fa537a844c1a85c9beeeee8ec38357287c9efd3ee4bb8354e1d",
1205 | "sha256:af94b89a8f7759603c696b320e86e57f4b2bb4911e02bf2bae33c714ac498fb8",
1206 | "sha256:b1df4d1151dd6d945324583125e6449bb74ec7cd91ffd7f850015cdb75f151b5",
1207 | "sha256:b9f10b85dcd9ce80f738e33f55a32b3a538b47409dc1a59eec30b46ea96759db",
1208 | "sha256:c1f710bba72925aa96e60828df5d2a4872f5d4a4ad7bb4a4c9a6a41c9ce9a198",
1209 | "sha256:c9c329ec195cdea6a4dee3cebdb1602f4e0f69351c63bc58a4812f3c8a9f4f2d",
1210 | "sha256:e35135657b7103a70298cf557e4fad06af97607cb0780d8f44a2f91ca7769458",
1211 | "sha256:e8a6074f7d505bbfd30bcc1c57dc7cb150cc9c021459c2e2729854be1aefb5f7",
1212 | "sha256:eed33b7ca2bf3fdd585339db42838ab0b641952e064564bff6e9a10573ea665c",
1213 | "sha256:efeac34d0ce6bf9404d268545867cbde9d6ecadd0e9bd7e6b468e5f4e2349875",
1214 | "sha256:f7053801ceb7c51ce674c6a8e37a18fcc221c292f66ef7da84744ecf13b4a0c0",
1215 | "sha256:f8aecb3edc443e5625725ae1ef8f500fa78ce7cb0e864115864bb9f234d18290"
1216 | ],
1217 | "markers": "python_version >= '3.7'",
1218 | "version": "==1.0"
1219 | },
1220 | "scipy": {
1221 | "hashes": [
1222 | "sha256:2a0eeaab01258e0870c4022a6cd329aef3b7c6c2b606bd7cf7bb2ba9820ae561",
1223 | "sha256:3304bd5bc32e00954ac4b3f4cc382ca8824719bf348aacbec6347337d6b125fe",
1224 | "sha256:3f52470e0548cdb74fb8ddf06773ffdcca7c97550f903b1c51312ec19243a7f7",
1225 | "sha256:4729b41a4cdaf4cd011aeac816b532f990bdf97710cef59149d3e293115cf467",
1226 | "sha256:4ee952f39a4a4c7ba775a32b664b1f4b74818548b65f765987adc14bb78f5802",
1227 | "sha256:611f9cb459d0707dd8e4de0c96f86e93f61aac7475fcb225e9ec71fecdc5cebf",
1228 | "sha256:6b47d5fa7ea651054362561a28b1ccc8da9368a39514c1bbf6c0977a1c376764",
1229 | "sha256:71cfc96297617eab911e22216e8a8597703202e95636d9406df9af5c2ac99a2b",
1230 | "sha256:787749110a23502031fb1643c55a2236c99c6b989cca703ea2114d65e21728ef",
1231 | "sha256:90c07ba5f34f33299a428b0d4fa24c30d2ceba44d63f8385b2b05be460819fcb",
1232 | "sha256:a496b42dbcd04ea9924f5e92be63af3d8e0f43a274b769bfaca0a297327d54ee",
1233 | "sha256:bc61e3e5ff92d2f32bb263621d54a9cff5e3f7c420af3d1fa122ce2529de2bd9",
1234 | "sha256:c9951e3746b68974125e5e3445008a4163dd6d20ae0bbdae22b38cb8951dc11b",
1235 | "sha256:d1388fbac9dd591ea630da75c455f4cc637a7ca5ecb31a6b6cef430914749cde",
1236 | "sha256:d13f31457f2216e5705304d9f28e2826edf75487410a57aa99263fa4ffd792c2",
1237 | "sha256:d648aa85dd5074b1ed83008ae987c3fbb53d68af619fce1dee231f4d8bd40e2f",
1238 | "sha256:da9c6b336e540def0b7fd65603da8abeb306c5fc9a5f4238665cbbb5ff95cf58",
1239 | "sha256:e101bceeb9e65a90dadbc5ca31283403a2d4667b9c178db29109750568e8d112",
1240 | "sha256:efdd3825d54c58df2cc394366ca4b9166cf940a0ebddeb87b6c10053deb625ea"
1241 | ],
1242 | "markers": "python_version < '3.10' and python_version >= '3.7'",
1243 | "version": "==1.7.1"
1244 | },
1245 | "send2trash": {
1246 | "hashes": [
1247 | "sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d",
1248 | "sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08"
1249 | ],
1250 | "version": "==1.8.0"
1251 | },
1252 | "six": {
1253 | "hashes": [
1254 | "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
1255 | "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
1256 | ],
1257 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
1258 | "version": "==1.15.0"
1259 | },
1260 | "sklearn": {
1261 | "hashes": [
1262 | "sha256:e23001573aa194b834122d2b9562459bf5ae494a2d59ca6b8aa22c85a44c0e31"
1263 | ],
1264 | "index": "pypi",
1265 | "version": "==0.0"
1266 | },
1267 | "smmap": {
1268 | "hashes": [
1269 | "sha256:7e65386bd122d45405ddf795637b7f7d2b532e7e401d46bbe3fb49b9986d5182",
1270 | "sha256:a9a7479e4c572e2e775c404dcd3080c8dc49f39918c2cf74913d30c4c478e3c2"
1271 | ],
1272 | "markers": "python_version >= '3.5'",
1273 | "version": "==4.0.0"
1274 | },
1275 | "sqlalchemy": {
1276 | "hashes": [
1277 | "sha256:0566a6e90951590c0307c75f9176597c88ef4be2724958ca1d28e8ae05ec8822",
1278 | "sha256:08d9396a2a38e672133266b31ed39b2b1f2b5ec712b5bff5e08033970563316a",
1279 | "sha256:0b08a53e40b34205acfeb5328b832f44437956d673a6c09fce55c66ab0e54916",
1280 | "sha256:16ef07e102d2d4f974ba9b0d4ac46345a411ad20ad988b3654d59ff08e553b1c",
1281 | "sha256:1adf3d25e2e33afbcd48cfad8076f9378793be43e7fec3e4334306cac6bec138",
1282 | "sha256:1b38db2417b9f7005d6ceba7ce2a526bf10e3f6f635c0f163e6ed6a42b5b62b2",
1283 | "sha256:1ebd69365717becaa1b618220a3df97f7c08aa68e759491de516d1c3667bba54",
1284 | "sha256:26b0cd2d5c7ea96d3230cb20acac3d89de3b593339c1447b4d64bfcf4eac1110",
1285 | "sha256:2ed67aae8cde4d32aacbdba4f7f38183d14443b714498eada5e5a7a37769c0b7",
1286 | "sha256:33a1e86abad782e90976de36150d910748b58e02cd7d35680d441f9a76806c18",
1287 | "sha256:41a916d815a3a23cb7fff8d11ad0c9b93369ac074e91e428075e088fe57d5358",
1288 | "sha256:6003771ea597346ab1e97f2f58405c6cacbf6a308af3d28a9201a643c0ac7bb3",
1289 | "sha256:6400b22e4e41cc27623a9a75630b7719579cd9a3a2027bcf16ad5aaa9a7806c0",
1290 | "sha256:6b602e3351f59f3999e9fb8b87e5b95cb2faab6a6ecdb482382ac6fdfbee5266",
1291 | "sha256:75cd5d48389a7635393ff5a9214b90695c06b3d74912109c3b00ce7392b69c6c",
1292 | "sha256:7ad59e2e16578b6c1a2873e4888134112365605b08a6067dd91e899e026efa1c",
1293 | "sha256:7b7778a205f956755e05721eebf9f11a6ac18b2409bff5db53ce5fe7ede79831",
1294 | "sha256:842c49dd584aedd75c2ee05f6c950730c3ffcddd21c5824ed0f820808387e1e3",
1295 | "sha256:90fe429285b171bcc252e21515703bdc2a4721008d1f13aa5b7150336f8a8493",
1296 | "sha256:91cd87d1de0111eaca11ccc3d31af441c753fa2bc22df72e5009cfb0a1af5b03",
1297 | "sha256:9a1df8c93a0dd9cef0839917f0c6c49f46c75810cf8852be49884da4a7de3c59",
1298 | "sha256:9ebe49c3960aa2219292ea2e5df6acdc425fc828f2f3d50b4cfae1692bcb5f02",
1299 | "sha256:a28fe28c359835f3be20c89efd517b35e8f97dbb2ca09c6cf0d9ac07f62d7ef6",
1300 | "sha256:a36ea43919e51b0de0c0bc52bcfdad7683f6ea9fb81b340cdabb9df0e045e0f7",
1301 | "sha256:a505ecc0642f52e7c65afb02cc6181377d833b7df0994ecde15943b18d0fa89c",
1302 | "sha256:a79abdb404d9256afb8aeaa0d3a4bc7d3b6d8b66103d8b0f2f91febd3909976e",
1303 | "sha256:c211e8ec81522ce87b0b39f0cf0712c998d4305a030459a0e115a2b3dc71598f",
1304 | "sha256:dd4ed12a775f2cde4519f4267d3601990a97d8ecde5c944ab06bfd6e8e8ea177",
1305 | "sha256:e37621b37c73b034997b5116678862f38ee70e5a054821c7b19d0e55df270dec",
1306 | "sha256:e93978993a2ad0af43f132be3ea8805f56b2f2cd223403ec28d3e7d5c6d39ed1"
1307 | ],
1308 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
1309 | "version": "==1.4.25"
1310 | },
1311 | "sqlparse": {
1312 | "hashes": [
1313 | "sha256:0c00730c74263a94e5a9919ade150dfc3b19c574389985446148402998287dae",
1314 | "sha256:48719e356bb8b42991bdbb1e8b83223757b93789c00910a616a071910ca4a64d"
1315 | ],
1316 | "markers": "python_version >= '3.5'",
1317 | "version": "==0.4.2"
1318 | },
1319 | "tabulate": {
1320 | "hashes": [
1321 | "sha256:d7c013fe7abbc5e491394e10fa845f8f32fe54f8dc60c6622c6cf482d25d47e4",
1322 | "sha256:eb1d13f25760052e8931f2ef80aaf6045a6cceb47514db8beab24cded16f13a7"
1323 | ],
1324 | "version": "==0.8.9"
1325 | },
1326 | "tensorboard": {
1327 | "hashes": [
1328 | "sha256:f7dac4cdfb52d14c9e3f74585ce2aaf8e6203620a864e51faf84988b09f7bbdb"
1329 | ],
1330 | "markers": "python_version >= '3.6'",
1331 | "version": "==2.6.0"
1332 | },
1333 | "tensorboard-data-server": {
1334 | "hashes": [
1335 | "sha256:809fe9887682d35c1f7d1f54f0f40f98bb1f771b14265b453ca051e2ce58fca7",
1336 | "sha256:d8237580755e58eff68d1f3abefb5b1e39ae5c8b127cc40920f9c4fb33f4b98a",
1337 | "sha256:fa8cef9be4fcae2f2363c88176638baf2da19c5ec90addb49b1cde05c95c88ee"
1338 | ],
1339 | "markers": "python_version >= '3.6'",
1340 | "version": "==0.6.1"
1341 | },
1342 | "tensorboard-plugin-wit": {
1343 | "hashes": [
1344 | "sha256:2a80d1c551d741e99b2f197bb915d8a133e24adb8da1732b840041860f91183a"
1345 | ],
1346 | "version": "==1.8.0"
1347 | },
1348 | "tensorflow": {
1349 | "hashes": [
1350 | "sha256:00b1af0a0c5c102db19caceffac4bd4e6c536e6d7512144c241a4ace4428e7c6",
1351 | "sha256:2a067d22a356c2cd4753bdd16ee492c55a610f5ebc52713e2954c642f070321c",
1352 | "sha256:2c9b8c6adc060acfcf805a2ea501db0124b679d95b522fd5983a4c110e8e0264",
1353 | "sha256:4716c9b25a61a2c79b1f253d1e114f1f8679241559c13ad18c657c626a7d5924",
1354 | "sha256:6e38b6969414d16afc560c58ca34e1328cc0a5dbd644b64e060f5be8a6653274",
1355 | "sha256:8b5ce09ede0fe45ef100f4dc65cf3f46722194e75139f85d524058315e2ce9fa",
1356 | "sha256:bc73ebdd30c48cfc27ba307271117e6dbb795b37396ed817b2fec9393380b115",
1357 | "sha256:bfb255c2b0400bc5b4060dda098d46cd7ddeb53b7cbac1dfa29435612cba828c",
1358 | "sha256:c67fad296a3a2133b7a14da5f06c9937e7911b02c5d7a3ff6ba52a1d79b6bc9e",
1359 | "sha256:d6468e05552720100e8f94097feb770de320e4c8c244323a8746bd84e5ba4052",
1360 | "sha256:dea97f664246e185d79cbe40a86309527affd4232f06afa8a6500c4fc4b64a03",
1361 | "sha256:e45e026a9d08c89cecc1160d8248135e2fb79bdc3267328399e1fb25ce583bd6"
1362 | ],
1363 | "index": "pypi",
1364 | "version": "==2.6.0"
1365 | },
1366 | "tensorflow-estimator": {
1367 | "hashes": [
1368 | "sha256:cf78528998efdb637ac0abaf525c929bf192767544eb24ae20d9266effcf5afd"
1369 | ],
1370 | "version": "==2.6.0"
1371 | },
1372 | "termcolor": {
1373 | "hashes": [
1374 | "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"
1375 | ],
1376 | "version": "==1.1.0"
1377 | },
1378 | "terminado": {
1379 | "hashes": [
1380 | "sha256:09fdde344324a1c9c6e610ee4ca165c4bb7f5bbf982fceeeb38998a988ef8452",
1381 | "sha256:b20fd93cc57c1678c799799d117874367cc07a3d2d55be95205b1a88fa08393f"
1382 | ],
1383 | "markers": "python_version >= '3.6'",
1384 | "version": "==0.12.1"
1385 | },
1386 | "testpath": {
1387 | "hashes": [
1388 | "sha256:1acf7a0bcd3004ae8357409fc33751e16d37ccc650921da1094a86581ad1e417",
1389 | "sha256:8044f9a0bab6567fc644a3593164e872543bb44225b0e24846e2c89237937589"
1390 | ],
1391 | "markers": "python_version >= '3.5'",
1392 | "version": "==0.5.0"
1393 | },
1394 | "threadpoolctl": {
1395 | "hashes": [
1396 | "sha256:86d4b6801456d780e94681d155779058759eaef3c3564758b17b6c99db5f81cb",
1397 | "sha256:e5a995e3ffae202758fa8a90082e35783b9370699627ae2733cd1c3a73553616"
1398 | ],
1399 | "markers": "python_version >= '3.6'",
1400 | "version": "==2.2.0"
1401 | },
1402 | "tornado": {
1403 | "hashes": [
1404 | "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb",
1405 | "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c",
1406 | "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288",
1407 | "sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95",
1408 | "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558",
1409 | "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe",
1410 | "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791",
1411 | "sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d",
1412 | "sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326",
1413 | "sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b",
1414 | "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4",
1415 | "sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c",
1416 | "sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910",
1417 | "sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5",
1418 | "sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c",
1419 | "sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0",
1420 | "sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675",
1421 | "sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd",
1422 | "sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f",
1423 | "sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c",
1424 | "sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea",
1425 | "sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6",
1426 | "sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05",
1427 | "sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd",
1428 | "sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575",
1429 | "sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a",
1430 | "sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37",
1431 | "sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795",
1432 | "sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f",
1433 | "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32",
1434 | "sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c",
1435 | "sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01",
1436 | "sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4",
1437 | "sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2",
1438 | "sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921",
1439 | "sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085",
1440 | "sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df",
1441 | "sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102",
1442 | "sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5",
1443 | "sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68",
1444 | "sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"
1445 | ],
1446 | "markers": "python_version >= '3.5'",
1447 | "version": "==6.1"
1448 | },
1449 | "traitlets": {
1450 | "hashes": [
1451 | "sha256:03f172516916220b58c9f19d7f854734136dd9528103d04e9bf139a92c9f54c4",
1452 | "sha256:bd382d7ea181fbbcce157c133db9a829ce06edffe097bcf3ab945b435452b46d"
1453 | ],
1454 | "markers": "python_version >= '3.7'",
1455 | "version": "==5.1.0"
1456 | },
1457 | "typing-extensions": {
1458 | "hashes": [
1459 | "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918",
1460 | "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c",
1461 | "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"
1462 | ],
1463 | "markers": "python_version < '3.10'",
1464 | "version": "==3.7.4.3"
1465 | },
1466 | "urllib3": {
1467 | "hashes": [
1468 | "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece",
1469 | "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"
1470 | ],
1471 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'",
1472 | "version": "==1.26.7"
1473 | },
1474 | "wcwidth": {
1475 | "hashes": [
1476 | "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784",
1477 | "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"
1478 | ],
1479 | "version": "==0.2.5"
1480 | },
1481 | "webencodings": {
1482 | "hashes": [
1483 | "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78",
1484 | "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"
1485 | ],
1486 | "version": "==0.5.1"
1487 | },
1488 | "websocket-client": {
1489 | "hashes": [
1490 | "sha256:0133d2f784858e59959ce82ddac316634229da55b498aac311f1620567a710ec",
1491 | "sha256:8dfb715d8a992f5712fff8c843adae94e22b22a99b2c5e6b0ec4a1a981cc4e0d"
1492 | ],
1493 | "markers": "python_version >= '3.6'",
1494 | "version": "==1.2.1"
1495 | },
1496 | "werkzeug": {
1497 | "hashes": [
1498 | "sha256:1de1db30d010ff1af14a009224ec49ab2329ad2cde454c8a708130642d579c42",
1499 | "sha256:6c1ec500dcdba0baa27600f6a22f6333d8b662d22027ff9f6202e3367413caa8"
1500 | ],
1501 | "markers": "python_version >= '3.6'",
1502 | "version": "==2.0.1"
1503 | },
1504 | "wheel": {
1505 | "hashes": [
1506 | "sha256:21014b2bd93c6d0034b6ba5d35e4eb284340e09d63c59aef6fc14b0f346146fd",
1507 | "sha256:e2ef7239991699e3355d54f8e968a21bb940a1dbf34a4d226741e64462516fad"
1508 | ],
1509 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
1510 | "version": "==0.37.0"
1511 | },
1512 | "widgetsnbextension": {
1513 | "hashes": [
1514 | "sha256:079f87d87270bce047512400efd70238820751a11d2d8cb137a5a5bdbaf255c7",
1515 | "sha256:bd314f8ceb488571a5ffea6cc5b9fc6cba0adaf88a9d2386b93a489751938bcd"
1516 | ],
1517 | "version": "==3.5.1"
1518 | },
1519 | "wrapt": {
1520 | "hashes": [
1521 | "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"
1522 | ],
1523 | "version": "==1.12.1"
1524 | },
1525 | "zipp": {
1526 | "hashes": [
1527 | "sha256:1fc9641b26f3bd81069b7738b039f2819cab6e3fc3399a953e19d92cc81eff4d",
1528 | "sha256:8dc6c4d5a809d659067cc713f76bcf42fae8ae641db12fddfa93694a15abc96b"
1529 | ],
1530 | "markers": "python_version >= '3.6'",
1531 | "version": "==3.5.1"
1532 | }
1533 | },
1534 | "develop": {}
1535 | }
1536 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MLflow Tutorial
2 |
3 | This repository contains the materials used during the AI Marketplace 2021 MLflow tutorial.
4 |
5 | Agenda
6 | ========
7 |
8 | - ML Lifecycle
9 | - Experiment Tracking
10 | - MLflow Models & Model Registry
11 | - MLflow Projects
12 | - Advanced usage & tips
13 |
14 |
15 | Running instructions
16 | ====================
17 |
18 | In order to run the setup you will need to [install docker-compose](https://docs.docker.com/compose/install/); then you can simply run:
19 | ```shell
20 | docker-compose build
21 | docker-compose up -d
22 | ```
23 |
24 | You can then access:
25 | * Jupyter: `http://localhost:8888`
26 | * MLflow: `http://localhost:5000`
27 | * minio (S3): `http://localhost:9000` (user: `minioadmin`, pass: `minioadmin`)
28 |
29 |
30 | At the end of the session, you can simply run the following command to stop all
31 | the processes:
32 |
33 | ```shell
34 | docker-compose down
35 | ```
36 |
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 | services:
3 | s3:
4 | image: minio/minio:RELEASE.2021-09-18T18-09-59Z
5 | container_name: aws-s3
6 | ports:
7 | - "9000:9000"
8 | - "9001:9001"
9 | environment:
10 | - MINIO_ROOT_USER=minioadmin
11 | - MINIO_ROOT_PASSWORD=minioadmin
12 | command:
13 | server /data --console-address ":9001"
14 | networks:
15 | - backend
16 | create_bucket:
17 | image: minio/mc:RELEASE.2021-09-23T05-44-03Z
18 | depends_on:
19 | - s3
20 | container_name: mc
21 | networks:
22 | - backend
23 | env_file:
24 | - .env
25 | entrypoint: >
26 | /bin/sh -c "
27 | echo 'Wait 20s for minio to startup...' && sleep 20;
28 | echo 'Setting up connection'; /usr/bin/mc config host add minio http://s3:9000 ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY};
29 | echo 'Cleaning bucket'; /usr/bin/mc rm -r --force minio/mlflow;
30 | echo 'Creating bucket'; /usr/bin/mc mb minio/mlflow;
31 | echo 'Setting bucket policy'; /usr/bin/mc policy set download minio/mlflow;
32 | exit 0;
33 | "
34 | notebook:
35 | image: mlflow-workshop:latest
36 | build:
37 | context: .
38 | dockerfile: Dockerfile
39 | ports:
40 | - "8888:8888"
41 | networks:
42 | - backend
43 | db:
44 | restart: always
45 | image: mysql/mysql-server:5.7.28
46 | container_name: mlflow_db
47 | expose:
48 | - "3306"
49 | networks:
50 | - backend
51 | environment:
52 | - MYSQL_DATABASE=mlflow
53 | - MYSQL_USER=mlflow
54 | - MYSQL_PASSWORD=mlflow
55 | - MYSQL_ROOT_PASSWORD=mlflow
56 | mlflow:
57 | restart: always
58 | image: larribas/mlflow
59 | container_name: mlflow_server
60 | ports:
61 | - "5000:5000"
62 | environment:
63 | - AWS_ACCESS_KEY_ID=minioadmin
64 | - AWS_SECRET_ACCESS_KEY=minioadmin
65 | - AWS_DEFAULT_REGION=eu-west-1
66 | - MLFLOW_S3_ENDPOINT_URL=http://s3:9000
67 | networks:
68 | - backend
69 | command: --backend-store-uri mysql+pymysql://mlflow:mlflow@db:3306/mlflow --default-artifact-root s3://mlflow/ --host 0.0.0.0
70 |
71 |
72 | networks:
73 | backend:
74 | driver: bridge
75 |
--------------------------------------------------------------------------------
/mlflow-project/MLproject:
--------------------------------------------------------------------------------
1 | name: multistep_example
2 |
3 | conda_env: conda.yaml
4 |
5 | entry_points:
6 | load_raw_data:
7 | command: "python load_raw_data.py"
8 |
9 | etl_data:
10 | parameters:
11 | ratings_csv: path
12 | max_row_limit: {type: int, default: 100000}
13 | command: "python etl_data.py --ratings-csv {ratings_csv} --max-row-limit {max_row_limit}"
14 |
15 | als:
16 | parameters:
17 | ratings_data: path
18 | max_iter: {type: int, default: 10}
19 | reg_param: {type: float, default: 0.1}
20 | rank: {type: int, default: 12}
21 | command: "python als.py --ratings-data {ratings_data} --max-iter {max_iter} --reg-param {reg_param} --rank {rank}"
22 |
23 | train_keras:
24 | parameters:
25 | ratings_data: path
26 | als_model_uri: string
27 | hidden_units: {type: int, default: 20}
28 | command: "python train_keras.py --ratings-data {ratings_data} --als-model-uri {als_model_uri} --hidden-units {hidden_units}"
29 |
30 | main:
31 | parameters:
32 | als_max_iter: {type: int, default: 10}
33 | keras_hidden_units: {type: int, default: 20}
34 | max_row_limit: {type: int, default: 100000}
35 | command: "python main.py --als-max-iter {als_max_iter} --keras-hidden-units {keras_hidden_units}
36 | --max-row-limit {max_row_limit}"
37 |
38 |
--------------------------------------------------------------------------------
/mlflow-project/README.rst:
--------------------------------------------------------------------------------
1 | ML Pipelines with MLflow
2 | ------------------------
3 |
4 | The original MLproject can be found in the `MLflow multistep workflow example <https://github.com/mlflow/mlflow/tree/master/examples/multistep_workflow>`_.
--------------------------------------------------------------------------------
/mlflow-project/als.py:
--------------------------------------------------------------------------------
1 | """
2 | Trains an Alternating Least Squares (ALS) model for user/movie ratings.
3 | The input is a Parquet ratings dataset (see etl_data.py), and we output
4 | an mlflow artifact called 'als-model'.
5 | """
6 | import click
7 |
8 | import mlflow
9 | import mlflow.spark
10 |
11 | import pyspark
12 | from pyspark.ml import Pipeline
13 | from pyspark.ml.recommendation import ALS
14 | from pyspark.ml.evaluation import RegressionEvaluator
15 |
16 | from dotenv import load_dotenv
17 |
18 |
19 | load_dotenv()
20 |
@click.command()
@click.option("--ratings-data")
@click.option("--split-prop", default=0.8, type=float)
@click.option("--max-iter", default=10, type=int)
@click.option("--reg-param", default=0.1, type=float)
@click.option("--rank", default=12, type=int)
@click.option("--cold-start-strategy", default="drop")
def train_als(ratings_data, split_prop, max_iter, reg_param, rank, cold_start_strategy):
    """Train an ALS recommender on a Parquet ratings dataset and log it to MLflow.

    Click options:
        ratings_data: path to the Parquet ratings dataset (see etl_data.py).
        split_prop: fraction of rows used for training; the remainder is the test set.
        max_iter / reg_param / rank: standard ALS hyperparameters.
        cold_start_strategy: how ALS handles unseen users/items at predict time.

    Logs train/test MSE metrics and the fitted pipeline as the 'als-model' artifact.
    """
    seed = 42  # fixed seed for a reproducible split and factorization

    spark = pyspark.sql.SparkSession.builder.getOrCreate()

    ratings_df = spark.read.parquet(ratings_data)
    (training_df, test_df) = ratings_df.randomSplit([split_prop, 1 - split_prop], seed=seed)
    training_df.cache()
    test_df.cache()

    # Count each DataFrame once: every .count() call triggers a full Spark job.
    train_rows = training_df.count()
    test_rows = test_df.count()
    mlflow.log_metric("training_nrows", train_rows)
    mlflow.log_metric("test_nrows", test_rows)

    print("Training: {0}, test: {1}".format(train_rows, test_rows))

    als = (
        ALS()
        .setUserCol("userId")
        .setItemCol("movieId")
        .setRatingCol("rating")
        .setPredictionCol("predictions")
        .setMaxIter(max_iter)
        .setSeed(seed)
        .setRegParam(reg_param)
        .setColdStartStrategy(cold_start_strategy)
        .setRank(rank)
    )

    als_model = Pipeline(stages=[als]).fit(training_df)

    reg_eval = RegressionEvaluator(predictionCol="predictions", labelCol="rating", metricName="mse")

    # Renamed from the inconsistent camelCase 'predicted_test_dF'.
    predicted_test_df = als_model.transform(test_df)

    test_mse = reg_eval.evaluate(predicted_test_df)
    train_mse = reg_eval.evaluate(als_model.transform(training_df))

    print("The model had a MSE on the test set of {0}".format(test_mse))
    print("The model had a MSE on the (train) set of {0}".format(train_mse))
    mlflow.log_metric("test_mse", test_mse)
    mlflow.log_metric("train_mse", train_mse)
    mlflow.spark.log_model(als_model, "als-model")


if __name__ == "__main__":
    train_als()
74 |
--------------------------------------------------------------------------------
/mlflow-project/conda.yaml:
--------------------------------------------------------------------------------
1 | name: multistep
2 | channels:
3 | - defaults
4 | - anaconda
5 | - conda-forge
6 | dependencies:
7 | - python=3.6
8 | - pyspark
9 | - requests
10 | - click
11 | - pip
12 | - pip:
13 | - tensorflow==1.15.2
14 | - keras==2.2.4
15 | - mlflow>=1.0
16 | - python-dotenv==0.15.0
17 | - boto3==1.17.29
18 |
--------------------------------------------------------------------------------
/mlflow-project/etl_data.py:
--------------------------------------------------------------------------------
1 | """
2 | Converts the raw CSV form to a Parquet form with just the columns we want
3 | """
4 | import tempfile
5 | import os
6 | import pyspark
7 | import mlflow
8 | import click
9 | from dotenv import load_dotenv
10 |
11 |
12 | load_dotenv()
13 |
@click.command(
    help="Given a CSV file (see load_raw_data), transforms it into Parquet "
    "in an mlflow artifact called 'ratings-parquet-dir'"
)
@click.option("--ratings-csv")
@click.option(
    "--max-row-limit", default=10000, help="Limit the data size to run comfortably on a laptop."
)
def etl_data(ratings_csv, max_row_limit):
    """Convert the raw ratings CSV to Parquet, keeping only the columns we need.

    The Parquet output is uploaded as the MLflow artifact 'ratings-parquet-dir'.
    A max_row_limit of -1 disables row limiting.
    """
    with mlflow.start_run() as mlrun:
        # TemporaryDirectory (instead of a leaked mkdtemp) removes the scratch
        # dir once the artifacts have been uploaded to the tracking server.
        with tempfile.TemporaryDirectory() as tmpdir:
            ratings_parquet_dir = os.path.join(tmpdir, "ratings-parquet")
            spark = pyspark.sql.SparkSession.builder.getOrCreate()
            print("Converting ratings CSV %s to Parquet %s" % (ratings_csv, ratings_parquet_dir))
            ratings_df = (
                spark.read.option("header", "true")
                .option("inferSchema", "true")
                .csv(ratings_csv)
                .drop("timestamp")
            )  # Drop unused column
            ratings_df.show()
            if max_row_limit != -1:
                ratings_df = ratings_df.limit(max_row_limit)
            ratings_df.write.parquet(ratings_parquet_dir)
            print("Uploading Parquet ratings: %s" % ratings_parquet_dir)
            mlflow.log_artifacts(ratings_parquet_dir, "ratings-parquet-dir")


if __name__ == "__main__":
    etl_data()
44 |
--------------------------------------------------------------------------------
/mlflow-project/load_raw_data.py:
--------------------------------------------------------------------------------
1 | """
2 | Downloads the MovieLens dataset and saves it as an artifact
3 | """
4 | import requests
5 | import tempfile
6 | import os
7 | import zipfile
8 | import pyspark
9 | import mlflow
10 | import click
11 | from dotenv import load_dotenv
12 |
13 |
14 | load_dotenv()
15 |
@click.command(
    help="Downloads the MovieLens dataset and saves it as an mlflow artifact "
    " called 'ratings-csv-dir'."
)
@click.option("--url", default="http://files.grouplens.org/datasets/movielens/ml-20m.zip")
def load_raw_data(url):
    """Download the MovieLens zip, extract ratings.csv, and log it as an artifact."""
    with mlflow.start_run() as mlrun:
        local_dir = tempfile.mkdtemp()
        local_filename = os.path.join(local_dir, "ml-20m.zip")
        print("Downloading %s to %s" % (url, local_filename))
        # Stream the download so the large archive is never held fully in memory,
        # and fail fast on HTTP errors instead of writing an error page to disk.
        r = requests.get(url, stream=True, timeout=60)
        r.raise_for_status()
        with open(local_filename, "wb") as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)

        extracted_dir = os.path.join(local_dir, "ml-20m")
        print("Extracting %s into %s" % (local_filename, extracted_dir))
        with zipfile.ZipFile(local_filename, "r") as zip_ref:
            zip_ref.extractall(local_dir)

        ratings_file = os.path.join(extracted_dir, "ratings.csv")

        print("Uploading ratings: %s" % ratings_file)
        mlflow.log_artifact(ratings_file, "ratings-csv-dir")


if __name__ == "__main__":
    load_raw_data()
46 |
--------------------------------------------------------------------------------
/mlflow-project/main.py:
--------------------------------------------------------------------------------
1 | """
2 | Downloads the MovieLens dataset, ETLs it into Parquet, trains an
3 | ALS model, and uses the ALS model to train a Keras neural network.
4 | See README.rst for more details.
5 | """
6 |
7 | import click
8 | import os
9 |
10 |
11 | import mlflow
12 | from mlflow.utils import mlflow_tags
13 | from mlflow.entities import RunStatus
14 | from mlflow.utils.logging_utils import eprint
15 |
16 | from mlflow.tracking.fluent import _get_experiment_id
17 |
18 |
def _already_ran(entry_point_name, parameters, git_commit, experiment_id=None):
    """Best-effort detection of if a run with the given entrypoint name,
    parameters, and experiment id already ran. The run must have completed
    successfully and have at least the parameters provided.
    """
    if experiment_id is None:
        experiment_id = _get_experiment_id()
    client = mlflow.tracking.MlflowClient()
    # Scan runs oldest-first (list_run_infos returns newest-first).
    for run_info in reversed(client.list_run_infos(experiment_id)):
        full_run = client.get_run(run_info.run_id)
        tags = full_run.data.tags

        # The run must come from the same project entrypoint.
        if tags.get(mlflow_tags.MLFLOW_PROJECT_ENTRY_POINT, None) != entry_point_name:
            continue

        # Every requested parameter must match the value recorded on the run.
        if not all(
            full_run.data.params.get(key) == value for key, value in parameters.items()
        ):
            continue

        # Only successfully finished runs count as reusable.
        if run_info.to_proto().status != RunStatus.FINISHED:
            eprint(
                "Run matched, but is not FINISHED, so skipping (run_id=%s, status=%s)"
                % (run_info.run_id, run_info.status)
            )
            continue

        # The cached run must come from the same source version.
        previous_version = tags.get(mlflow_tags.MLFLOW_GIT_COMMIT, None)
        if previous_version != git_commit:
            eprint(
                "Run matched, but has a different source version, so skipping "
                "(found=%s, expected=%s)" % (previous_version, git_commit)
            )
            continue

        return client.get_run(run_info.run_id)
    eprint("No matching run has been found.")
    return None
61 |
62 |
# TODO(aaron): This is not great because it doesn't account for:
# - changes in code
# - changes in dependant steps
def _get_or_run(entrypoint, parameters, git_commit, use_cache=True):
    """Reuse a previous matching run when caching is enabled, else launch a new one."""
    cached = _already_ran(entrypoint, parameters, git_commit)
    if use_cache and cached:
        print("Found existing run for entrypoint=%s and parameters=%s" % (entrypoint, parameters))
        return cached
    print("Launching new run for entrypoint=%s and parameters=%s" % (entrypoint, parameters))
    submitted = mlflow.run(".", entrypoint, parameters=parameters)
    return mlflow.tracking.MlflowClient().get_run(submitted.run_id)
74 |
75 |
@click.command()
@click.option("--als-max-iter", default=10, type=int)
@click.option("--keras-hidden-units", default=20, type=int)
@click.option("--max-row-limit", default=100000, type=int)
def workflow(als_max_iter, keras_hidden_units, max_row_limit):
    """Run the multistep workflow: download -> ETL -> ALS -> Keras.

    Note: The entrypoint names are defined in MLproject. The artifact directories
    are documented by each step's .py file.
    """
    with mlflow.start_run() as active_run:
        os.environ["SPARK_CONF_DIR"] = os.path.abspath(".")
        git_commit = active_run.data.tags.get(mlflow_tags.MLFLOW_GIT_COMMIT)
        load_raw_data_run = _get_or_run("load_raw_data", {}, git_commit)
        ratings_csv_uri = os.path.join(load_raw_data_run.info.artifact_uri, "ratings-csv-dir")
        # Pass numeric parameters as strings: _already_ran compares them against
        # the string values MLflow stores, so an int here would never match and
        # would silently defeat the run cache.
        etl_data_run = _get_or_run(
            "etl_data",
            {"ratings_csv": ratings_csv_uri, "max_row_limit": str(max_row_limit)},
            git_commit,
        )
        ratings_parquet_uri = os.path.join(etl_data_run.info.artifact_uri, "ratings-parquet-dir")

        # We specify a spark-defaults.conf to override the default driver memory. ALS requires
        # significant memory. The driver memory property cannot be set by the application itself.
        als_run = _get_or_run(
            "als", {"ratings_data": ratings_parquet_uri, "max_iter": str(als_max_iter)}, git_commit
        )
        als_model_uri = os.path.join(als_run.info.artifact_uri, "als-model")

        keras_params = {
            "ratings_data": ratings_parquet_uri,
            "als_model_uri": als_model_uri,
            "hidden_units": str(keras_hidden_units),
        }
        _get_or_run("train_keras", keras_params, git_commit, use_cache=False)


if __name__ == "__main__":
    workflow()
110 |
111 |
--------------------------------------------------------------------------------
/mlflow-project/spark-defaults.conf:
--------------------------------------------------------------------------------
1 | spark.driver.memory 8g
2 |
--------------------------------------------------------------------------------
/mlflow-project/train_keras.py:
--------------------------------------------------------------------------------
1 | """
2 | Trains a Keras model for user/movie ratings. The input is a Parquet
3 | ratings dataset (see etl_data.py) and an ALS model (see als.py), which we
4 | will use to supplement our input and train using.
5 | """
6 | import click
7 |
8 | import mlflow
9 | import mlflow.keras
10 | import mlflow.spark
11 |
12 | from itertools import chain
13 | import pyspark
14 | from pyspark.sql.functions import *
15 | from pyspark.sql.types import *
16 |
17 | import tensorflow as tf
18 | import tensorflow.keras as keras
19 | from tensorflow.keras.models import Sequential
20 | from tensorflow.keras.layers import Dense
21 | from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
22 | import numpy as np
23 | import pandas as pd
24 |
25 | from dotenv import load_dotenv
26 |
27 |
28 | load_dotenv()
29 |
@click.command()
@click.option("--ratings-data", help="Path readable by Spark to the ratings Parquet file")
@click.option("--als-model-uri", help="Path readable by load_model to ALS MLmodel")
@click.option("--hidden-units", default=20, type=int)
def train_keras(ratings_data, als_model_uri, hidden_units):
    """Train a Keras regressor on ALS user/item factors and log it to MLflow.

    The ALS model supplies a factor vector for each user and each movie; the
    two are concatenated into a 24-dim feature row (assumes ALS rank 12 —
    see als.py's default) and a small fully-connected network is trained to
    predict the rating. Logs the model as the 'keras-model' artifact.
    """
    np.random.seed(0)
    tf.random.set_seed(42)  # For reproducibility

    spark = pyspark.sql.SparkSession.builder.getOrCreate()
    als_model = mlflow.spark.load_model(als_model_uri).stages[0]

    ratings_df = spark.read.parquet(ratings_data)

    (training_df, test_df) = ratings_df.randomSplit([0.8, 0.2], seed=42)
    training_df.cache()
    test_df.cache()

    # Count each DataFrame once: every .count() call triggers a full Spark job.
    train_rows = training_df.count()
    test_rows = test_df.count()
    mlflow.log_metric("training_nrows", train_rows)
    mlflow.log_metric("test_nrows", test_rows)

    print("Training: {0}, test: {1}".format(train_rows, test_rows))

    user_factors = als_model.userFactors.selectExpr("id as userId", "features as uFeatures")
    item_factors = als_model.itemFactors.selectExpr("id as movieId", "features as iFeatures")
    joined_train_df = training_df.join(item_factors, on="movieId").join(user_factors, on="userId")
    joined_test_df = test_df.join(item_factors, on="movieId").join(user_factors, on="userId")

    # We'll combine the movies and ratings vectors into a single vector of length 24.
    # We will then explode this features vector into a set of columns.
    def concat_arrays(*args):
        return list(chain(*args))

    concat_arrays_udf = udf(concat_arrays, ArrayType(FloatType()))

    concat_train_df = joined_train_df.select(
        "userId",
        "movieId",
        concat_arrays_udf(col("iFeatures"), col("uFeatures")).alias("features"),
        col("rating").cast("float"),
    )
    concat_test_df = joined_test_df.select(
        "userId",
        "movieId",
        concat_arrays_udf(col("iFeatures"), col("uFeatures")).alias("features"),
        col("rating").cast("float"),
    )

    pandas_df = concat_train_df.toPandas()
    pandas_test_df = concat_test_df.toPandas()

    # This syntax will create a new DataFrame where elements of the 'features' vector
    # are each in their own column. This is what we'll train our neural network on.
    x_test = pd.DataFrame(pandas_test_df.features.values.tolist(), index=pandas_test_df.index)
    x_train = pd.DataFrame(pandas_df.features.values.tolist(), index=pandas_df.index)

    # Show matrix for example.
    print("Training matrix:")
    print(x_train)

    # Create our Keras model with two fully connected hidden layers.
    model = Sequential()
    model.add(Dense(30, input_dim=24, activation="relu"))
    model.add(Dense(hidden_units, activation="relu"))
    model.add(Dense(1, activation="linear"))

    # 'learning_rate' replaces the deprecated 'lr' keyword in TF2 Keras
    # (the file already uses the TF2 tf.random.set_seed API).
    model.compile(loss="mse", optimizer=keras.optimizers.Adam(learning_rate=0.0001))

    # NOTE(review): the original defined an unused checkpoint filepath; a
    # ModelCheckpoint callback was imported but never wired in.
    early_stopping = EarlyStopping(monitor="val_loss", min_delta=0.0001, patience=2, mode="auto")

    model.fit(
        x_train,
        pandas_df["rating"],
        validation_split=0.2,
        verbose=2,
        epochs=3,
        batch_size=128,
        shuffle=False,
        callbacks=[early_stopping],
    )

    train_mse = model.evaluate(x_train, pandas_df["rating"], verbose=2)
    test_mse = model.evaluate(x_test, pandas_test_df["rating"], verbose=2)
    mlflow.log_metric("test_mse", test_mse)
    mlflow.log_metric("train_mse", train_mse)

    print("The model had a MSE on the test set of {0}".format(test_mse))
    mlflow.keras.log_model(model, "keras-model")


if __name__ == "__main__":
    train_keras()
122 |
--------------------------------------------------------------------------------
/notebooks/1-sklearn-example.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "## FILL IN YOUR NAME\n",
10 | "NAME = \"example\""
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "import os\n",
20 | "import warnings\n",
21 | "import sys\n",
22 | "\n",
23 | "import pandas as pd\n",
24 | "import numpy as np\n",
25 | "import pickle\n",
26 | "from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n",
27 | "from sklearn.model_selection import train_test_split\n",
28 | "from sklearn.linear_model import ElasticNet\n",
29 | "from urllib.parse import urlparse\n",
30 | "import mlflow\n",
31 | "import mlflow.sklearn\n",
32 | "\n",
33 | "import logging\n",
34 | "\n",
35 | "%load_ext dotenv\n",
36 | "%dotenv\n",
37 | "\n",
38 | "logging.basicConfig(level=logging.WARN)\n",
39 | "logger = logging.getLogger(__name__)"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "metadata": {},
45 | "source": [
46 | "## Load Dataset\n",
47 | "\n",
48 | "We will use an example from the official documentation of MLflow. The goal is to model wine quality based on physicochemical tests (see more [here](http://www3.dsi.uminho.pt/pcortez/wine/)). "
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": null,
54 | "metadata": {},
55 | "outputs": [],
56 | "source": [
57 | "warnings.filterwarnings(\"ignore\")\n",
58 | "np.random.seed(40)\n",
59 | "\n",
60 | "# Read the wine-quality csv file from the URL\n",
61 | "csv_url = (\n",
62 | " \"http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv\"\n",
63 | ")\n",
64 | "try:\n",
65 | " data = pd.read_csv(csv_url, sep=\";\")\n",
66 | "except Exception as e:\n",
67 | " logger.exception(\n",
68 | " \"Unable to download training & test CSV, check your internet connection. Error: %s\", e\n",
69 | " )\n",
70 | "\n",
71 | "# Split the data into training and test sets. (0.75, 0.25) split.\n",
72 | "train_data, test_data = train_test_split(data)\n",
73 | "\n",
74 | "# The predicted column is \"quality\" which is a scalar from [3, 9]\n",
75 | "train_x = train_data.drop([\"quality\"], axis=1)\n",
76 | "test_x = test_data.drop([\"quality\"], axis=1)\n",
77 | "train_y = train_data[[\"quality\"]]\n",
78 | "test_y = test_data[[\"quality\"]]"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "metadata": {},
84 | "source": [
85 | "## Training\n",
86 | "\n",
87 | "We will define a simple method to train the model. This method takes as inputs two of the hyperparameters of the model, namely `alpha` and `l1_ratio`. These parameters control the regularization of the model, so they will affect the complexity and the generalization power of the model (more [details](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html)). \n",
88 | "\n",
89 | "*Disclaimer: we will use the test set to evaluate multiple times the performance of the model while changing its hyperparameters. This is not a good practice and we are doing it here just for the sake of simplicity.*"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": null,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 | "def train(alpha=0.5, l1_ratio=0.5):\n",
99 | " \"\"\"Train an ElasticNet on the Wine Quality Dataset.\"\"\"\n",
100 | "\n",
101 | " # train model\n",
102 | " lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)\n",
103 | " lr.fit(train_x, train_y)\n",
104 | "\n",
105 | " # make predictions\n",
106 | " predicted_qualities = lr.predict(test_x)\n",
107 | "\n",
108 | " def eval_metrics(actual, pred):\n",
109 | " rmse = np.sqrt(mean_squared_error(actual, pred))\n",
110 | " mae = mean_absolute_error(actual, pred)\n",
111 | " r2 = r2_score(actual, pred)\n",
112 | " return rmse, mae, r2\n",
113 | " \n",
114 | " # evaluate trained model\n",
115 | " (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)\n",
116 | " print(\"Elasticnet model (alpha=%f, l1_ratio=%f):\" % (alpha, l1_ratio))\n",
117 | " print(\" RMSE: %s\" % rmse)\n",
118 | " print(\" MAE: %s\" % mae)\n",
119 | " print(\" R2: %s\" % r2)"
120 | ]
121 | },
122 | {
123 | "cell_type": "markdown",
124 | "metadata": {},
125 | "source": [
126 | "Now we can train and evaluate the model by just calling the method `train`"
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": null,
132 | "metadata": {
133 | "scrolled": true
134 | },
135 | "outputs": [],
136 | "source": [
137 | "train()"
138 | ]
139 | },
140 | {
141 | "cell_type": "markdown",
142 | "metadata": {},
143 | "source": [
144 |     "We can see that the `r2` is quite low, let's see if we can improve it by playing a bit with the hyperparameters."
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": null,
150 | "metadata": {},
151 | "outputs": [],
152 | "source": [
153 | "train(0.1, 0.1)"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "train(0.001, 0.001)"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {},
169 | "outputs": [],
170 | "source": [
171 | "train(0.00001, 0.00001)"
172 | ]
173 | },
174 | {
175 | "cell_type": "markdown",
176 | "metadata": {},
177 | "source": [
178 | "Okay, we have found a better performance by playing a bit with the hyperparameter :)\n",
179 | "\n",
180 |     "On the other hand, printing the performance on the stdout doesn't seem like the best solution to track the progress. Let's see what we can do by using the MLflow tracking module."
181 | ]
182 | },
183 | {
184 | "cell_type": "code",
185 | "execution_count": null,
186 | "metadata": {},
187 | "outputs": [],
188 | "source": []
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "metadata": {},
193 | "source": [
194 | "# Tracking Experiments with MLflow\n",
195 | "\n",
196 | "Next, let's check how much effort it will take to use the MLflow Tracking module in order to keep track of our experiments. The method `train_mlflow` below trains and evaluates an ElasticNet model in exactly the same way we did with the method `train` but it also sends the run details to MLflow so we can later visualize them.\n",
197 | "\n",
198 | "Here are a few tips in case you want to try writing the method `train_mlflow` on your own:\n",
199 | "* Check the **template** below (we already imported the libraries and set the experiment name for you)\n",
200 |     "* You can copy and paste the code in the method `train` above and then add the MLflow logging.\n",
201 | "* You should log parameters (like `alpha` and `l1_ratio`), metrics (`rmse`, `mae`, etc.), the model and optionally some tags and artifacts of your choice.\n",
202 | "* Check the [official documentation](https://www.mlflow.org/docs/latest/index.html) for more information.\n",
203 | "\n",
204 | "### Template\n",
205 | "\n",
206 |     "![experiment tracking template](experiment_tracking_template.png)"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": null,
212 | "metadata": {},
213 | "outputs": [],
214 | "source": [
215 | "# Configure experiment\n",
216 | "experiment_name = f\"sklearn-{NAME}\"\n",
217 | "mlflow.set_experiment(experiment_name)\n",
218 | "\n",
219 | "def train_mlflow(alpha=0.5, l1_ratio=0.5):\n",
220 | " \"\"\"Train an ElasticNet on the Wine Quality Dataset and Log Experiment to MLflow.\"\"\"\n",
221 | "\n",
222 | " # Train model\n",
223 | " lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)\n",
224 | " lr.fit(train_x, train_y)\n",
225 | "\n",
226 | " # Make predictions\n",
227 | " predicted_qualities = lr.predict(test_x)\n",
228 | "\n",
229 | " def eval_metrics(actual, pred):\n",
230 | " rmse = np.sqrt(mean_squared_error(actual, pred))\n",
231 | " mae = mean_absolute_error(actual, pred)\n",
232 | " r2 = r2_score(actual, pred)\n",
233 | " return rmse, mae, r2\n",
234 | " \n",
235 | " # Evaluate trained model\n",
236 | " (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)\n",
237 | " print(\"Elasticnet model (alpha=%f, l1_ratio=%f):\" % (alpha, l1_ratio))\n",
238 | " print(\" RMSE: %s\" % rmse)\n",
239 | " print(\" MAE: %s\" % mae)\n",
240 | " print(\" R2: %s\" % r2)\n",
241 | " \n",
242 | " \n",
243 | " # MLflow logging\n",
244 | " with mlflow.start_run():\n",
245 | " \n",
246 | " # Add tags to the run\n",
247 | " mlflow.set_tag('developer', 'cristian')\n",
248 | " \n",
249 | " # Log params\n",
250 | " mlflow.log_params({\n",
251 | " 'alpha': alpha,\n",
252 | " 'l1-ratio': l1_ratio\n",
253 | " })\n",
254 | " \n",
255 | " # Log metrics\n",
256 | " mlflow.log_metrics({\n",
257 | " 'rmse': rmse,\n",
258 | " 'mae': mae,\n",
259 | " 'r2': r2\n",
260 | " })\n",
261 | " \n",
262 | " # Log model\n",
263 | " mlflow.sklearn.log_model(lr, artifact_path='model')"
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": null,
269 | "metadata": {},
270 | "outputs": [],
271 | "source": [
272 | "train_mlflow(0.1, 0.1)"
273 | ]
274 | },
275 | {
276 | "cell_type": "code",
277 | "execution_count": null,
278 | "metadata": {},
279 | "outputs": [],
280 | "source": [
281 | "train_mlflow(0.5, 0.3)"
282 | ]
283 | },
284 | {
285 | "cell_type": "code",
286 | "execution_count": null,
287 | "metadata": {},
288 | "outputs": [],
289 | "source": [
290 | "train_mlflow(0.1, 0.01)"
291 | ]
292 | },
293 | {
294 | "cell_type": "code",
295 | "execution_count": null,
296 | "metadata": {},
297 | "outputs": [],
298 | "source": []
299 | },
300 | {
301 | "cell_type": "code",
302 | "execution_count": null,
303 | "metadata": {},
304 | "outputs": [],
305 | "source": [
306 | "for alpha in np.logspace(-10, -1, 5):\n",
307 | " for l1_ratio in np.logspace(-10, -1, 5):\n",
308 | " train_mlflow(alpha, l1_ratio)"
309 | ]
310 | }
311 | ],
312 | "metadata": {
313 | "kernelspec": {
314 | "display_name": "Python 3 (MLflow)",
315 | "language": "python",
316 | "name": "mlflow-workshop-env"
317 | },
318 | "language_info": {
319 | "codemirror_mode": {
320 | "name": "ipython",
321 | "version": 3
322 | },
323 | "file_extension": ".py",
324 | "mimetype": "text/x-python",
325 | "name": "python",
326 | "nbconvert_exporter": "python",
327 | "pygments_lexer": "ipython3",
328 | "version": "3.6.13"
329 | }
330 | },
331 | "nbformat": 4,
332 | "nbformat_minor": 5
333 | }
334 |
--------------------------------------------------------------------------------
/notebooks/2-keras-example.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "## FILL IN YOUR NAME\n",
10 | "NAME = \"example\""
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "import numpy as np\n",
20 | "import keras\n",
21 | "from keras.datasets import reuters\n",
22 | "from keras.models import Sequential\n",
23 | "from keras.layers import Dense, Dropout, Activation\n",
24 | "from keras.preprocessing.text import Tokenizer\n",
25 | "from tensorflow.keras.utils import to_categorical\n",
26 | "\n",
27 | "%load_ext dotenv\n",
28 | "%dotenv\n",
29 | "\n",
30 | "import mlflow.keras"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {},
36 | "source": [
37 | "## Configure Experiment Name"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "metadata": {},
44 | "outputs": [],
45 | "source": [
46 | "# configure experiment\n",
47 | "experiment_name = f\"keras-{NAME}\"\n",
48 | "mlflow.set_experiment(experiment_name)"
49 | ]
50 | },
51 | {
52 | "cell_type": "markdown",
53 | "metadata": {},
54 | "source": [
55 | "## Autolog\n",
56 | "\n",
57 | "In this example we will show how to automatically log parameters, metrics and artifacts with MLflow. We just need to add **one line of code** to enable the automatic logging.\n",
58 | "\n",
59 | "The following libraries support autologging:\n",
60 | "\n",
61 | "* Scikit-learn\n",
62 | "* TensorFlow and Keras\n",
63 | "* Gluon\n",
64 | "* XGBoost\n",
65 | "* LightGBM\n",
66 | "* Statsmodels\n",
67 | "* Spark\n",
68 | "* Fastai\n",
69 | "* Pytorch (using [Pytorch Lighting](https://pytorch-lightning.readthedocs.io/en/latest/))\n",
70 | "\n",
71 | "\n",
72 | "## Reuters Topic Classification\n",
73 | "\n",
74 | "In this example, we will be working with the Reuters dataset, a set of short newswires and their topics, published by Reuters in 1986. It's a very simple, widely used toy dataset for text classification. There are 46 different topics; some topics are more represented than others, but each topic has at least 10 examples in the training set. \n",
75 | "\n",
76 | "More info about this dataset can be found [here](https://keras.io/api/datasets/reuters/)."
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {},
83 | "outputs": [],
84 | "source": [
85 | "# enable automatic logging\n",
86 | "mlflow.keras.autolog()\n",
87 | "\n",
88 | "max_words = 1000\n",
89 | "batch_size = 32\n",
90 | "epochs = 5\n",
91 | "\n",
92 | "print(\"Loading data...\")\n",
93 | "(x_train, y_train), (x_test, y_test) = \\\n",
94 | " reuters.load_data(num_words=max_words, test_split=0.2)\n",
95 | "\n",
96 | "print(len(x_train), \"train sequences\")\n",
97 | "print(len(x_test), \"test sequences\")\n",
98 | "\n",
99 | "num_classes = np.max(y_train) + 1\n",
100 | "print(num_classes, \"classes\")\n",
101 | "\n",
102 | "print(\"Vectorizing sequence data...\")\n",
103 | "tokenizer = Tokenizer(num_words=max_words)\n",
104 | "x_train = tokenizer.sequences_to_matrix(x_train, mode=\"binary\")\n",
105 | "x_test = tokenizer.sequences_to_matrix(x_test, mode=\"binary\")\n",
106 | "print(\"x_train shape:\", x_train.shape)\n",
107 | "print(\"x_test shape:\", x_test.shape)\n",
108 | "\n",
109 | "print(\"Convert class vector to binary class matrix \" \n",
110 | " \"(for use with categorical_crossentropy)\")\n",
111 | "y_train = to_categorical(y_train, num_classes)\n",
112 | "y_test = to_categorical(y_test, num_classes)\n",
113 | "print(\"y_train shape:\", y_train.shape)\n",
114 | "print(\"y_test shape:\", y_test.shape)\n",
115 | "\n",
116 | "print(\"Building model...\")\n",
117 | "model = Sequential()\n",
118 | "model.add(Dense(512, input_shape=(max_words,)))\n",
119 | "model.add(Activation(\"relu\"))\n",
120 | "model.add(Dropout(0.5))\n",
121 | "model.add(Dense(num_classes))\n",
122 | "model.add(Activation(\"softmax\"))\n",
123 | "\n",
124 | "model.compile(loss=\"categorical_crossentropy\", \n",
125 | " optimizer=\"adam\", \n",
126 | " metrics=[\"accuracy\"])\n",
127 | "\n",
128 | "history = model.fit(\n",
129 | " x_train, y_train, \n",
130 | " batch_size=batch_size, \n",
131 | " epochs=epochs, \n",
132 | " verbose=1, \n",
133 | " validation_split=0.1\n",
134 | ")\n",
135 | "score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)\n",
136 | "print(\"Test score:\", score[0])\n",
137 | "print(\"Test accuracy:\", score[1])"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": null,
143 | "metadata": {},
144 | "outputs": [],
145 | "source": []
146 | },
147 | {
148 | "cell_type": "markdown",
149 | "metadata": {},
150 | "source": [
151 | "## Tracking Server\n",
152 | "\n",
153 | "Let's check the experiment logs in the tracking server!"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": []
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": null,
166 | "metadata": {},
167 | "outputs": [],
168 | "source": []
169 | }
170 | ],
171 | "metadata": {
172 | "kernelspec": {
173 | "display_name": "Python 3 (MLflow)",
174 | "language": "python",
175 | "name": "mlflow-workshop-env"
176 | },
177 | "language_info": {
178 | "codemirror_mode": {
179 | "name": "ipython",
180 | "version": 3
181 | },
182 | "file_extension": ".py",
183 | "mimetype": "text/x-python",
184 | "name": "python",
185 | "nbconvert_exporter": "python",
186 | "pygments_lexer": "ipython3",
187 | "version": "3.6.13"
188 | }
189 | },
190 | "nbformat": 4,
191 | "nbformat_minor": 5
192 | }
193 |
--------------------------------------------------------------------------------
/notebooks/3-mlflow-models-and-model-registry.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from datetime import datetime\n",
10 | "import pandas as pd\n",
11 | "import boto3\n",
12 | "import json\n",
13 | "import os\n",
14 | "\n",
15 | "import mlflow\n",
16 | "import mlflow.sagemaker as mfs\n",
17 | "from mlflow.entities import ViewType\n",
18 | "from mlflow.tracking import MlflowClient\n",
19 | "\n",
20 | "%load_ext dotenv\n",
21 | "%dotenv"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": null,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": []
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {},
34 | "source": [
35 | "# Register a model\n",
36 | "\n",
37 | "Let's use the MLflowClient object to retrieve the id of the best run for the experiment number 1, and register the model to the MLflow model registry."
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "metadata": {},
44 | "outputs": [],
45 | "source": [
46 | "# Get the best run from experiment ID 1 by r2 score\n",
47 | "run = MlflowClient().search_runs(\n",
48 | " experiment_ids=\"1\",\n",
49 | " filter_string=\"\",\n",
50 | " run_view_type=ViewType.ALL,\n",
51 | " max_results=1,\n",
52 | " order_by=[\"metrics.r2 DESC\"]\n",
53 | ")[0]"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {},
59 | "source": [
60 | "To register a model we just need to pass the path to the model and the name of the registered model. If the name doesn't exist, MLflow will create a new one."
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "result = mlflow.register_model(\n",
70 | " f\"runs:/{run.info.run_id}/model\",\n",
71 | " \"wine-quality-predictor\"\n",
72 | ")"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": null,
78 | "metadata": {},
79 | "outputs": [],
80 | "source": []
81 | },
82 | {
83 | "cell_type": "markdown",
84 | "metadata": {},
85 | "source": [
86 | "# List registered models\n",
87 | "\n",
 88 |     "The following method prints the list of registered models and the latest versions."
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {
95 | "scrolled": false
96 | },
97 | "outputs": [],
98 | "source": [
99 | "client = MlflowClient()\n",
100 | "\n",
101 | "def print_model_info(models):\n",
102 | " \"\"\"Lists models registered in MLflow's Model Registry.\"\"\"\n",
103 | " \n",
104 | " for m in models:\n",
105 | " print(\"name: {}\".format(m.name))\n",
106 | " for mv in sorted(m.latest_versions, key=lambda x: x.version):\n",
107 | " print(\n",
108 | " \"\\tversion: {}, registration date: {}, stage: {}\"\n",
109 | " .format(mv.version, \n",
110 | " datetime.fromtimestamp(mv.creation_timestamp/1000.0), \n",
111 | " mv.current_stage)\n",
112 | " )\n",
113 | " print(\"--\")\n",
114 | "\n",
115 | "print_model_info(client.list_registered_models())"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": null,
121 | "metadata": {},
122 | "outputs": [],
123 | "source": []
124 | },
125 | {
126 | "cell_type": "markdown",
127 | "metadata": {},
128 | "source": [
129 | "# Transitioning an MLflow model's stage\n",
130 | "\n",
131 | "We can also use the MLflowClient object to transition registered models between stages and add annotations."
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "metadata": {},
138 | "outputs": [],
139 | "source": [
140 | "new_stage = \"Production\"\n",
141 | "\n",
142 | "client.transition_model_version_stage(\n",
143 | " name=\"wine-quality-predictor\",\n",
144 | " version=1,\n",
145 | " stage=new_stage\n",
146 | ")"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "client.update_model_version(\n",
156 | " name=\"wine-quality-predictor\",\n",
157 | " version=1,\n",
158 | " description=f\"{new_stage} model since {datetime.today().date()}\"\n",
159 | ")"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "metadata": {
166 | "scrolled": true
167 | },
168 | "outputs": [],
169 | "source": [
170 | "print_model_info(client.list_registered_models())"
171 | ]
172 | },
173 | {
174 | "cell_type": "code",
175 | "execution_count": null,
176 | "metadata": {},
177 | "outputs": [],
178 | "source": []
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "# Serve model locally \n",
185 | "\n",
186 | "MLflow also has a CLI that supports the following commands:\n",
187 | "\n",
188 | "* `serve` deploys the model as a local REST API server.\n",
189 | "\n",
190 | "* `build_docker` packages a REST API endpoint serving the model as a docker image.\n",
191 | "\n",
192 | "* `predict` uses the model to generate a prediction for a local CSV or JSON file. Note that this method only supports DataFrame input.\n",
193 | "\n",
194 |     "We will deploy the latest production model as a local REST API server. To do so, we just need to run these commands in a terminal:\n",
195 | "\n",
196 | "* `source .env`\n",
197 | "* `mlflow models serve -m models:/wine-quality-predictor/Production --no-conda`\n",
198 | "\n",
199 | "Then from another terminal, run this to send a prediction request to the server:\n",
200 | "\n",
201 | "`curl http://127.0.0.1:5000/invocations -H 'Content-Type: application/json' -d '{\"columns\":[\"fixed acidity\",\"volatile acidity\",\"citric acid\",\"residual sugar\",\"chlorides\",\"free sulfur dioxide\",\"total sulfur dioxide\",\"density\",\"pH\",\"sulphates\",\"alcohol\"],\"index\":[82],\"data\":[[7.4,0.5,0.47,2.0,0.086,21.0,73.0,0.997,3.36,0.57,9.1]]}'`"
202 | ]
203 | },
204 | {
205 | "cell_type": "code",
206 | "execution_count": null,
207 | "metadata": {},
208 | "outputs": [],
209 | "source": []
210 | },
211 | {
212 | "cell_type": "markdown",
213 | "metadata": {},
214 | "source": [
215 | "## Deploy model in AWS Sagemaker\n",
216 | "\n",
217 |     "**ATTENTION**: *in order to deploy the model to SageMaker you will need to provide a valid execution role ARN and the image URL. These details need to be added to the .env file.*\n",
218 | "\n",
219 | "\n",
220 | "The `mlflow.sagemaker` module can deploy `python_function` models locally in a Docker container with SageMaker compatible environment and remotely on SageMaker. \n",
221 | "\n",
222 | "Usage:\n",
223 | "\n",
224 | "* `mlflow sagemaker build-and-push-container` - build the container (only needs to be called once)\n",
225 | "* `mlflow sagemaker run-local -m ` - test the model locally\n",
226 | "* `mlflow sagemaker deploy ` - deploy the model remotely\n",
227 | "\n",
228 | "Using the following code you can deploy the model to SageMaker:"
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "execution_count": null,
234 | "metadata": {},
235 | "outputs": [],
236 | "source": [
237 | "# Deployment configuration\n",
238 | "region = os.environ.get(\"AWS_DEFAULT_REGION\")\n",
239 | "aws_id = os.environ.get(\"AWS_ID\")\n",
240 | "arn = os.environ.get(\"AWS_ARN\")\n",
241 | "app_name = \"mlflow-example\"\n",
242 | "model_uri = \"models:/wine-quality-predictor/Production\"\n",
243 | "image_url = aws_id + \".dkr.ecr.\" + region + \".amazonaws.com/mlflow-pyfunc:1.14.1\"\n",
244 | "\n",
245 | "if aws_id and arn:\n",
246 | " mfs.deploy(app_name=app_name, \n",
247 | " model_uri=model_uri, \n",
248 | " region_name=region, \n",
249 | " mode=\"create\",\n",
250 | " execution_role_arn=arn,\n",
251 | " image_url=image_url)"
252 | ]
253 | },
254 | {
255 | "cell_type": "markdown",
256 | "metadata": {
257 | "scrolled": true
258 | },
259 | "source": [
260 | "### Calling the Inference Endpoint\n",
261 | "\n",
262 |     "Let's use the inference endpoint provided by Sagemaker to make predictions; we are providing two utility methods to interact with it:\n",
263 | "\n",
264 | "- check_status: checks the status of our endpoint.\n",
265 | "- query_endpoint: sends an inference request to the inference endpoint and returns the predicted values."
266 | ]
267 | },
268 | {
269 | "cell_type": "code",
270 | "execution_count": null,
271 | "metadata": {},
272 | "outputs": [],
273 | "source": [
274 | "def check_status(app_name, region):\n",
275 | " sage_client = boto3.client('sagemaker', region_name=region)\n",
276 | " endpoint_description = sage_client.describe_endpoint(EndpointName=app_name)\n",
277 | " endpoint_status = endpoint_description[\"EndpointStatus\"]\n",
278 | " return endpoint_status\n",
279 | "\n",
280 | "if aws_id and arn:\n",
281 | " print(\"Application status is: {}\".format(check_status(app_name, region)))"
282 | ]
283 | },
284 | {
285 | "cell_type": "code",
286 | "execution_count": null,
287 | "metadata": {},
288 | "outputs": [],
289 | "source": [
290 | "def query_endpoint(app_name, input_json):\n",
291 | " client = boto3.session.Session().client(\"sagemaker-runtime\", region)\n",
292 | "\n",
293 | " response = client.invoke_endpoint(\n",
294 | " EndpointName=app_name,\n",
295 | " Body=input_json,\n",
296 | " ContentType='application/json; format=pandas-split',\n",
297 | " )\n",
298 | " preds = response['Body'].read().decode(\"ascii\")\n",
299 | " preds = json.loads(preds)\n",
300 | " print(\"Received response: {}\".format(preds))\n",
301 | " return preds\n",
302 | "\n",
303 | "if aws_id and arn:\n",
304 | " query_input = '{\"columns\":[\"fixed acidity\",\"volatile acidity\",\"citric acid\",\"residual sugar\",\"chlorides\",\"free sulfur dioxide\",\"total sulfur dioxide\",\"density\",\"pH\",\"sulphates\",\"alcohol\"],\"index\":[82],\"data\":[[7.4,0.5,0.47,2.0,0.086,21.0,73.0,0.997,3.36,0.57,9.1]]}'\n",
305 | " prediction1 = query_endpoint(app_name=app_name, input_json=query_input)"
306 | ]
307 | },
308 | {
309 | "cell_type": "code",
310 | "execution_count": null,
311 | "metadata": {},
312 | "outputs": [],
313 | "source": []
314 | }
315 | ],
316 | "metadata": {
317 | "kernelspec": {
318 | "display_name": "Python 3 (MLflow)",
319 | "language": "python",
320 | "name": "mlflow-workshop-env"
321 | },
322 | "language_info": {
323 | "codemirror_mode": {
324 | "name": "ipython",
325 | "version": 3
326 | },
327 | "file_extension": ".py",
328 | "mimetype": "text/x-python",
329 | "name": "python",
330 | "nbconvert_exporter": "python",
331 | "pygments_lexer": "ipython3",
332 | "version": "3.6.13"
333 | }
334 | },
335 | "nbformat": 4,
336 | "nbformat_minor": 5
337 | }
338 |
--------------------------------------------------------------------------------
/notebooks/4-mlproject-example.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "## FILL IN YOUR NAME\n",
10 | "NAME = \"example\""
11 | ]
12 | },
13 | {
14 | "cell_type": "markdown",
15 | "metadata": {},
16 | "source": [
17 | "# Running MLprojects\n",
18 | "\n",
19 | "Any local directory or Git repository can be treated as an MLflow project. Let's run [an example project](https://github.com/mlflow/mlflow-example) from the official MLflow github repository.\n",
20 | "\n",
21 | "There are two ways to run the project:\n",
22 | "\n",
23 | "- Using the CLI: `mlflow run `\n",
24 | "- Using the Python API: `mlflow.projects.run()`\n",
25 | "\n",
26 | "For this example, we will use the Python API."
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "metadata": {},
33 | "outputs": [],
34 | "source": [
35 | "import mlflow\n",
36 | "\n",
37 | "%load_ext dotenv\n",
38 | "%dotenv\n",
39 | "\n",
40 | "project_uri = \"https://github.com/mlflow/mlflow-example\"\n",
41 | "params = {\"alpha\": 0.5, \"l1_ratio\": 0.01}\n",
42 | "\n",
43 | "# Run MLflow project and create a reproducible conda environment\n",
44 | "submitted_run = mlflow.run(project_uri, parameters=params, use_conda=False)"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": null,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "submitted_run"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "metadata": {},
60 | "outputs": [],
61 | "source": [
62 | "submitted_run.run_id"
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": null,
68 | "metadata": {},
69 | "outputs": [],
70 | "source": []
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "metadata": {},
75 | "source": [
76 | "# Retrieving Run Details\n",
77 | "\n",
78 | "Using the `submitted_run` object we can retrieve the details from the run that we just submitted. In order to do so, we will use the [Python API](https://www.mlflow.org/docs/latest/python_api/mlflow.tracking.html). In particular, we are interested in retrieving the path to the artifacts because this will be useful for us later. "
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": null,
84 | "metadata": {},
85 | "outputs": [],
86 | "source": [
87 | "from mlflow.tracking import MlflowClient\n",
88 | "\n",
 89 |     "# retrieve the run by using the MLflow client\n",
90 | "client = MlflowClient()\n",
91 | "run = client.get_run(submitted_run.run_id)"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "metadata": {},
98 | "outputs": [],
99 | "source": [
100 | "# inspect the info about the run\n",
101 | "run.info"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "metadata": {},
108 | "outputs": [],
109 | "source": [
110 | "# retrieve the run's artifacts path\n",
111 | "run.info.artifact_uri"
112 | ]
113 | },
114 | {
115 | "cell_type": "markdown",
116 | "metadata": {},
117 | "source": [
118 | "## What we just learned?\n",
119 | "\n",
120 | "* It is possible to run projects easily by using the Python API.\n",
121 | "* Projects can be stored as local folders or Git repositories.\n",
122 | "* After running a project we can use the `mlflow.tracking` module to retrieve all the information about the run.\n",
123 | "\n",
124 |     "This will be useful for the next exercise."
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": null,
130 | "metadata": {},
131 | "outputs": [],
132 | "source": []
133 | },
134 | {
135 | "cell_type": "markdown",
136 | "metadata": {},
137 | "source": [
138 | "# Defining ML pipelines with MLflow\n",
139 | "\n",
140 | "\n",
141 | "MLflow allows us to chain together multiple different runs. Each run, encapsulates a transformation or training step. For this exercise, we will run the following ML pipeline using the MLproject module:\n",
142 | "\n",
143 | "\n",
144 | "\n",
145 | "There are four entry points that make up the pipeline:\n",
146 | "\n",
147 | "* **load_raw_data.py**: Downloads the MovieLens dataset (a set of triples of user id, movie id, and rating) as a CSV and puts it into the artifact store.\n",
148 | "* **etl_data.py**: Converts the MovieLens CSV from the previous step into Parquet, dropping unnecessary columns along the way. This reduces the input size from 500 MB to 49 MB, and allows columnar access of the data.\n",
149 | "* **als.py**: Runs Alternating Least Squares for collaborative filtering on the Parquet version of MovieLens to estimate the movieFactors and userFactors. This produces a relatively accurate estimator.\n",
150 | "* **train_keras.py**: Trains a neural network on the original data, supplemented by the ALS movie/userFactors -- we hope this can improve upon the ALS estimations.\n",
151 | "\n",
152 | "### Example: multi-step workflow\n",
153 | "\n",
154 | "While we can run each of these steps manually, here we have a **driver run**, defined as the method `mlflow_pipeline` below. This method will run the steps in order, passing the results of one step to the next. \n",
155 | "\n",
156 | "We will provide you with an auxiliary method that given an entry point and some parameters launch a run using the MLflow's Python API."
157 | ]
158 | },
159 | {
160 | "cell_type": "code",
161 | "execution_count": null,
162 | "metadata": {},
163 | "outputs": [],
164 | "source": [
165 | "def _run(entrypoint: str, parameters: dict, project_dir: str = '../mlflow-project/'):\n",
166 | " \"\"\"Launches an entry point by providing the given parameters.\"\"\"\n",
167 | " \n",
168 | " print(\"Launching new run for entrypoint=%s and parameters=%s\" % (entrypoint, parameters))\n",
169 | " submitted_run = mlflow.run(project_dir, \n",
170 | " entrypoint, \n",
171 | " parameters=parameters,\n",
172 | " use_conda=False,\n",
173 | " storage_dir=\"../../data/\")\n",
174 | " \n",
175 | " client = mlflow.tracking.MlflowClient()\n",
176 | " return client.get_run(submitted_run.run_id)"
177 | ]
178 | },
179 | {
180 | "cell_type": "markdown",
181 | "metadata": {},
182 | "source": [
183 | "Here are some tips in case you want to implement the code on your own:\n",
184 | "\n",
185 | "* You can use the provided method `_run` to execute each step of the pipeline\n",
186 |     "* Make sure you are passing the correct values for `entrypoint` and `parameters`.\n",
187 | "* The entrypoint names and input parameters are defined in MLproject file located in the folder `mlflow-project` "
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "metadata": {},
194 | "outputs": [],
195 | "source": [
196 | "import os\n",
197 | "import mlflow\n",
198 | "\n",
199 | "%load_ext dotenv\n",
200 | "%dotenv\n",
201 | "\n",
202 | "# set experiment\n",
203 | "experiment_name = f\"pipeline-{NAME}\"\n",
204 | "mlflow.set_experiment(experiment_name)\n",
205 | "\n",
206 | "def mlflow_pipeline(als_max_iter, keras_hidden_units, max_row_limit):\n",
207 | " \n",
208 | " with mlflow.start_run() as active_run:\n",
209 | " os.environ[\"SPARK_CONF_DIR\"] = os.path.abspath(\".\")\n",
210 | " \n",
211 | " load_raw_data_run = _run(\"load_raw_data\", {})\n",
212 | " ratings_csv_uri = os.path.join(load_raw_data_run.info.artifact_uri, \"ratings-csv-dir\")\n",
213 | "\n",
214 | " etl_data_run = _run(\"etl_data\", {\"ratings_csv\": ratings_csv_uri, \"max_row_limit\": max_row_limit})\n",
215 | " ratings_parquet_uri = os.path.join(etl_data_run.info.artifact_uri, \"ratings-parquet-dir\")\n",
216 | "\n",
217 | " als_run = _run(\"als\", {\"ratings_data\": ratings_parquet_uri, \"max_iter\": str(als_max_iter)})\n",
218 | " als_model_uri = os.path.join(als_run.info.artifact_uri, \"als-model\")\n",
219 | "\n",
220 | " keras_params = {\n",
221 | " \"ratings_data\": ratings_parquet_uri, \n",
222 | " \"als_model_uri\": als_model_uri, \n",
223 | " \"hidden_units\": keras_hidden_units\n",
224 | " }\n",
225 | " train_keras_run = _run(\"train_keras\", keras_params)\n"
226 | ]
227 | },
228 | {
229 | "cell_type": "markdown",
230 | "metadata": {},
231 | "source": [
232 | "After completing the code, run the next cell and go to the MLflow UI to check the results :)"
233 | ]
234 | },
235 | {
236 | "cell_type": "code",
237 | "execution_count": null,
238 | "metadata": {},
239 | "outputs": [],
240 | "source": [
241 | "# once you finished with the method `mlflow_pipeline` run this line!\n",
242 | "mlflow_pipeline(als_max_iter=10, keras_hidden_units=20, max_row_limit=100000)"
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "execution_count": null,
248 | "metadata": {},
249 | "outputs": [],
250 | "source": []
251 | }
252 | ],
253 | "metadata": {
254 | "kernelspec": {
255 | "display_name": "Python 3",
256 | "language": "python",
257 | "name": "python3"
258 | },
259 | "language_info": {
260 | "codemirror_mode": {
261 | "name": "ipython",
262 | "version": 3
263 | },
264 | "file_extension": ".py",
265 | "mimetype": "text/x-python",
266 | "name": "python",
267 | "nbconvert_exporter": "python",
268 | "pygments_lexer": "ipython3",
269 | "version": "3.7.6"
270 | }
271 | },
272 | "nbformat": 4,
273 | "nbformat_minor": 5
274 | }
275 |
--------------------------------------------------------------------------------
/notebooks/experiment_tracking_template.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mac2bua/mlflow-tutorial-ai-marketplace/11445e1c59d3dabb8f8202d4a2ef8a3335f8db55/notebooks/experiment_tracking_template.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==0.12.0
2 | alembic==1.4.1
3 | appnope==0.1.2
4 | argon2-cffi==20.1.0
5 | astunparse==1.6.3
6 | async-generator==1.10
7 | attrs==20.3.0
8 | awscli==1.19.29
9 | backcall==0.2.0
10 | bleach==3.3.0
11 | boto3==1.17.29
12 | botocore==1.20.29
13 | cached-property==1.5.2
14 | cachetools==4.2.1
15 | certifi==2020.12.5
16 | cffi==1.14.5
17 | chardet==4.0.0
18 | click==7.1.2
19 | cliff==3.7.0
20 | cloudpickle==1.6.0
21 | cmaes==0.8.2
22 | cmd2==1.5.0
23 | colorama==0.4.3
24 | colorlog==5.0.1
25 | cycler==0.10.0
26 | databricks-cli==0.14.2
27 | decorator==4.4.2
28 | defusedxml==0.7.1
29 | docker==4.4.4
30 | docutils==0.15.2
31 | entrypoints==0.3
32 | Flask==1.1.2
33 | flatbuffers==1.12
34 | gast==0.3.3
35 | gitdb==4.0.5
36 | GitPython==3.1.14
37 | google-auth==1.27.1
38 | google-auth-oauthlib==0.4.3
39 | google-pasta==0.2.0
40 | grpcio==1.32.0
41 | gunicorn==20.0.4
42 | h5py==2.10.0
43 | idna==2.10
44 | importlib-metadata==3.7.2
45 | ipykernel==5.5.0
46 | ipython==7.16.1
47 | ipython-genutils==0.2.0
48 | ipywidgets==7.6.3
49 | itsdangerous==1.1.0
50 | jedi==0.18.0
51 | Jinja2==2.11.3
52 | jmespath==0.10.0
53 | joblib==1.0.1
54 | jsonschema==3.2.0
55 | jupyter==1.0.0
56 | jupyter-client==6.1.11
57 | jupyter-console==6.2.0
58 | jupyter-core==4.7.1
59 | jupyterlab-pygments==0.1.2
60 | jupyterlab-widgets==1.0.0
61 | Keras==2.4.3
62 | Keras-Preprocessing==1.1.2
63 | kiwisolver==1.3.1
64 | Mako==1.1.4
65 | Markdown==3.3.4
66 | MarkupSafe==1.1.1
67 | matplotlib==3.3.4
68 | mistune==0.8.4
69 | mlflow==1.14.1
70 | nbclient==0.5.3
71 | nbconvert==6.0.7
72 | nbformat==5.1.2
73 | nest-asyncio==1.5.1
74 | notebook==6.2.0
75 | numpy==1.19.5
76 | oauthlib==3.1.0
77 | opt-einsum==3.3.0
78 | optuna==2.7.0
79 | packaging==20.9
80 | pandas==1.1.5
81 | pandocfilters==1.4.3
82 | parso==0.8.1
83 | pbr==5.5.1
84 | pexpect==4.8.0
85 | pickleshare==0.7.5
86 | Pillow==8.1.2
87 | prettytable==2.1.0
88 | prometheus-client==0.9.0
89 | prometheus-flask-exporter==0.18.1
90 | prompt-toolkit==3.0.16
91 | protobuf==3.15.5
92 | ptyprocess==0.7.0
93 | pyasn1==0.4.8
94 | pyasn1-modules==0.2.8
95 | pycparser==2.20
96 | Pygments==2.8.1
97 | pyparsing==2.4.7
98 | pyperclip==1.8.2
99 | pyrsistent==0.17.3
100 | python-dateutil==2.8.1
101 | python-dotenv==0.15.0
102 | python-editor==1.0.4
103 | pytz==2021.1
104 | PyYAML==5.4.1
105 | pyzmq==22.0.3
106 | qtconsole==5.0.2
107 | QtPy==1.9.0
108 | querystring-parser==1.2.4
109 | requests==2.25.1
110 | requests-oauthlib==1.3.0
111 | rsa==4.5
112 | s3transfer==0.3.4
113 | scikit-learn==0.24.1
114 | scipy==1.5.4
115 | Send2Trash==1.5.0
116 | six==1.15.0
117 | smmap==3.0.5
118 | SQLAlchemy==1.3.23
119 | sqlparse==0.4.1
120 | stevedore==3.3.0
121 | tabulate==0.8.9
122 | tensorboard==2.4.1
123 | tensorboard-plugin-wit==1.8.0
124 | tensorflow==2.4.1
125 | tensorflow-estimator==2.4.0
126 | termcolor==1.1.0
127 | terminado==0.9.2
128 | testpath==0.4.4
129 | threadpoolctl==2.1.0
130 | torch==1.8.0
131 | torchvision==0.9.0
132 | tornado==6.1
133 | tqdm==4.60.0
134 | traitlets==4.3.3
135 | typing-extensions==3.7.4.3
136 | urllib3==1.26.3
137 | wcwidth==0.2.5
138 | webencodings==0.5.1
139 | websocket-client==0.58.0
140 | Werkzeug==1.0.1
141 | widgetsnbextension==3.5.1
142 | wrapt==1.12.1
143 | zipp==3.4.1
144 |
--------------------------------------------------------------------------------
/src/0_hello_world.py:
--------------------------------------------------------------------------------
import os
import random
import mlflow
from dotenv import load_dotenv, find_dotenv

# Pull MLflow/AWS settings (tracking URI, S3 credentials) from the .env file.
load_dotenv()


if __name__ == '__main__':

    # Record two example hyperparameters on the active run.
    for param_name in ("alpha", "beta"):
        mlflow.log_param(param_name, 0.5)

    # Emit a toy metric series: one noisy, increasing point per step.
    for step in range(10):
        mlflow.log_metric("metric_1", random.random() + step, step=step)

    # Persist a dummy "model" file and attach it to the run as an artifact.
    os.makedirs("models", exist_ok=True)
    with open("models/model.txt", "w") as handle:
        handle.write("hello world!")

    mlflow.log_artifact("models/model.txt")
--------------------------------------------------------------------------------
/src/1_sklearn_example.py:
--------------------------------------------------------------------------------
1 | # The data set used in this example is from http://archive.ics.uci.edu/ml/datasets/Wine+Quality
2 | # P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis.
3 | # Modeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.
4 |
5 | import os
6 | import warnings
7 | import sys
8 |
9 | import pandas as pd
10 | import numpy as np
11 | import pickle
12 | from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
13 | from sklearn.model_selection import train_test_split
14 | from sklearn.linear_model import ElasticNet
15 | from urllib.parse import urlparse
16 | import mlflow
17 | import mlflow.sklearn
18 | from dotenv import load_dotenv
19 |
20 | import logging
21 |
# Only warnings and above are shown; pandas/sklearn info noise is suppressed.
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)

# Pull MLflow/AWS settings (tracking URI, S3 credentials) from the .env file.
load_dotenv()

# All runs from this script are grouped under a single MLflow experiment.
experiment_name = "sklearn-example"
mlflow.set_experiment(experiment_name)
29 |
30 |
def eval_metrics(actual, pred):
    """Return the (rmse, mae, r2) regression metrics for `pred` vs. `actual`."""
    squared_error = mean_squared_error(actual, pred)
    return (
        np.sqrt(squared_error),
        mean_absolute_error(actual, pred),
        r2_score(actual, pred),
    )
36 |
37 |
if __name__ == "__main__":

    warnings.filterwarnings("ignore")
    np.random.seed(40)

    # Read the wine-quality csv file from the URL
    csv_url = (
        "http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
    )
    try:
        data = pd.read_csv(csv_url, sep=";")
    except Exception as e:
        logger.exception(
            "Unable to download training & test CSV, check your internet connection. Error: %s", e
        )
        # FIX: fail fast. Previously the script fell through here and later
        # crashed with a confusing NameError on the undefined `data`.
        sys.exit(1)

    # Split the data into training and test sets. (0.75, 0.25) split.
    train, test = train_test_split(data)

    # The predicted column is "quality" which is a scalar from [3, 9]
    train_x = train.drop(["quality"], axis=1)
    test_x = test.drop(["quality"], axis=1)
    train_y = train[["quality"]]
    test_y = test[["quality"]]

    # Hyperparameters may be supplied on the command line: <alpha> <l1_ratio>.
    alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.5
    l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.5

    with mlflow.start_run() as run:

        lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
        lr.fit(train_x, train_y)

        predicted_qualities = lr.predict(test_x)

        (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)

        print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
        print("  RMSE: %s" % rmse)
        print("  MAE: %s" % mae)
        print("  R2: %s" % r2)

        # Track hyperparameters and evaluation metrics on the run.
        mlflow.log_param("alpha", alpha)
        mlflow.log_param("l1_ratio", l1_ratio)
        mlflow.log_metric("rmse", rmse)
        mlflow.log_metric("r2", r2)
        mlflow.log_metric("mae", mae)

        # Store the fitted model as an MLflow artifact under this run.
        mlflow.sklearn.log_model(lr, artifact_path='wine-quality-predictor')
--------------------------------------------------------------------------------
/src/1_sklearn_optuna.py:
--------------------------------------------------------------------------------
1 | """
2 | Source: https://github.com/optuna/optuna/blob/master/examples/mlflow/keras_mlflow.py
3 |
4 | Optuna example that optimizes a neural network regressor for the
5 | wine quality dataset using Keras and records hyperparameters and metrics using MLflow.
6 | In this example, we optimize the learning rate and momentum of
7 | stochastic gradient descent optimizer to minimize the validation mean squared error
8 | for the wine quality regression.
9 | """
10 |
11 | from keras.backend import clear_session
12 | from keras.layers import Dense
13 | from keras.models import Sequential
14 | from keras.optimizers import SGD
15 | from sklearn.datasets import load_wine
16 | from sklearn.model_selection import train_test_split
17 | from sklearn.preprocessing import StandardScaler
18 | from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
19 |
20 | import mlflow
21 | import optuna
22 | import pickle
23 |
24 | from dotenv import load_dotenv
25 | import numpy as np
26 |
27 | # load environment variables
28 | load_dotenv()
29 |
TEST_SIZE = 0.25  # fraction of samples held out for validation/test
BATCHSIZE = 16  # minibatch size used for every Keras fit
EPOCHS = 100  # training epochs per Optuna trial and for the final model
DEVELOPER_NAME = "ilia"  # value logged as the `developer` tag on the final run
34 |
def standardize(data):
    """Z-score `data` with a freshly fitted StandardScaler and return the result."""
    scaler = StandardScaler()
    return scaler.fit_transform(data)
37 |
38 |
def create_model(num_features, trial):
    """Build a small MLP regressor whose SGD settings are sampled from `trial`.

    Parameters
    ----------
    num_features : int
        Width of the input layer (one unit per feature).
    trial : optuna.Trial
        Used to sample the learning rate (log scale) and momentum.

    Returns
    -------
    A compiled Keras `Sequential` model with MSE loss.
    """
    model = Sequential()
    model.add(
        Dense(
            num_features,
            activation="relu",
            kernel_initializer="normal",
            input_shape=(num_features,),
        )
    )  # FIX: removed a stray trailing comma that wrapped this statement in a 1-tuple
    model.add(Dense(16, activation="relu", kernel_initializer="normal"))
    model.add(Dense(16, activation="relu", kernel_initializer="normal"))
    model.add(Dense(1, kernel_initializer="normal", activation="linear"))

    # Sample the optimizer hyperparameters; the learning rate is searched
    # on a log scale, as is conventional.
    optimizer = SGD(
        lr=trial.suggest_float("lr", 1e-5, 1e-1, log=True),
        momentum=trial.suggest_float("momentum", 0.0, 1.0),
    )
    model.compile(loss="mean_squared_error", optimizer=optimizer)
    return model
59 |
60 |
def mlflow_callback(study, trial):
    """Optuna callback: mirror each finished trial as its own MLflow run."""
    # Unfinished/failed trials have no value; log NaN so the run still appears.
    value = float("nan") if trial.value is None else trial.value
    with mlflow.start_run(run_name=study.study_name):
        mlflow.log_params(trial.params)
        mlflow.log_metrics({"rmse": np.sqrt(value)})
66 |
def objective(trial):
    """Optuna objective: train an MLP on the wine data and return validation MSE."""
    # Clear clutter from previous Keras session graphs.
    clear_session()

    features, target = load_wine(return_X_y=True)
    features = standardize(features)
    x_tr, x_val, y_tr, y_val = train_test_split(
        features, target, test_size=TEST_SIZE, random_state=42
    )

    net = create_model(features.shape[1], trial)
    net.fit(x_tr, y_tr, shuffle=True, batch_size=BATCHSIZE, epochs=EPOCHS, verbose=False)

    # Loss is MSE, so this is exactly the quantity Optuna minimizes.
    return net.evaluate(x_val, y_val, verbose=0)
81 |
82 |
if __name__ == "__main__":
    # Stage 1: hyperparameter search. Each trial is mirrored to MLflow by
    # `mlflow_callback` under the "optuna-example" experiment.
    mlflow.set_experiment("optuna-example")

    study = optuna.create_study(study_name='keras-optuna')
    study.optimize(objective, n_trials=10, timeout=600, callbacks=[mlflow_callback])

    print("Number of finished trials: {}".format(len(study.trials)))

    print("Best trial:")
    trial = study.best_trial

    print("  Value: {}".format(trial.value))

    print("  Params: ")
    for key, value in trial.params.items():
        print("    {}: {}".format(key, value))

    # Stage 2: retrain with the best hyperparameters and log the scaler,
    # params, metrics, and model under the "sklearn-example" experiment.
    mlflow.set_experiment("sklearn-example")
    with mlflow.start_run():
        mlflow.set_tag("developer", DEVELOPER_NAME)

        # Preprocess dataset
        X, y = load_wine(return_X_y=True)
        scaler = StandardScaler()
        X = scaler.fit_transform(X)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=TEST_SIZE, random_state=42
        )

        # Log artifacts. FIX: use a `with` block so the pickle file is flushed
        # and closed before MLflow uploads it; the original
        # `pickle.dump(scaler, open(...))` leaked the file handle.
        with open("scaler.pkl", "wb") as scaler_file:
            pickle.dump(scaler, scaler_file)
        mlflow.log_artifact("scaler.pkl", artifact_path='preprocessing')

        # Log hyperparameters
        mlflow.log_params(trial.params)

        model = create_model(X.shape[1], trial)
        model.fit(
            X_train, y_train,
            shuffle=True,
            batch_size=BATCHSIZE,
            epochs=EPOCHS,
            verbose=False
        )

        print(model.evaluate(X_test, y_test, verbose=0))

        y_pred = model.predict(X_test)

        def eval_metrics(actual, pred):
            """Return (rmse, mae, r2) for predictions vs. ground truth."""
            rmse = np.sqrt(mean_squared_error(actual, pred))
            mae = mean_absolute_error(actual, pred)
            r2 = r2_score(actual, pred)
            return rmse, mae, r2

        # Evaluate trained model
        (rmse, mae, r2) = eval_metrics(y_test, y_pred)

        # Log metrics
        mlflow.log_metrics({
            'rmse': rmse,
            'mae': mae,
            'r2': r2
        })

        mlflow.keras.log_model(model, artifact_path='model')
--------------------------------------------------------------------------------
/src/2_keras_example.py:
--------------------------------------------------------------------------------
"""Train and evaluate a simple MLP on the Reuters newswire
topic classification task, with MLflow autologging enabled.
"""
import numpy as np
import keras
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
from dotenv import load_dotenv

# The following import and function call are the only additions to code required
# to automatically log metrics and parameters to MLflow.
import mlflow.keras

# Pull MLflow/AWS settings from the .env file.
load_dotenv()

# All runs from this script are grouped under a single MLflow experiment.
experiment_name = "keras-example"
mlflow.set_experiment(experiment_name)

# Autologging captures the model topology, optimizer config, and per-epoch metrics.
mlflow.keras.autolog()

# Training configuration.
max_words = 1000  # vocabulary size kept by the tokenizer
batch_size = 32
epochs = 5

print("Loading data...")
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words, test_split=0.2)

print(len(x_train), "train sequences")
print(len(x_test), "test sequences")

# Labels are 0-based topic ids, so the class count is the max id + 1.
num_classes = np.max(y_train) + 1
print(num_classes, "classes")

print("Vectorizing sequence data...")
tokenizer = Tokenizer(num_words=max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode="binary")
x_test = tokenizer.sequences_to_matrix(x_test, mode="binary")
print("x_train shape:", x_train.shape)
print("x_test shape:", x_test.shape)

print("Convert class vector to binary class matrix " "(for use with categorical_crossentropy)")
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print("y_train shape:", y_train.shape)
print("y_test shape:", y_test.shape)

print("Building model...")
# Same architecture as before, expressed as a layer list.
model = Sequential([
    Dense(512, input_shape=(max_words,)),
    Activation("relu"),
    Dropout(0.5),
    Dense(num_classes),
    Activation("softmax"),
])

model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

history = model.fit(
    x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.1
)
score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
print("Test score:", score[0])
print("Test accuracy:", score[1])
--------------------------------------------------------------------------------
/src/4_mlproject_example.py:
--------------------------------------------------------------------------------
1 | """
2 | Downloads the MovieLens dataset, ETLs it into Parquet, trains an
3 | ALS model, and uses the ALS model to train a Keras neural network.
4 | See README.rst for more details.
5 | """
6 |
7 | import click
8 | import os
9 | from dotenv import load_dotenv
10 |
11 | import mlflow
12 | from mlflow.utils import mlflow_tags
13 | from mlflow.entities import RunStatus
14 | from mlflow.utils.logging_utils import eprint
15 |
16 | from mlflow.tracking.fluent import _get_experiment_id
17 |
# Load tracking-server / S3 credentials from .env before touching MLflow.
load_dotenv()

# All pipeline step runs are grouped under one named experiment.
experiment_name = "pipeline-example"
mlflow.set_experiment(experiment_name)
22 |
def _run(entrypoint: str, parameters: dict, project_dir: str = '../mlflow-project/'):
    """Execute one MLproject entry point and return its finished run.

    Launches *entrypoint* from *project_dir* synchronously via ``mlflow.run``
    with the given *parameters*, then fetches the resulting run record from
    the tracking server so callers can read its artifact URI and metadata.
    """
    print("Launching new run for entrypoint=%s and parameters=%s" % (entrypoint, parameters))
    submitted = mlflow.run(project_dir, entrypoint, parameters=parameters, storage_dir='../data')
    # Resolve the submitted run into a full Run entity from the tracking server.
    return mlflow.tracking.MlflowClient().get_run(submitted.run_id)
31 |
32 |
@click.command()
@click.option("--als-max-iter", default=10, type=int)
@click.option("--keras-hidden-units", default=20, type=int)
@click.option("--max-row-limit", default=100000, type=int)
def mlflow_pipeline(als_max_iter, keras_hidden_units, max_row_limit):
    """Run the four-step MovieLens pipeline as chained MLflow runs.

    Each step is an MLproject entrypoint (names defined in MLproject; artifact
    directories documented by each step's .py file). The artifact URI produced
    by one step is passed as a parameter to the next.
    """
    # Parent run grouping the child step runs; the run object itself is unused.
    with mlflow.start_run():
        # Point Spark at the project-local spark-defaults.conf.
        os.environ["SPARK_CONF_DIR"] = os.path.abspath(".")

        # Step 1: download the raw MovieLens ratings CSV.
        load_raw_data_run = _run("load_raw_data", {})
        ratings_csv_uri = os.path.join(load_raw_data_run.info.artifact_uri, "ratings-csv-dir")

        # Step 2: ETL the CSV into Parquet, capped at max_row_limit rows.
        # MLproject parameters are string-typed, so numeric options are
        # stringified consistently for every step (previously only max_iter was).
        etl_data_run = _run("etl_data", {"ratings_csv": ratings_csv_uri, "max_row_limit": str(max_row_limit)})
        ratings_parquet_uri = os.path.join(etl_data_run.info.artifact_uri, "ratings-parquet-dir")

        # Step 3: train the ALS collaborative-filtering model.
        als_run = _run("als", {"ratings_data": ratings_parquet_uri, "max_iter": str(als_max_iter)})
        als_model_uri = os.path.join(als_run.info.artifact_uri, "als-model")

        # Step 4: train the Keras network on top of the ALS model's output.
        keras_params = {
            "ratings_data": ratings_parquet_uri,
            "als_model_uri": als_model_uri,
            "hidden_units": str(keras_hidden_units),
        }
        _run("train_keras", keras_params)
58 |
59 |
# Entry point: click parses the CLI options and invokes the pipeline.
if __name__ == "__main__":
    mlflow_pipeline()
62 |
--------------------------------------------------------------------------------