├── .gitignore
├── .graphqlconfig
├── .run
│   ├── pytest for test_pytorch_methods.test_torch_load.run.xml
│   ├── pytest for test_pytorch_methods.test_torch_load_sync.run.xml
│   ├── pytest for test_pytorch_methods.test_torch_save.run.xml
│   ├── pytest in test_pytorch_methods.py.run.xml
│   └── train.run.xml
├── LICENSE.md
├── MANIFEST.in
├── Makefile
├── README
├── README.md
├── VERSION
├── dash_server_specs
│   ├── .gitignore
│   ├── __init__.py
│   ├── conftest.py
│   ├── create_experiments.py
│   └── test_ml_dash.py
├── docs
│   ├── .gitignore
│   ├── Makefile
│   ├── authors.rst
│   ├── conf.py
│   ├── contributing.rst
│   ├── history.rst
│   ├── index.rst
│   ├── installation.rst
│   ├── make.bat
│   ├── readme.rst
│   ├── requirements.txt
│   └── usage.rst
├── figures
│   ├── example_log_output.png
│   ├── hyperparameter-column.gif
│   ├── logger_color_output.png
│   ├── logging_images.png
│   ├── ml-dash-v0.1.0.png
│   ├── ml-dash-v3.gif
│   ├── ml_visualization_dashboard_preview.png
│   ├── tensorboard_example.png
│   └── visualization_column.png
├── ml_dash
│   ├── .gitignore
│   ├── __init__.py
│   ├── app.py
│   ├── config.py
│   ├── example.py
│   ├── file_events.py
│   ├── file_handlers.py
│   ├── file_utils.py
│   ├── file_watcher.py
│   ├── mime_types.py
│   ├── schema
│   │   ├── __init__.py
│   │   ├── directories.py
│   │   ├── experiments.py
│   │   ├── files
│   │   │   ├── __init__.py
│   │   │   ├── file_helpers.py
│   │   │   ├── images.py
│   │   │   ├── metrics.py
│   │   │   ├── parameters.py
│   │   │   ├── series.py
│   │   │   └── videos.py
│   │   ├── helpers.py
│   │   ├── projects.py
│   │   ├── schema_helpers.py
│   │   └── users.py
│   ├── server.py
│   └── sse.py
├── notes
│   ├── ML-Dash Enhancement Plans.md
│   ├── README.md
│   ├── client design doc.md
│   ├── dashboard design doc.md
│   └── setting_up_dash_server.md
├── requirements-dev.txt
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
3 | __generated__
4 | .pytest_cache
5 | # Created by .ignore support plugin (hsz.mobi)
6 | ### JetBrains template
7 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
8 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
9 |
10 | # pytest output directories
11 | test-logs
12 |
13 | # User-specific stuff:
14 | .idea
15 | runs
16 |
17 |
18 | ## File-based project format:
19 | *.iws
20 |
21 | ## Plugin-specific files:
22 |
23 | # IntelliJ
24 | /out/
25 |
26 | # mpeltonen/sbt-idea plugin
27 | .idea_modules/
28 |
29 | # JIRA plugin
30 | atlassian-ide-plugin.xml
31 |
32 | # Crashlytics plugin (for Android Studio and IntelliJ)
33 | com_crashlytics_export_strings.xml
34 | crashlytics.properties
35 | crashlytics-build.properties
36 | fabric.properties
37 | ### Python template
38 | # Byte-compiled / optimized / DLL files
39 | __pycache__/
40 | *.py[cod]
41 | *$py.class
42 |
43 | # C extensions
44 | *.so
45 |
46 | # Distribution / packaging
47 | .Python
48 | env/
49 | build/
50 | develop-eggs/
51 | dist/
52 | downloads/
53 | eggs/
54 | .eggs/
55 | #lib/
56 | lib64/
57 | parts/
58 | sdist/
59 | var/
60 | wheels/
61 | *.egg-info/
62 | .installed.cfg
63 | *.egg
64 |
65 | # PyInstaller
66 | # Usually these files are written by a python script from a template
67 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
68 | *.manifest
69 | *.spec
70 |
71 | # Installer logs
72 | pip-log.txt
73 | pip-delete-this-directory.txt
74 |
75 | # Unit test / coverage reports
76 | htmlcov/
77 | .tox/
78 | .coverage
79 | .coverage.*
80 | .cache
81 | nosetests.xml
82 | coverage.xml
83 | *,cover
84 | .hypothesis/
85 |
86 | # Translations
87 | *.mo
88 | *.pot
89 |
90 | # Django stuff:
91 | *.log
92 | local_settings.py
93 |
94 | # Flask stuff:
95 | instance/
96 | .webassets-cache
97 |
98 | # Scrapy stuff:
99 | .scrapy
100 |
101 | # Sphinx documentation
102 | docs/_build/
103 |
104 | # PyBuilder
105 | target/
106 |
107 | # Jupyter Notebook
108 | .ipynb_checkpoints
109 |
110 | # pyenv
111 | .python-version
112 |
113 | # celery beat schedule file
114 | celerybeat-schedule
115 |
116 | # SageMath parsed files
117 | *.sage.py
118 |
119 | # dotenv
120 | .env
121 |
122 | # virtualenv
123 | .venv
124 | venv/
125 | ENV/
126 |
127 | # Spyder project settings
128 | .spyderproject
129 |
130 | # Rope project settings
131 | .ropeproject
132 |
133 |
--------------------------------------------------------------------------------
/.graphqlconfig:
--------------------------------------------------------------------------------
1 | {
2 |   "schemaPath": "ml-dash-client/schema.graphql",
3 |   "extensions": {
4 |     "endpoints": {
5 |       "http://localhost:8091/graphql": {
6 |         "url": "http://localhost:8091/graphql",
7 |         "headers": {
8 |           "user-agent": "JS GraphQL"
9 |         }
10 |       }
11 |     }
12 |   }
13 | }
14 |
--------------------------------------------------------------------------------
/.run/pytest for test_pytorch_methods.test_torch_load.run.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.run/pytest for test_pytorch_methods.test_torch_load_sync.run.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.run/pytest for test_pytorch_methods.test_torch_save.run.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.run/pytest in test_pytorch_methods.py.run.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.run/train.run.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | BSD License
2 |
3 | For ML-Logger software
4 |
5 | Copyright (c) 2017-present, Ge Yang. All rights reserved.
6 |
7 | Redistribution and use in source and binary forms, with or without modification,
8 | are permitted provided that the following conditions are met:
9 |
10 | * Redistributions of source code must retain the above copyright notice, this
11 | list of conditions and the following disclaimer.
12 |
13 | * Redistributions in binary form must reproduce the above copyright notice,
14 | this list of conditions and the following disclaimer in the documentation
15 | and/or other materials provided with the distribution.
16 |
17 | * Neither the name Ge Yang nor the names of its contributors may be used to
18 | endorse or promote products derived from this software without specific
19 | prior written permission.
20 |
21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
22 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
25 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
28 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include VERSION
2 | recursive-include ml_dash/client-dist *.*
3 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: build
2 | # shell option to use extended glob, from https://stackoverflow.com/a/6922447/1560241
3 | SHELL:=/bin/bash -O extglob
4 |
5 | VERSION=`< VERSION`
6 |
7 | author=$(Ge Yang)
8 | author_email=$(yangge1987@gmail.com)
9 |
10 | # notes on python packaging: http://python-packaging.readthedocs.io/en/latest/minimal.html
11 | default: publish release
12 | build:
13 | 	rm -rf dist
14 | 	python setup.py sdist
15 | dev:
16 | 	make build
17 | 	pip install --ignore-installed dist/ml_dash*.whl
18 | convert-rst:
19 | 	pandoc -s README.md -o README --to=rst
20 | 	sed -i '' 's/code/code-block/g' README
21 | 	sed -i '' 's/\.\. code-block:: log/.. code-block:: text/g' README
22 | 	sed -i '' 's/\.\//https\:\/\/github\.com\/episodeyang\/ml_logger\/tree\/master\/ml-dash-server\//g' README
23 | 	perl -p -i -e 's/\.(jpg|png|gif)/.$$1?raw=true/' README
24 | 	rst-lint README
25 | resize: # from https://stackoverflow.com/a/28221795/1560241
26 | 	echo ./figures/!(*resized).jpg
27 | 	convert ./figures/!(*resized).jpg -resize 888x1000 -set filename:f '%t' ./figures/'%[filename:f]_resized.jpg'
28 | update-doc: convert-rst
29 | 	python setup.py sdist upload
30 | release:
31 | 	git tag v$(VERSION) -m '$(msg)'
32 | 	git push origin --tags
33 | publish: convert-rst
34 | 	make test
35 | 	make build
36 | 	twine upload dist/*
37 | publish-no-test: convert-rst
38 | 	make build
39 | 	twine upload dist/*
40 | start: # dev start: use sanic to bootstrap.
41 | 	source activate playground && python -m ml_dash.main --host=0.0.0.0 --port=8081 --workers=4 --logdir="tests/runs"
42 | test:
43 | 	python -m pytest dash_server_specs/test_ml_dash.py --capture=no
44 |
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 | ML-Dash, A Beautiful Visualization Dashboard for ML
2 | ===================================================
3 |
4 | |Downloads|
5 |
6 | *For detailed documentation, see*
7 | `ml-dash-tutorial <https://ml-logger.readthedocs.io/en/latest/setting_up.html#ml-dash-tutorial>`__
8 |
9 | ML-dash replaces visdom and tensorboard. It allows you to see real-time
10 | updates, review 1000+ experiments quickly, and dive in-depth into
11 | individual experiments with minimum mental effort.
12 |
13 | - **Parallel Coordinates**
14 | - **Aggregating Over Multiple Runs (with different seeds)**
15 | - **Preview Videos, ``matplotlib`` figures, and images.**
16 |
17 | Usage
18 | -----
19 |
20 | To make sure you **install** the newest version of ``ml_dash``:
21 |
22 | .. code-block:: bash
23 |
24 |    conda install pycurl
25 |    pip install ml-logger ml-dash --upgrade --no-cache
26 |
27 | Just doing this would not work. The landscape of Python modules is a lot
28 | messier than that of JavaScript. The most up-to-date graphene requires
29 | the following versioned dependencies:
30 |
31 | .. code-block:: zsh
32 |
33 |    pip install sanic==20.9.0
34 |    pip install sanic-cors==0.10.0.post3
35 |    pip install sanic-graphql==1.1.0
36 |    yes | pip install graphene==2.1.3
37 |    yes | pip install graphql-core==2.1
38 |    yes | pip install graphql-relay==0.4.5
39 |    yes | pip install graphql-server-core==1.1.1
40 |
41 | There are two servers:
42 |
43 | 1. a server that serves the static web-application files, ``ml_dash.app``
44 |
45 |    This is just a static server that serves the web application client.
46 |
47 |    To run this:
48 |
49 |    .. code-block:: bash
50 |
51 |       python -m ml_dash.app
52 |
53 | 2. the visualization backend, ``ml_dash.server``
54 |
55 |    This server usually lives on your logging server. It offers a
56 |    ``graphQL`` API backend for the dashboard client.
57 |
58 |    .. code-block:: bash
59 |
60 |       python -m ml_dash.server --logdir=my/folder
61 |
62 |    **Note: the server accepts requests from ``localhost`` only by
63 |    default for safety reasons.** To override this, see the
64 |    documentation here:
65 |    `ml-dash-tutorial <https://ml-logger.readthedocs.io/en/latest/setting_up.html#ml-dash-tutorial>`__
66 |
67 | Implementation Notes
68 | ~~~~~~~~~~~~~~~~~~~~
69 |
70 | See `https://github.com/episodeyang/ml_logger/tree/master/ml-dash-server/notes/README.md <https://github.com/episodeyang/ml_logger/tree/master/ml-dash-server/notes/README.md>`__
71 |
72 | .. |Downloads| image:: http://pepy.tech/badge/ml-dash
73 |    :target: http://pepy.tech/project/ml-dash
74 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ML-Dash, A Beautiful Visualization Dashboard for ML
2 |
3 | [![Downloads](http://pepy.tech/badge/ml-dash)](http://pepy.tech/project/ml-dash)
4 |
5 |
6 |
7 | *For detailed documentation, see [ml-dash-tutorial]*
8 |
9 | [ml-dash-tutorial]: https://ml-logger.readthedocs.io/en/latest/setting_up.html#ml-dash-tutorial
10 |
11 | ML-dash replaces visdom and tensorboard. It allows you to see real-time updates, review 1000+
12 | experiments quickly, and dive in-depth into individual experiments with minimum mental effort.
13 |
14 | - **Parallel Coordinates**
15 | - **Aggregating Over Multiple Runs (with different seeds)**
16 | - **Preview Videos, `matplotlib` figures, and images.**
17 |
18 | ## Usage
19 |
20 | To make sure you **install** the newest version of `ml_dash`:
21 |
22 | ```bash
23 | conda install pycurl
24 | pip install ml-logger ml-dash --upgrade --no-cache
25 | ```
26 |
27 | Just doing this would not work. The landscape of Python modules is a lot messier than that of JavaScript. The most up-to-date graphene requires the following versioned dependencies:
28 |
29 | ```zsh
30 | pip install sanic==20.9.0
31 | pip install sanic-cors==0.10.0.post3
32 | pip install sanic-graphql==1.1.0
33 | yes | pip install graphene==2.1.3
34 | yes | pip install graphql-core==2.1
35 | yes | pip install graphql-relay==0.4.5
36 | yes | pip install graphql-server-core==1.1.1
37 | ```
38 |
39 | There are two servers:
40 |
41 | 1. a server that serves the static web-application files, `ml_dash.app`
42 |
43 |    This is just a static server that serves the web application client.
44 |
45 |    To run this:
46 |
47 |    ```bash
48 |    python -m ml_dash.app
49 |    ```
50 |
51 | 2. the visualization backend, `ml_dash.server`
52 |
53 |    This server usually lives on your logging server. It offers a `graphQL`
54 |    API backend for the dashboard client.
55 |
56 |    ```bash
57 |    python -m ml_dash.server --logdir=my/folder
58 |    ```
59 |
60 |    **Note: the server accepts requests from `localhost` only by default
61 |    for safety reasons.** To override this, see the documentation here:
62 |    [ml-dash-tutorial]
63 |
64 |
65 | ### Implementation Notes
66 |
67 | See [./notes/README.md](./notes/README.md)
68 |
--------------------------------------------------------------------------------
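
Once both commands are running, the dashboard client talks to the backend over plain GraphQL-over-HTTP. Below is a minimal sketch of the same round trip from Python, assuming the backend is reachable at the `http://localhost:8091/graphql` endpoint named in `.graphqlconfig` (your actual `--port` may differ) and borrowing the query shape from `dash_server_specs/test_ml_dash.py`:

```python
# Sketch: query the ml_dash.server GraphQL backend directly.
# Assumption: the server is up and listening at the endpoint from .graphqlconfig.
import requests

QUERY = """
query AppQuery ($username: String) {
  user (username: $username) { username name }
}
"""

resp = requests.post(
    "http://localhost:8091/graphql",
    json={"query": QUERY, "variables": {"username": "episodeyang"}},
)
resp.raise_for_status()
print(resp.json())  # {"data": {"user": {...}}} on success
```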
/VERSION:
--------------------------------------------------------------------------------
1 | 0.3.27
2 |
--------------------------------------------------------------------------------
/dash_server_specs/.gitignore:
--------------------------------------------------------------------------------
1 | runs
2 |
--------------------------------------------------------------------------------
/dash_server_specs/__init__.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 |
3 | from termcolor import cprint
4 |
5 |
6 | def setup_yaml():
7 |     """ https://stackoverflow.com/a/8661021 """
8 |     import yaml
9 |
10 |     def represent_dict_order(self, data):
11 |         return self.represent_mapping('tag:yaml.org,2002:map', data.items())
12 |
13 |     yaml.add_representer(OrderedDict, represent_dict_order)
14 |
15 |     cprint('yaml is setup', 'green')
16 |
17 |
18 | setup_yaml()
19 |
20 |
21 | def show(obj):
22 |     import yaml
23 |     print(yaml.dump(obj))
24 |
25 |
26 | def shows(obj):
27 |     import yaml
28 |     return yaml.dump(obj)
29 |
--------------------------------------------------------------------------------
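
`setup_yaml()` above registers a representer so that `OrderedDict` dumps as a plain YAML mapping with insertion order preserved; `show`/`shows` are the pretty-printers the spec suite calls on query results. A small usage sketch:

```python
# Sketch: the spec-suite helpers in action.
from collections import OrderedDict
from dash_server_specs import show, shows

record = OrderedDict([("name", "experiment_00"), ("lr", 0.01), ("seed", 0)])
show(record)                     # prints the mapping as YAML, keys in insertion order
assert "name" in shows(record)   # shows() returns the same YAML as a string
```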
/dash_server_specs/conftest.py:
--------------------------------------------------------------------------------
1 | from os.path import expanduser
2 |
3 | TEST_LOG_DIR = expanduser('~/ml-logger-debug')
4 |
5 |
6 | def pytest_addoption(parser):
7 |     parser.addoption('--logdir',
8 |                      action='store',
9 |                      default=TEST_LOG_DIR,
10 |                      help="The logging path for the test.")
11 |
--------------------------------------------------------------------------------
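
The `--logdir` option registered here is what the `log_dir` fixture in `test_ml_dash.py` reads. A sketch of driving the suite programmatically with the same flags as the Makefile's `test` target (passing `--logdir` explicitly is redundant with the conftest default and shown only for illustration):

```python
# Sketch: run the dash_server_specs suite against a chosen log directory.
# Equivalent to: python -m pytest dash_server_specs/test_ml_dash.py --capture=no
from os.path import expanduser
import pytest

exit_code = pytest.main([
    "dash_server_specs/test_ml_dash.py",
    "--capture=no",
    "--logdir", expanduser("~/ml-logger-debug"),  # conftest.py's TEST_LOG_DIR default
])
```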
/dash_server_specs/create_experiments.py:
--------------------------------------------------------------------------------
1 | from os.path import expanduser
2 |
3 | import numpy as np
4 |
5 | from ml_logger import logger
6 |
7 | DEBUG_DIR = expanduser('~/ml-logger-debug')
8 |
9 | if __name__ == "__main__":
10 |     from scipy.misc import face
11 |
12 |     fn = lambda x: np.random.rand() + (1 + 0.001 * x) * np.sin(x * 0.1 / np.pi)
13 |     fn_1 = lambda x: np.random.rand() + (1 + 0.001 * x) * np.sin(x * 0.04 / np.pi)
14 |
15 |     for username in ["episodeyang", "amyzhang"]:
16 |         for project in ['cpc-belief', 'playground']:
17 |             for i in range(2):
18 |                 prefix = f"{username}/{project}/{'mdp/' if i < 5 else '/'}experiment_{i:02d}"
19 |                 logger.remove(prefix)
20 |
21 |                 logger.configure(root=DEBUG_DIR, prefix=prefix)
22 |
23 |                 logger.log_params(Args=dict(lr=10 ** (-2 - i),
24 |                                             weight_decay=0.001,
25 |                                             gradient_clip=0.9,
26 |                                             env_id="GoalMassDiscreteIdLess-v0",
27 |                                             seed=int(i * 100)))
28 |                 for ep in range(50 + 1):
29 |                     logger.log_metrics(epoch=ep, sine=fn(ep), slow_sine=fn_1(ep))
30 |                     logger.flush()
31 |                     if ep % 10 == 0:
32 |                         logger.save_image(face('gray'), f"figures/gray_{ep:04d}.png")
33 |                         logger.save_image(face('rgb'), f"figures/rgb_{ep:04d}.png")
34 |
35 |                 logger.save_image(face('gray'), "figures/face_gray.png")
36 |                 logger.save_image(face('rgb'), "figures/face_rgb.png")
37 |
38 |             with logger.PrefixContext(f"runs/{username}/{project}"):
39 |                 logger.log_line("# Root Files\n", file="README.md")
40 |
41 |     # import numpy as np
42 |     # import matplotlib.pyplot as plt
43 |     #
44 |     # xs = np.arange(500)
45 |     # ys = (1 + 0.001 * xs) * np.sin(xs * 0.1 / np.pi)
46 |     # ys += np.random.rand(*ys.shape) * 1
47 |     # plt.plot(xs, ys)
48 |     # plt.show()
49 |     #
50 |     # import numpy as np
51 |     # import matplotlib.pyplot as plt
52 |     #
53 |     # xs = np.arange(500)
54 |     # ys = (1 + 0.001 * xs) * np.sin(xs * 0.02 / np.pi)
55 |     # ys += np.random.rand(*ys.shape) * 1
56 |     # plt.plot(xs, ys)
57 |     # plt.show()
58 |
--------------------------------------------------------------------------------
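
This script seeds the debug log directory that the spec suite queries. A sketch of inspecting what it leaves behind; the `metrics.pkl` files and `figures/*.png` images follow from the calls above (and from the paths used in `test_ml_dash.py`), while any other file names written by `log_params` depend on `ml_logger` internals and are not assumed here:

```python
# Sketch: list the fixture data under the debug logdir, e.g.
#   ~/ml-logger-debug/episodeyang/cpc-belief/mdp/experiment_00/metrics.pkl
#   ~/ml-logger-debug/episodeyang/cpc-belief/mdp/experiment_00/figures/gray_0000.png
import os
from os.path import expanduser, join

root = expanduser("~/ml-logger-debug")  # DEBUG_DIR in create_experiments.py
for dirpath, dirnames, filenames in os.walk(root):
    for name in filenames:
        print(join(dirpath, name))
```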
/dash_server_specs/test_ml_dash.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from graphene.test import Client
3 | from graphql_relay import to_global_id
4 | from ml_dash.schema import schema
5 | from dash_server_specs import show, shows
6 |
7 |
8 | @pytest.fixture(scope='session')
9 | def log_dir(request):
10 | return request.config.getoption('--logdir')
11 |
12 |
13 | def test_delete_text_file(log_dir):
14 | from ml_dash.config import Args
15 | Args.logdir = log_dir
16 | client = Client(schema)
17 | query = """
18 | mutation AppMutation ($id: ID!) {
19 | deleteFile (input: {
20 | id: $id,
21 | clientMutationId: "10",
22 | }) { ok }
23 | }
24 | """
25 | path = "/episodeyang/cpc-belief/README.md"
26 | r = client.execute(query, variables=dict(id=to_global_id("File", path)))
27 |
28 | if 'errors' in r:
29 | raise RuntimeError("\n" + shows(r['errors']))
30 | else:
31 | print(">>")
32 | show(r['data'])
33 |
34 |
35 | def test_mutate_text_file(log_dir):
36 | from ml_dash.config import Args
37 | Args.logdir = log_dir
38 | client = Client(schema)
39 | query = """
40 | mutation AppMutation ($id: ID!) {
41 | updateText (input: {
42 | id: $id,
43 | text: "new text!!\\n1\\n2\\n3\\n4\\n5\\n6",
44 | clientMutationId: "10",
45 | }) {
46 | file { id name text (stop:5) }
47 | }
48 | }
49 | """
50 | path = "/episodeyang/cpc-belief/README.md"
51 | r = client.execute(query, variables=dict(id=to_global_id("File", path)))
52 |
53 | if 'errors' in r:
54 | raise RuntimeError("\n" + shows(r['errors']))
55 | else:
56 | print(">>")
57 | show(r['data'])
58 |
59 |
60 | def test_mutate_json_file(log_dir):
61 | from ml_dash.config import Args
62 | Args.logdir = log_dir
63 | client = Client(schema)
64 | query = """
65 | mutation AppMutation ($id: ID!) {
66 | updateJson (input: {
67 | id: $id,
68 | data: {text: "hey", key: 10},
69 | clientMutationId: "10",
70 | }) {
71 | file { id name json text yaml }
72 | }
73 | }
74 | """
75 | path = "/episodeyang/cpc-belief/README.md"
76 | r = client.execute(query, variables=dict(id=to_global_id("File", path)))
77 |
78 | if 'errors' in r:
79 | raise RuntimeError("\n" + shows(r['errors']))
80 | else:
81 | print(">>")
82 | show(r['data'])
83 |
84 |
85 | def test_mutate_yaml_file(log_dir):
86 | from ml_dash.config import Args
87 | Args.logdir = log_dir
88 | client = Client(schema)
89 | query = """
90 | mutation AppMutation ($id: ID!) {
91 | updateYaml (input: {
92 | id: $id,
93 | data: {text: "hey", key: 10, charts: [0, 1]},
94 | clientMutationId: "10",
95 | }) {
96 | file { id name text yaml }
97 | }
98 | }
99 | """
100 | path = "/episodeyang/cpc-belief/README.md"
101 | r = client.execute(query, variables=dict(id=to_global_id("File", path)))
102 |
103 | if 'errors' in r:
104 | raise RuntimeError("\n" + shows(r['errors']))
105 | else:
106 | print(">>")
107 | show(r['data'])
108 |
109 |
110 | def test_glob_files(log_dir):
111 | from ml_dash.config import Args
112 | Args.logdir = log_dir
113 | client = Client(schema)
114 | query = """
115 | query AppQuery ($cwd: String!, $query: String) {
116 | glob ( cwd: $cwd, query: $query) { id name path }
117 | }
118 | """
119 | path = "/episodeyang/cpc-belief/mdp/experiment_00/"
120 | r = client.execute(query, variables=dict(cwd=path, query="figures/*.png"))
121 | if 'errors' in r:
122 | raise RuntimeError(r['errors'])
123 | else:
124 | print(">>")
125 | show(r['data'])
126 |
127 |
128 | def test_experiment(log_dir):
129 | from ml_dash.config import Args
130 | Args.logdir = log_dir
131 | client = Client(schema)
132 | query = """
133 | query AppQuery ($id: ID!) {
134 | experiment ( id: $id ) {
135 | id
136 | name
137 | parameters { id name }
138 | files (first:10) {
139 | edges {
140 | node {
141 | id name path
142 | }
143 | }
144 | }
145 | directories (first:10) {
146 | edges {
147 | node {
148 | id name path
149 | }
150 | }
151 | }
152 | }
153 | }
154 | """
155 | path = "/episodeyang/cpc-belief/mdp/experiment_01"
156 | r = client.execute(query, variables=dict(id=to_global_id("Experiment", path)))
157 | if 'errors' in r:
158 | raise RuntimeError(r['errors'])
159 | else:
160 | print(">>")
161 | show(r['data'])
162 |
163 |
164 | def test_directory(log_dir):
165 | from ml_dash.config import Args
166 | Args.logdir = log_dir
167 | client = Client(schema)
168 | query = """
169 | query AppQuery ($id: ID!) {
170 | directory ( id: $id ) {
171 | id
172 | name
173 | path
174 | readme {
175 | id name path
176 | text(stop:11)
177 | }
178 | dashConfigs(first:10) {
179 | edges {
180 | node {
181 | id name
182 | path
183 | yaml
184 | text(stop:11)
185 | }
186 | }
187 | }
188 |
189 | charts(first:10) {
190 | edges {
191 | node {
192 | id name
193 | dir
194 | path
195 | yaml
196 | text(stop:11)
197 | }
198 | }
199 | }
200 |
201 | directories (first:10) {
202 | edges {
203 | node {
204 | id name path
205 | directories (first:10) {
206 | edges {
207 | node {
208 | id name
209 | }
210 | }
211 | }
212 | }
213 | }
214 | }
215 | experiments (first:10) {
216 | edges { node {
217 | id name path
218 | parameters {keys flat}
219 | files (first:10) { edges { node { id, name} } }
220 | } }
221 | }
222 | }
223 | }
224 | """
225 | path = "/episodeyang/cpc-belief/mdp"
226 | r = client.execute(query, variables=dict(id=to_global_id("Directory", path)))
227 | if 'errors' in r:
228 | raise RuntimeError(r['errors'])
229 | else:
230 | print(">>")
231 | show(r['data'])
232 |
233 |
234 | # todo: add chunked loading for the text field. Necessary for long log files.
235 | def test_read_text_file(log_dir):
236 | from ml_dash.config import Args
237 | Args.logdir = log_dir
238 | client = Client(schema)
239 | query = """
240 | query AppQuery ($id: ID!) {
241 | file ( id: $id) {
242 | id name text
243 | }
244 | }
245 | """
246 | path = "/episodeyang/cpc-belief/README.md"
247 | r = client.execute(query, variables=dict(id=to_global_id("File", path)))
248 | if 'errors' in r:
249 | raise RuntimeError(r['errors'])
250 | else:
251 | print(">>")
252 | show(r['data'])
253 |
254 |
255 | def test_series_2(log_dir):
256 | query = """
257 | query LineChartsQuery(
258 | $prefix: String
259 | $xKey: String
260 | $yKey: String
261 | $yKeys: [String]
262 | $metricsFiles: [String]!
263 | ) {
264 | series(metricsFiles: $metricsFiles, prefix: $prefix, k: 10, xKey: $xKey, yKey: $yKey, yKeys: $yKeys) {
265 | id
266 | xKey
267 | yKey
268 | xData
269 | yMean
270 | yCount
271 | }
272 | }
273 | """
274 | variables = {"prefix": None, "xKey": "epoch", "yKey": "slow_sine", "yKeys": None,
275 | "metricsFiles": ["/episodeyang/cpc-belief/mdp/experiment_01/metrics.pkl"]}
276 |
277 | from ml_dash.config import Args
278 | Args.logdir = log_dir
279 | client = Client(schema)
280 | r = client.execute(query, variables=variables)
281 | if 'errors' in r:
282 | raise RuntimeError(r['errors'])
283 | else:
284 | print(">>")
285 | show(r['data'])
286 |
287 | assert r['data']['series']['xData'].__len__() == 10
288 |
289 |
290 | def test_series_x_limit(log_dir):
291 | query = """
292 | query LineChartsQuery(
293 | $prefix: String
294 | $xKey: String
295 | $yKey: String
296 | $yKeys: [String]
297 | $metricsFiles: [String]!
298 | ) {
299 | series(
300 | metricsFiles: $metricsFiles,
301 | prefix: $prefix,
302 | k: 10,
303 | xLow: 41,
304 | xKey: $xKey,
305 | yKey: $yKey,
306 | yKeys: $yKeys
307 | ) {
308 | id
309 | xKey
310 | yKey
311 | xData
312 | yMean
313 | yCount
314 | }
315 | }
316 | """
317 |
318 | from ml_dash.config import Args
319 | Args.logdir = log_dir
320 | client = Client(schema)
321 |
322 | variables = {"prefix": None, "xKey": "epoch", "yKey": "slow_sine", "yKeys": None,
323 | "metricsFiles": ["/episodeyang/cpc-belief/mdp/experiment_01/metrics.pkl"]}
324 | r = client.execute(query, variables=variables)
325 | print(r['data']['series']['xData'])
326 | assert r['data']['series']['xData'].__len__() == 10
327 |
328 | variables = {"prefix": None, "yKey": "slow_sine", "yKeys": None,
329 | "metricsFiles": ["/episodeyang/cpc-belief/mdp/experiment_01/metrics.pkl"]}
330 | r = client.execute(query, variables=variables)
331 | print(r['data']['series']['xData'])
332 | assert r['data']['series']['xData'].__len__() == 10
333 |
334 |
335 | def test_series_last(log_dir):
336 | query = """
337 | query LastMetricQuery(
338 | $yKey: String
339 | $last: Int
340 | $metricsFiles: [String]!
341 | ) {
342 | series(metricsFiles: $metricsFiles, k: 1, yKey: $yKey, tail: $last) {
343 | id
344 | yKey
345 | yMean
346 | yCount
347 | }
348 | }
349 | """
350 | variables = {"yKey": "slow_sine",
351 | "last": 10,
352 | "metricsFiles": ["/episodeyang/cpc-belief/mdp/experiment_01/metrics.pkl"]}
353 |
354 | from ml_dash.config import Args
355 | Args.logdir = log_dir
356 | client = Client(schema)
357 | r = client.execute(query, variables=variables)
358 | if 'errors' in r:
359 | raise RuntimeError(r['errors'])
360 | else:
361 | print(">>")
362 | show(r['data'])
363 | assert not not r['data']['series']['yMean'], "the yMean should NOT be empty"
364 | assert not not r['data']['series']['yCount'] == [10.0]
365 |
366 |
367 | # can we do the average first?
368 | def test_series_group(log_dir):
369 | from ml_dash.config import Args
370 | Args.logdir = log_dir
371 | client = Client(schema)
372 | query = """
373 | query AppQuery {
374 | series (
375 | k:30
376 | tail: 100
377 | xLow: 25
378 | prefix:"/episodeyang/cpc-belief/mdp"
379 | metricsFiles:["experiment_00/metrics.pkl", "experiment_01/metrics.pkl", "experiment_02/metrics.pkl"]
380 | xKey: "epoch"
381 | yKeys: ["sine", "slow_sine"]
382 | # xAlign: "start"
383 | ) {
384 | id
385 | xKey
386 | yKey
387 | yKeys
388 | xData
389 | yMean
390 | yMedian
391 | y25pc
392 | y75pc
393 | y05pc
394 | y95pc
395 | yMedian
396 | yCount
397 | }
398 | }
399 | """
400 | r = client.execute(query, variables=dict(username="episodeyang"))
401 | if 'errors' in r:
402 | raise RuntimeError(r['errors'])
403 | else:
404 | print(">>")
405 | show(r['data'])
406 |
407 |
408 | def test_metric(log_dir):
409 | from ml_dash.config import Args
410 | Args.logdir = log_dir
411 | client = Client(schema)
412 | query = """
413 | query AppQuery {
414 | metrics(id:"TWV0cmljczovZXBpc29kZXlhbmcvY3BjLWJlbGllZi9tZHAvZXhwZXJpbWVudF8wMC9tZXRyaWNzLnBrbA==" ) {
415 | id
416 | keys
417 | path
418 | value (keys: ["sine"])
419 | }
420 | }
421 | """
422 | r = client.execute(query, variables=dict(username="episodeyang"))
423 | if 'errors' in r:
424 | raise RuntimeError(r['errors'])
425 | else:
426 | print(">>")
427 | show(r['data'])
428 |
429 |
430 | def test_schema(log_dir):
431 | from ml_dash.config import Args
432 | Args.logdir = log_dir
433 | client = Client(schema)
434 | query = """
435 | query AppQuery ($username:String) {
436 | user (username: $username) {
437 | username
438 | name
439 | projects(first: 1) {
440 | edges {
441 | node {
442 | id
443 | name
444 | experiments(first:10) { edges { node {
445 | id
446 | name
447 | parameters {value keys flat raw}
448 | metrics {
449 | id
450 | keys
451 | value (keys: ["sine"])
452 | }
453 | } } }
454 | directories(first:10){
455 | edges {
456 | node {
457 | id
458 | name
459 | files(first:10){
460 | edges {
461 | node { name }
462 | }
463 | }
464 | directories(first:10){
465 | edges {
466 | node { name }
467 | }
468 | }
469 | }
470 | }
471 | }
472 | }
473 | }
474 | }
475 | }
476 | }
477 | """
478 | r = client.execute(query, variables=dict(username="episodeyang"))
479 | if 'errors' in r:
480 | raise RuntimeError(r['errors'])
481 | else:
482 | _ = r['data']['user']
483 | show(_)
484 |
485 |
486 | def test_node(log_dir):
487 | from ml_dash.config import Args
488 | Args.logdir = log_dir
489 | client = Client(schema)
490 | query = """
491 | query AppQuery ($id:ID!) {
492 | node (id: $id) {
493 | id
494 | ... on File {
495 | name
496 | text
497 | }
498 | }
499 | }
500 | """
501 | path = "/episodeyang/cpc-belief/README.md"
502 | r = client.execute(query, variables=dict(id=to_global_id("File", path)))
503 | if 'errors' in r:
504 | raise RuntimeError(r['errors'])
505 | else:
506 | print(">>")
507 | show(r['data'])
508 |
--------------------------------------------------------------------------------
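
Every node id in these specs is a Relay global id: `to_global_id(type, path)` base64-encodes `"<Type>:<path>"`. The literal id hard-coded in `test_metric` is just the encoded `metrics.pkl` path of `experiment_00`, as this small sketch shows:

```python
# Sketch: the Relay global-id convention used throughout test_ml_dash.py.
from graphql_relay import to_global_id, from_global_id

path = "/episodeyang/cpc-belief/mdp/experiment_00/metrics.pkl"
gid = to_global_id("Metrics", path)
# gid == "TWV0cmljczovZXBpc29kZXlhbmcvY3BjLWJlbGllZi9tZHAvZXhwZXJpbWVudF8wMC9tZXRyaWNzLnBrbA=="
assert from_global_id(gid) == ("Metrics", path)
```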
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | _build
2 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
21 |
22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
23 |
24 | help:
25 | @echo "Please use \`make ' where is one of"
26 | @echo " html to make standalone HTML files"
27 | @echo " dirhtml to make HTML files named index.html in directories"
28 | @echo " singlehtml to make a single large HTML file"
29 | @echo " pickle to make pickle files"
30 | @echo " json to make JSON files"
31 | @echo " htmlhelp to make HTML files and a HTML help project"
32 | @echo " qthelp to make HTML files and a qthelp project"
33 | @echo " devhelp to make HTML files and a Devhelp project"
34 | @echo " epub to make an epub"
35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
36 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
38 | @echo " text to make text files"
39 | @echo " man to make manual pages"
40 | @echo " texinfo to make Texinfo files"
41 | @echo " info to make Texinfo files and run them through makeinfo"
42 | @echo " gettext to make PO message catalogs"
43 | @echo " changes to make an overview of all changed/added/deprecated items"
44 | @echo " xml to make Docutils-native XML files"
45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
46 | @echo " linkcheck to check all external links for integrity"
47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
48 |
49 | clean:
50 | rm -rf $(BUILDDIR)/*
51 |
52 | html:
53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
54 | @echo
55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
56 |
57 | dirhtml:
58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
59 | @echo
60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
61 |
62 | singlehtml:
63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
64 | @echo
65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
66 |
67 | pickle:
68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
69 | @echo
70 | @echo "Build finished; now you can process the pickle files."
71 |
72 | json:
73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
74 | @echo
75 | @echo "Build finished; now you can process the JSON files."
76 |
77 | htmlhelp:
78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
79 | @echo
80 | @echo "Build finished; now you can run HTML Help Workshop with the" \
81 | ".hhp project file in $(BUILDDIR)/htmlhelp."
82 |
83 | qthelp:
84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
85 | @echo
86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ReadtheDocsTemplate.qhcp"
89 | @echo "To view the help file:"
90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ReadtheDocsTemplate.qhc"
91 |
92 | devhelp:
93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
94 | @echo
95 | @echo "Build finished."
96 | @echo "To view the help file:"
97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/ReadtheDocsTemplate"
98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ReadtheDocsTemplate"
99 | @echo "# devhelp"
100 |
101 | epub:
102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
103 | @echo
104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
105 |
106 | latex:
107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
108 | @echo
109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
111 | "(use \`make latexpdf' here to do that automatically)."
112 |
113 | latexpdf:
114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
115 | @echo "Running LaTeX files through pdflatex..."
116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
118 |
119 | latexpdfja:
120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
121 | @echo "Running LaTeX files through platex and dvipdfmx..."
122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
124 |
125 | text:
126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
127 | @echo
128 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
129 |
130 | man:
131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
132 | @echo
133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
134 |
135 | texinfo:
136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
137 | @echo
138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
139 | @echo "Run \`make' in that directory to run these through makeinfo" \
140 | "(use \`make info' here to do that automatically)."
141 |
142 | info:
143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
144 | @echo "Running Texinfo files through makeinfo..."
145 | make -C $(BUILDDIR)/texinfo info
146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
147 |
148 | gettext:
149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
150 | @echo
151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
152 |
153 | changes:
154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
155 | @echo
156 | @echo "The overview file is in $(BUILDDIR)/changes."
157 |
158 | linkcheck:
159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
160 | @echo
161 | @echo "Link check complete; look for any errors in the above output " \
162 | "or in $(BUILDDIR)/linkcheck/output.txt."
163 |
164 | doctest:
165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
166 | @echo "Testing of doctests in the sources finished, look at the " \
167 | "results in $(BUILDDIR)/doctest/output.txt."
168 |
169 | xml:
170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
171 | @echo
172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
173 |
174 | pseudoxml:
175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
176 | @echo
177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
178 |
--------------------------------------------------------------------------------
/docs/authors.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../AUTHORS.rst
2 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import sys
5 |
6 | sys.path.insert(0, os.path.abspath('../'))
7 | # extensions = []
8 | # templates_path = ['_templates']
9 | source_suffix = '.rst'
10 | # source_encoding = 'utf-8-sig'
11 | master_doc = 'index'
12 |
13 | # General information about the project.
14 | project = u'ML-Logger'
15 | copyright = u'2019, Ge Yang'
16 |
17 | # The short X.Y version.
18 | version = '1.0'
19 | # The full version, including alpha/beta/rc tags.
20 | release = '1.0'
21 |
22 | language = 'en'
23 |
24 | today_fmt = '%Y %B. %d'
25 |
26 | # List of patterns, relative to source directory, that match files and
27 | # directories to ignore when looking for source files.
28 | exclude_patterns = ['_build']
29 |
30 | extensions = [
31 | # 'recommonmark',
32 | 'sphinx.ext.autodoc',
33 | 'sphinx.ext.doctest',
34 | 'sphinx.ext.coverage',
35 | 'sphinx.ext.mathjax',
36 | 'sphinx.ext.viewcode',
37 | # 'sphinx.ext.githubpages',
38 | 'sphinx.ext.napoleon'
39 | ]
40 |
41 | # Add any paths that contain templates here, relative to this directory.
42 | templates_path = [
43 | '_templates'
44 | ]
45 | # html_theme = 'alabaster'
46 |
47 | # The reST default role (used for this markup: `text`) to use for all
48 | # documents.
49 | # default_role = None
50 |
51 | # If true, '()' will be appended to :func: etc. cross-reference text.
52 | # add_function_parentheses = True
53 |
54 | # If true, the current module name will be prepended to all description
55 | # unit titles (such as .. function::).
56 | # add_module_names = True
57 |
58 | # If true, sectionauthor and moduleauthor directives will be shown in the
59 | # output. They are ignored by default.
60 | # show_authors = False
61 |
62 | # The name of the Pygments (syntax highlighting) style to use.
63 | pygments_style = 'sphinx'
64 |
65 | # A list of ignored prefixes for module index sorting.
66 | # modindex_common_prefix = []
67 |
68 | # If true, keep warnings as "system message" paragraphs in the built documents.
69 | # keep_warnings = False
70 |
71 |
72 | # html_theme = 'default'
73 | import guzzle_sphinx_theme
74 |
75 | html_theme_path = guzzle_sphinx_theme.html_theme_path()
76 | html_theme = 'guzzle_sphinx_theme'
77 |
78 | # Register the theme as an extension to generate a sitemap.xml
79 | extensions.append("guzzle_sphinx_theme")
80 |
81 | # Guzzle theme options (see theme.conf for more information)
82 | html_theme_options = {
83 |
84 | # # Set the path to a special layout to include for the homepage
85 | # "index_template": "special_index.html",
86 |
87 | # Set the name of the project to appear in the left sidebar.
88 | "project_nav_name": "Project Name",
89 |
90 | # # Set your Disqus short name to enable comments
91 | # "disqus_comments_shortname": "my_disqus_comments_short_name",
92 | #
93 | # # Set you GA account ID to enable tracking
94 | # "google_analytics_account": "my_ga_account",
95 | #
96 | # # Path to a touch icon
97 | # "touch_icon": "",
98 | #
99 | # # Specify a base_url used to generate sitemap.xml links. If not
100 | # # specified, then no sitemap will be built.
101 | # "base_url": "",
102 | #
103 | # # Allow a separate homepage from the master_doc
104 | # "homepage": "index",
105 |
106 | # # Allow the project link to be overriden to a custom URL.
107 | # "projectlink": "http://myproject.url",
108 | #
109 | # # Visible levels of the global TOC; -1 means unlimited
110 | # "globaltoc_depth": 3,
111 | #
112 | # # If False, expand all TOC entries
113 | # "globaltoc_collapse": False,
114 | #
115 | # # If True, show hidden TOC entries
116 | # "globaltoc_includehidden": False,
117 | }
118 |
119 | # import sphinx_readable_theme
120 | #
121 | # html_theme_path = [sphinx_readable_theme.get_html_theme_path()]
122 | # html_theme = 'readable'
123 |
124 | # html_theme_options = {}
125 |
126 | # Add any paths that contain custom themes here, relative to this directory.
127 | # html_theme_path = []
128 |
129 | # The name for this set of Sphinx documents. If None, it defaults to
130 | # " v documentation".
131 | # html_title = None
132 |
133 | # A shorter title for the navigation bar. Default is the same as html_title.
134 | # html_short_title = None
135 |
136 | # The name of an image file (relative to this directory) to place at the top
137 | # of the sidebar.
138 | # html_logo = None
139 |
140 | # The name of an image file (within the static path) to use as favicon of the
141 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
142 | # pixels large.
143 | # html_favicon = None
144 |
145 | # Add any paths that contain custom static files (such as style sheets) here,
146 | # relative to this directory. They are copied after the builtin static files,
147 | # so a file named "default.css" will overwrite the builtin "default.css".
148 | html_static_path = ['_static']
149 |
150 | # Add any extra paths that contain custom files (such as robots.txt or
151 | # .htaccess) here, relative to this directory. These files are copied
152 | # directly to the root of the documentation.
153 | # html_extra_path = []
154 |
155 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
156 | # using the given strftime format.
157 | # html_last_updated_fmt = '%b %d, %Y'
158 |
159 | # If true, SmartyPants will be used to convert quotes and dashes to
160 | # typographically correct entities.
161 | # html_use_smartypants = True
162 |
163 | # Custom sidebar templates, maps document names to template names.
164 | # html_sidebars = {}
165 |
166 | # Additional templates that should be rendered to pages, maps page names to
167 | # template names.
168 | # html_additional_pages = {}
169 |
170 | # If false, no module index is generated.
171 | # html_domain_indices = True
172 |
173 | # If false, no index is generated.
174 | # html_use_index = True
175 |
176 | # If true, the index is split into individual pages for each letter.
177 | # html_split_index = False
178 |
179 | # If true, links to the reST sources are added to the pages.
180 | # html_show_sourcelink = True
181 |
182 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
183 | # html_show_sphinx = True
184 |
185 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
186 | # html_show_copyright = True
187 |
188 | # If true, an OpenSearch description file will be output, and all pages will
189 | # contain a tag referring to it. The value of this option must be the
190 | # base URL from which the finished HTML is served.
191 | # html_use_opensearch = ''
192 |
193 | # This is the file name suffix for HTML files (e.g. ".xhtml").
194 | # html_file_suffix = None
195 |
196 | # Output file base name for HTML help builder.
197 | htmlhelp_basename = 'ReadtheDocsTemplatedoc'
198 |
199 | # -- Options for LaTeX output ---------------------------------------------
200 |
201 | latex_elements = {
202 | # The paper size ('letterpaper' or 'a4paper').
203 | # 'papersize': 'letterpaper',
204 |
205 | # The font size ('10pt', '11pt' or '12pt').
206 | # 'pointsize': '10pt',
207 |
208 | # Additional stuff for the LaTeX preamble.
209 | # 'preamble': '',
210 | }
211 |
212 | # Grouping the document tree into LaTeX files. List of tuples
213 | # (source start file, target name, title,
214 | # author, documentclass [howto, manual, or own class]).
215 | latex_documents = [
216 | ('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
217 | u'Read the Docs', 'manual'),
218 | ]
219 |
220 | # The name of an image file (relative to this directory) to place at the top of
221 | # the title page.
222 | # latex_logo = None
223 |
224 | # For "manual" documents, if this is true, then toplevel headings are parts,
225 | # not chapters.
226 | # latex_use_parts = False
227 |
228 | # If true, show page references after internal links.
229 | # latex_show_pagerefs = False
230 |
231 | # If true, show URL addresses after external links.
232 | # latex_show_urls = False
233 |
234 | # Documents to append as an appendix to all manuals.
235 | # latex_appendices = []
236 |
237 | # If false, no module index is generated.
238 | # latex_domain_indices = True
239 |
240 |
241 | # -- Options for manual page output ---------------------------------------
242 |
243 | # One entry per manual page. List of tuples
244 | # (source start file, name, description, authors, manual section).
245 | man_pages = [
246 | ('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
247 | [u'Read the Docs'], 1)
248 | ]
249 |
250 | # If true, show URL addresses after external links.
251 | # man_show_urls = False
252 |
253 |
254 | # -- Options for Texinfo output -------------------------------------------
255 |
256 | # Grouping the document tree into Texinfo files. List of tuples
257 | # (source start file, target name, title, author,
258 | # dir menu entry, description, category)
259 | texinfo_documents = [
260 | ('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
261 | u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
262 | 'Miscellaneous'),
263 | ]
264 |
265 | # Documents to append as an appendix to all manuals.
266 | # texinfo_appendices = []
267 |
268 | # If false, no module index is generated.
269 | # texinfo_domain_indices = True
270 |
271 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
272 | # texinfo_show_urls = 'footnote'
273 |
274 | # If true, do not generate a @detailmenu in the "Top" node's menu.
275 | # texinfo_no_detailmenu = False
276 |
277 |
--------------------------------------------------------------------------------
/docs/contributing.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../CONTRIBUTING.rst
2 |
--------------------------------------------------------------------------------
/docs/history.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../HISTORY.rst
2 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to ML-Logger!
2 | =====================================
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 | :glob:
7 |
8 | .. automodule:: ml_logger.ml_logger
9 | :members:
10 |
11 |
12 |
13 | Indices and tables
14 | ==================
15 |
16 | * :ref:`genindex`
17 | * :ref:`modindex`
18 | * :ref:`search`
19 |
--------------------------------------------------------------------------------
/docs/installation.rst:
--------------------------------------------------------------------------------
1 | ============
2 | Installation
3 | ============
4 |
5 | Install the package with pip::
6 |
7 | $ pip install read-the-docs-template
8 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | REM Command file for Sphinx documentation
4 |
5 | if "%SPHINXBUILD%" == "" (
6 | set SPHINXBUILD=sphinx-build
7 | )
8 | set BUILDDIR=_build
9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
10 | set I18NSPHINXOPTS=%SPHINXOPTS% .
11 | if NOT "%PAPER%" == "" (
12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
14 | )
15 |
16 | if "%1" == "" goto help
17 |
18 | if "%1" == "help" (
19 | :help
20 | echo.Please use `make ^<target^>` where ^<target^> is one of
21 | echo. html to make standalone HTML files
22 | echo. dirhtml to make HTML files named index.html in directories
23 | echo. singlehtml to make a single large HTML file
24 | echo. pickle to make pickle files
25 | echo. json to make JSON files
26 | echo. htmlhelp to make HTML files and a HTML help project
27 | echo. qthelp to make HTML files and a qthelp project
28 | echo. devhelp to make HTML files and a Devhelp project
29 | echo. epub to make an epub
30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
31 | echo. text to make text files
32 | echo. man to make manual pages
33 | echo. texinfo to make Texinfo files
34 | echo. gettext to make PO message catalogs
35 | echo. changes to make an overview over all changed/added/deprecated items
36 | echo. xml to make Docutils-native XML files
37 | echo. pseudoxml to make pseudoxml-XML files for display purposes
38 | echo. linkcheck to check all external links for integrity
39 | echo. doctest to run all doctests embedded in the documentation if enabled
40 | goto end
41 | )
42 |
43 | if "%1" == "clean" (
44 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
45 | del /q /s %BUILDDIR%\*
46 | goto end
47 | )
48 |
49 |
50 | %SPHINXBUILD% 2> nul
51 | if errorlevel 9009 (
52 | echo.
53 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
54 | echo.installed, then set the SPHINXBUILD environment variable to point
55 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
56 | echo.may add the Sphinx directory to PATH.
57 | echo.
58 | echo.If you don't have Sphinx installed, grab it from
59 | echo.http://sphinx-doc.org/
60 | exit /b 1
61 | )
62 |
63 | if "%1" == "html" (
64 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
65 | if errorlevel 1 exit /b 1
66 | echo.
67 | echo.Build finished. The HTML pages are in %BUILDDIR%/html.
68 | goto end
69 | )
70 |
71 | if "%1" == "dirhtml" (
72 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
73 | if errorlevel 1 exit /b 1
74 | echo.
75 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
76 | goto end
77 | )
78 |
79 | if "%1" == "singlehtml" (
80 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
81 | if errorlevel 1 exit /b 1
82 | echo.
83 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
84 | goto end
85 | )
86 |
87 | if "%1" == "pickle" (
88 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
89 | if errorlevel 1 exit /b 1
90 | echo.
91 | echo.Build finished; now you can process the pickle files.
92 | goto end
93 | )
94 |
95 | if "%1" == "json" (
96 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
97 | if errorlevel 1 exit /b 1
98 | echo.
99 | echo.Build finished; now you can process the JSON files.
100 | goto end
101 | )
102 |
103 | if "%1" == "htmlhelp" (
104 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
105 | if errorlevel 1 exit /b 1
106 | echo.
107 | echo.Build finished; now you can run HTML Help Workshop with the ^
108 | .hhp project file in %BUILDDIR%/htmlhelp.
109 | goto end
110 | )
111 |
112 | if "%1" == "qthelp" (
113 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
114 | if errorlevel 1 exit /b 1
115 | echo.
116 | echo.Build finished; now you can run "qcollectiongenerator" with the ^
117 | .qhcp project file in %BUILDDIR%/qthelp, like this:
118 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\complexity.qhcp
119 | echo.To view the help file:
120 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\complexity.ghc
121 | goto end
122 | )
123 |
124 | if "%1" == "devhelp" (
125 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
126 | if errorlevel 1 exit /b 1
127 | echo.
128 | echo.Build finished.
129 | goto end
130 | )
131 |
132 | if "%1" == "epub" (
133 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
134 | if errorlevel 1 exit /b 1
135 | echo.
136 | echo.Build finished. The epub file is in %BUILDDIR%/epub.
137 | goto end
138 | )
139 |
140 | if "%1" == "latex" (
141 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
142 | if errorlevel 1 exit /b 1
143 | echo.
144 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
145 | goto end
146 | )
147 |
148 | if "%1" == "latexpdf" (
149 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
150 | cd %BUILDDIR%/latex
151 | make all-pdf
152 | cd %BUILDDIR%/..
153 | echo.
154 | echo.Build finished; the PDF files are in %BUILDDIR%/latex.
155 | goto end
156 | )
157 |
158 | if "%1" == "latexpdfja" (
159 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
160 | cd %BUILDDIR%/latex
161 | make all-pdf-ja
162 | cd %BUILDDIR%/..
163 | echo.
164 | echo.Build finished; the PDF files are in %BUILDDIR%/latex.
165 | goto end
166 | )
167 |
168 | if "%1" == "text" (
169 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
170 | if errorlevel 1 exit /b 1
171 | echo.
172 | echo.Build finished. The text files are in %BUILDDIR%/text.
173 | goto end
174 | )
175 |
176 | if "%1" == "man" (
177 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
178 | if errorlevel 1 exit /b 1
179 | echo.
180 | echo.Build finished. The manual pages are in %BUILDDIR%/man.
181 | goto end
182 | )
183 |
184 | if "%1" == "texinfo" (
185 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
186 | if errorlevel 1 exit /b 1
187 | echo.
188 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
189 | goto end
190 | )
191 |
192 | if "%1" == "gettext" (
193 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
194 | if errorlevel 1 exit /b 1
195 | echo.
196 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
197 | goto end
198 | )
199 |
200 | if "%1" == "changes" (
201 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
202 | if errorlevel 1 exit /b 1
203 | echo.
204 | echo.The overview file is in %BUILDDIR%/changes.
205 | goto end
206 | )
207 |
208 | if "%1" == "linkcheck" (
209 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
210 | if errorlevel 1 exit /b 1
211 | echo.
212 | echo.Link check complete; look for any errors in the above output ^
213 | or in %BUILDDIR%/linkcheck/output.txt.
214 | goto end
215 | )
216 |
217 | if "%1" == "doctest" (
218 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
219 | if errorlevel 1 exit /b 1
220 | echo.
221 | echo.Testing of doctests in the sources finished, look at the ^
222 | results in %BUILDDIR%/doctest/output.txt.
223 | goto end
224 | )
225 |
226 | if "%1" == "xml" (
227 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
228 | if errorlevel 1 exit /b 1
229 | echo.
230 | echo.Build finished. The XML files are in %BUILDDIR%/xml.
231 | goto end
232 | )
233 |
234 | if "%1" == "pseudoxml" (
235 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
236 | if errorlevel 1 exit /b 1
237 | echo.
238 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
239 | goto end
240 | )
241 |
242 | :end
243 |
--------------------------------------------------------------------------------
/docs/readme.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../README.rst
2 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | sphinx-rtd-theme
3 | sphinxcontrib-napoleon
4 | sphinxcontrib-websupport
5 |
6 | joblib==0.12.2
7 | PyPrind
8 |
9 | # copied from setup.py
10 | numpy
11 | scipy
12 | python_dateutil
13 | cloudpickle
14 | dill
15 | imageio
16 | more-itertools
17 | numpy
18 | pillow
19 | params-proto
20 | requests
21 | requests-futures
22 | ruamel.yaml
23 | sanic
24 | sanic-cors
25 | termcolor
26 | typing
27 | urllib3
28 | # sphinx-readable-theme
29 | guzzle_sphinx_theme
30 |
31 |
--------------------------------------------------------------------------------
/docs/usage.rst:
--------------------------------------------------------------------------------
1 | ========
2 | Usage
3 | ========
4 |
5 | To use ml-dash in a project, import the package::
6 |
7 |     import ml_dash
8 |
--------------------------------------------------------------------------------
/figures/example_log_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/figures/example_log_output.png
--------------------------------------------------------------------------------
/figures/hyperparameter-column.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/figures/hyperparameter-column.gif
--------------------------------------------------------------------------------
/figures/logger_color_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/figures/logger_color_output.png
--------------------------------------------------------------------------------
/figures/logging_images.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/figures/logging_images.png
--------------------------------------------------------------------------------
/figures/ml-dash-v0.1.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/figures/ml-dash-v0.1.0.png
--------------------------------------------------------------------------------
/figures/ml-dash-v3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/figures/ml-dash-v3.gif
--------------------------------------------------------------------------------
/figures/ml_visualization_dashboard_preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/figures/ml_visualization_dashboard_preview.png
--------------------------------------------------------------------------------
/figures/tensorboard_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/figures/tensorboard_example.png
--------------------------------------------------------------------------------
/figures/visualization_column.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/figures/visualization_column.png
--------------------------------------------------------------------------------
/ml_dash/.gitignore:
--------------------------------------------------------------------------------
1 | client-dist
2 |
--------------------------------------------------------------------------------
/ml_dash/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/ml_dash/__init__.py
--------------------------------------------------------------------------------
/ml_dash/app.py:
--------------------------------------------------------------------------------
1 | import os
2 | from sanic import Sanic
3 | from sanic.exceptions import FileNotFound
4 | from sanic.response import file
5 | from params_proto import ParamsProto, Proto
6 |
7 | # absolute path of this file; the client bundle (client-dist) sits next to it
8 | BASE = os.path.realpath(__file__)
9 | build_path = os.path.join(os.path.dirname(BASE), "client-dist")
10 |
11 | app = Sanic("ml_dash.app")
12 | # serve js file for webpack
13 | app.static('/', build_path)
14 |
15 |
16 | @app.route('/')
17 | @app.exception(FileNotFound)
18 | async def index(request, exception=None):
19 | print('hey ====', [exception])
20 | return await file(build_path + '/index.html')
21 |
22 |
23 | class AppServerArgs(ParamsProto):
24 | """
25 | Configuration Arguments for the Sanic App that serves
26 | the static web-application.
27 |
28 | [Usage]
29 |
30 | To launch the web-app client, do
31 |
32 | python -m ml_dash.app port=3001 host=0.0.0.0 workers=4 debug=True
33 | """
34 | host = Proto("", help="use 0.0.0.0 if you want external clients to be able to access this.")
35 | port = Proto(3001, help="the port")
36 | workers = Proto(1, help="the number of worker processes")
37 | debug = False
38 | access_log = True
39 |
40 |
41 | if __name__ == '__main__':
42 | import socket
43 | from termcolor import cprint, colored as c
44 |
45 | if AppServerArgs.host:
46 | from requests import get
47 |
48 | host_ip = get('https://api.ipify.org').text
49 | del get
50 | ip_string = f"""
51 | Local: {c(f'http://localhost:{AppServerArgs.port}/', 'green')}
52 | External Ip: {c(f'http://{host_ip}:{AppServerArgs.port}/', 'green')}"""
53 | else:
54 | ip_string = f"""
55 | Local: {c(f'http://localhost:{AppServerArgs.port}/', 'green')}"""
56 | # try:
57 | # hostname = socket.gethostname()
58 | # host_ip = socket.gethostbyname(hostname)
59 | # except Exception as e:
60 | # host_ip = ""
61 |
62 | print(f"""
63 | You can now view {c('ml-dash client', 'white')} in the browser.
64 | {ip_string}
65 |
66 | To update to the newer version, do
67 | {c('~>', 'blue')} {c('pip install --upgrade ml-dash', 'red')}
68 |
69 | """)
70 | app.run(**vars(AppServerArgs))
71 |
--------------------------------------------------------------------------------
/ml_dash/config.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from params_proto import ParamsProto, Proto, Flag
4 |
5 |
6 | class Args(ParamsProto):
7 | """
8 | ML-Dash
9 | -------
10 |
11 | This module contains `ml_dash.server`, the visualization backend, and `ml_dash.app`, a
12 | static server hosting the web application.
13 |
14 | Usage
15 | -----
16 |
17 | python -m ml_dash.server --port 8090 --host 0.0.0.0 --workers 10
18 |
19 | """
20 | logdir = Proto(os.path.realpath("."), help="the root directory for all of the logs")
21 |
22 |
23 | class ServerArgs(ParamsProto):
24 | host = Proto("", help="use 0.0.0.0 if you want external clients to be able to access this.")
25 | port = Proto(8081, help="the port")
26 | workers = Proto(1, help="the number of worker processes")
27 | debug = Flag("use to turn on debug mode")
28 |
29 |
30 | class SSLArgs(ParamsProto):
31 | cert = Proto(None, dtype=str, help="the path to the SSL certificate")
32 | key = Proto(None, dtype=str, help="the path to the SSL key")
33 |
--------------------------------------------------------------------------------
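A note on ml_dash/config.py above: every handler and resolver in this package reads config.Args.logdir at call time, so pointing the dashboard at a different logging root is just a matter of overriding that attribute before the server starts. A minimal sketch, assuming the usual params-proto override-by-assignment pattern; the /tmp/ml-logger-root path is purely illustrative:

    # minimal sketch: override the logging root and server port before launching.
    # "/tmp/ml-logger-root" is only an example; logdir defaults to the current directory.
    from ml_dash.config import Args, ServerArgs

    Args.logdir = "/tmp/ml-logger-root"
    ServerArgs.port = 8090

    print(Args.logdir, ServerArgs.port)
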
/ml_dash/example.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dash-ml/dash-server/daf2c50b570906ffa8fbdf312ac839e6a3b9697f/ml_dash/example.py
--------------------------------------------------------------------------------
/ml_dash/file_events.py:
--------------------------------------------------------------------------------
1 | from hachiko.hachiko import AIOEventHandler, AIOWatchdog
2 | from asyncio import coroutine, Queue, sleep
3 | from sanic import response
4 | from sanic.exceptions import RequestTimeout
5 |
6 | from ml_dash.file_utils import path_match
7 | from termcolor import cprint
8 |
9 | from . import config
10 | import json
11 |
12 | subscriptions = []
13 | watcher = None
14 |
15 |
16 | class Handler(AIOEventHandler):
17 | def __init__(self, *args, **kwargs):
18 | super().__init__(*args, **kwargs)
19 |
20 | @coroutine
21 | async def on_any_event(self, event):
22 | _event = dict(src_path=event.src_path, event_type=event.event_type, is_directory=event.is_directory)
23 | for que in subscriptions:
24 | await que.put(_event)
25 | # self._loop.create_task(que.put(event))
26 |
27 |
28 | def setup_watch_queue(app, loop):
29 | print('setting up watch queue')
30 | start_watcher()
31 | cprint('watcher setup complete!', "green")
32 |
33 |
34 | def start_watcher():
35 | global watcher
36 |
37 | handler = Handler()
38 | print('starting file watcher...')
39 | watcher = AIOWatchdog(config.Args.logdir, event_handler=handler)
40 | watcher.start()
41 | print('watcher start is complete')
42 |
43 |
44 | import os
45 |
46 |
47 | # server does not have access to a disconnect event.
48 | # currently subscriptions only grows.
49 | # Will add timeout based cleanup after.
50 | async def file_events(request, file_path="", query="*"):
51 | q = Queue()
52 | subscriptions.append(q)
53 |
54 | async def streaming_fn(response):
55 | try:
56 | while True:
57 | print('subscription que started')
58 | file_event = await q.get()
59 | src_path = file_event['src_path']
60 | if src_path.startswith(os.path.join(config.Args.logdir, file_path)) and path_match(file_path, query):
61 | file_event['src_path'] = src_path[len(config.Args.logdir):]
62 | print("=>>", file_event)
63 | response.write(f"data: {json.dumps(file_event)}\r\n\r\n".encode())
64 |                     await sleep(0.1)
65 | # todo: this timeout doesn't really work.
66 | # todo: also add handling of stream is terminated logic (separate from above).
67 | except RequestTimeout:
68 | subscriptions.remove(q)
69 |
70 | return response.stream(streaming_fn, content_type="text/event-stream")
71 | # subscriptions.remove(q)
72 |
--------------------------------------------------------------------------------
/ml_dash/file_handlers.py:
--------------------------------------------------------------------------------
1 | import os
2 | import stat
3 | from glob import iglob
4 | from shutil import rmtree
5 | from sanic import response
6 |
7 | from . import config
8 |
9 |
10 | def get_type(mode):
11 | if stat.S_ISDIR(mode) or stat.S_ISLNK(mode):
12 | type = 'dir'
13 | else:
14 | type = 'file'
15 | return type
16 |
17 |
18 | async def remove_path(request, file_path=""):
19 | print(file_path)
20 | path = os.path.join(config.Args.logdir, file_path)
21 | if os.path.isdir(path):
22 | rmtree(path)
23 | res = response.text("ok", status=204)
24 | elif os.path.isfile(path):
25 | os.remove(path)
26 | res = response.text("ok", status=204)
27 | else:
28 | res = response.text('Not found', status=404)
29 | return res
30 |
31 |
32 | from contextlib import contextmanager
33 |
34 |
35 | @contextmanager
36 | def cwdContext(path):
37 | owd = os.getcwd()
38 | os.chdir(path)
39 | try:
40 | yield
41 | finally:
42 | os.chdir(owd)
43 |
44 |
45 | async def batch_get_path(request):
46 | try:
47 | data = request.json
48 |
49 | file_paths = data['paths']
50 | options = data['options']
51 |
52 | batch_res_data = dict()
53 |
54 | if options.get('json', False):
55 | for path in file_paths:
56 | from ml_logger.helpers import load_from_pickle
57 | batch_res_data[path] = [_ for _ in load_from_pickle(path)]
58 |
59 | res = response.json(batch_res_data, status=200, content_type='application/json')
60 | return res
61 |
62 | except Exception as e:
63 | print('Exception: ', e)
64 | res = response.text('Internal Error' + str(e), status=502)
65 | return res
66 |
67 |
68 | async def get_path(request, file_path=""):
69 | print(file_path)
70 |
71 | as_records = request.args.get('records')
72 | as_json = request.args.get('json')
73 | as_log = request.args.get('log')
74 | as_attachment = int(request.args.get('download', '0'))
75 | is_recursive = request.args.get('recursive')
76 | show_hidden = request.args.get('hidden')
77 | query = request.args.get('query', "*").strip()
78 |
79 | _start = request.args.get('start', None)
80 | _stop = request.args.get('stop', None)
81 | start = None if _start is None else int(_start)
82 | stop = None if _stop is None else int(_stop)
83 |
84 | reservoir_k = int(request.args.get('reservoir', '200'))
85 |
86 | # limit for the search itself.
87 | search_limit = 500
88 |
89 | path = os.path.join(config.Args.logdir, file_path)
90 | print("=============>", [query], [path], os.path.isdir(path))
91 |
92 | if os.path.isdir(path):
93 | from itertools import islice
94 | with cwdContext(path):
95 | print(os.getcwd(), query, is_recursive)
96 | file_paths = list(islice(iglob(query, recursive=is_recursive), start or 0, stop or 200))
97 |             files = [file_stat(p) for p in file_paths]  # a map object is not JSON-serializable
98 | res = response.json(files, status=200)
99 | elif os.path.isfile(path):
100 | if as_records:
101 | from ml_logger.helpers import load_pickle_as_dataframe
102 | df = load_pickle_as_dataframe(path, reservoir_k)
103 | res = response.text(df.to_json(orient="records"), status=200, content_type='application/json')
104 | elif as_log:
105 | from ml_logger.helpers import load_pickle_as_dataframe
106 | df = load_pickle_as_dataframe(path, reservoir_k)
107 | res = response.text(df.to_json(orient="records"), status=200, content_type='application/json')
108 | elif as_json:
109 | from ml_logger.helpers import load_from_pickle
110 | data = [_ for _ in load_from_pickle(path)]
111 | res = response.json(data, status=200, content_type='application/json')
112 | elif type(start) is int or type(stop) is int:
113 | from itertools import islice
114 | with open(path, 'r') as f:
115 | text = ''.join([l for l in islice(f, start, stop)])
116 | res = response.text(text, status=200)
117 | else:
118 | # todo: check the file handling here. Does this use correct
119 | # mimeType for text files?
120 | res = await response.file(path)
121 | if as_attachment:
122 | res.headers['Content-Disposition'] = 'attachment'
123 | else:
124 | res = response.text('Not found', status=404)
125 | return res
126 |
127 |
128 | # use glob! LOL
129 | def file_stat(file_path):
130 |     # this is very slow when looped over. Fine for a small list of files though.
131 | stat_res = os.stat(file_path)
132 | ft = get_type(stat_res.st_mode)
133 | sz = stat_res.st_size
134 | return dict(
135 | name=os.path.basename(file_path),
136 | path=file_path,
137 | mtime=stat_res.st_mtime,
138 | ctime=stat_res.st_ctime,
139 | type=ft,
140 | size=sz,
141 | )
142 |
--------------------------------------------------------------------------------
/ml_dash/file_utils.py:
--------------------------------------------------------------------------------
1 | def path_match(query, pattern):
2 |     import fnmatch, re
3 | regex = fnmatch.translate(pattern)
4 | reobj = re.compile(regex)
5 | return reobj.match(query)
6 |
--------------------------------------------------------------------------------
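A note on ml_dash/file_utils.py above: path_match is what file_events uses to filter watchdog events against a subscriber's glob query (it is called as path_match(file_path, query)). A minimal standalone sketch of the same fnmatch-translate matching, with illustrative paths:

    # minimal sketch of the fnmatch-based matching used by path_match (illustrative paths).
    import fnmatch
    import re

    def path_match(query, pattern):
        # translate the glob pattern into a regex, then match it against the path
        regex = fnmatch.translate(pattern)
        return re.compile(regex).match(query)

    # "*" in fnmatch patterns matches across "/" as well
    assert path_match("runs/exp-1/metrics.pkl", "*.pkl")
    assert path_match("runs/exp-1/README.md", "*.pkl") is None
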
/ml_dash/file_watcher.py:
--------------------------------------------------------------------------------
1 | # from . import config
2 | import asyncio
3 | from hachiko.hachiko import AIOWatchdog
4 |
5 |
6 | class Handler:
7 | def dispatch(self, *args, **kwargs):
8 | print(args, kwargs)
9 |
10 | @asyncio.coroutine
11 | def watch_fs(path):
12 | watch = AIOWatchdog(path, event_handler=Handler())
13 | watch.start()
14 | while True:
15 | yield from asyncio.sleep(10)
16 | watch.stop()
17 |
18 |
19 |
20 | if __name__ == "__main__":
21 | # asyncio.get_event_loop().run_until_complete(watch_fs("/Users/ge/machine_learning/berkeley-playground/ins-runs"))
22 | # asyncio.get_event_loop().run_until_complete(watch_fs("."))
23 | path = "."
24 | watch = AIOWatchdog(path, event_handler=Handler())
25 | watch.start()
26 | import time
27 | print('watch is setup')
28 | while True:
29 | time.sleep(10)
30 |
31 |
--------------------------------------------------------------------------------
/ml_dash/mime_types.py:
--------------------------------------------------------------------------------
1 | ignored = ['.bzr', '$RECYCLE.BIN', '.DAV', '.DS_Store', '.git', '.hg', '.htaccess', '.htpasswd', '.Spotlight-V100',
2 | '.svn', '__MACOSX', 'ehthumbs.db', 'robots.txt', 'Thumbs.db', 'thumbs.tps']
3 | datatypes = {'audio': 'm4a,mp3,oga,ogg,webma,wav',
4 | 'archive': '7z,zip,rar,gz,tar',
5 | 'image': 'gif,ico,jpe,jpeg,jpg,png,svg,webp',
6 | 'pdf': 'pdf',
7 | 'quicktime': '3g2,3gp,3gp2,3gpp,mov,qt',
8 | 'source': 'atom,bat,bash,c,cmd,coffee,css,hml,js,json,java,less,markdown,md,php,pl,py,rb,rss,sass,scpt,swift,scss,sh,xml,yml,plist',
9 | 'text': 'txt',
10 | 'video': 'mp4,m4v,ogv,webm',
11 | 'website': 'htm,html,mhtm,mhtml,xhtm,xhtml'}
12 | icontypes = {'fa-music': 'm4a,mp3,oga,ogg,webma,wav',
13 | 'fa-archive': '7z,zip,rar,gz,tar',
14 | 'fa-picture-o': 'gif,ico,jpe,jpeg,jpg,png,svg,webp',
15 | 'fa-file-text': 'pdf',
16 | 'fa-film': '3g2,3gp,3gp2,3gpp,mov,qt',
17 | 'fa-code': 'atom,plist,bat,bash,c,cmd,coffee,css,hml,js,json,java,less,markdown,md,php,pl,py,rb,rss,sass,scpt,swift,scss,sh,xml,yml',
18 | 'fa-file-text-o': 'txt',
19 | 'fa-film': 'mp4,m4v,ogv,webm',
20 | 'fa-globe': 'htm,html,mhtm,mhtml,xhtm,xhtml'}
21 |
--------------------------------------------------------------------------------
/ml_dash/schema/__init__.py:
--------------------------------------------------------------------------------
1 | from graphene import relay, ObjectType, Float, Schema, List, String, Field, Int
2 | from ml_dash.schema.files.series import Series, get_series, SeriesArguments
3 | from ml_dash.schema.files.metrics import Metrics, get_metrics
4 | from ml_dash.schema.schema_helpers import bind, bind_args
5 | from ml_dash.schema.users import User, get_users, get_user
6 | from ml_dash.schema.projects import Project
7 | from ml_dash.schema.directories import Directory, get_directory
8 | from ml_dash.schema.files import File, FileConnection, MutateTextFile, MutateJSONFile, MutateYamlFile, \
9 | DeleteFile, DeleteDirectory, find_files_by_query
10 | # MutateJSONFile, MutateYamlFile
11 | from ml_dash.schema.experiments import Experiment
12 |
13 |
14 | class EditText(relay.ClientIDMutation):
15 | class Input:
16 | text = String(required=True, description='updated content for the text file')
17 |
18 | text = String(description="the updated content for the text file")
19 |
20 | @classmethod
21 | def mutate_and_get_payload(cls, root, info, text, ):
22 | return dict(text=text)
23 |
24 |
25 | class Query(ObjectType):
26 | node = relay.Node.Field()
27 | # context?
28 | # todo: files
29 | # todo: series
30 |
31 | users = Field(List(User), resolver=bind_args(get_users))
32 | user = Field(User, username=String(), resolver=bind_args(get_user))
33 | series = Field(Series, resolver=bind_args(get_series), **SeriesArguments)
34 |
35 | project = relay.Node.Field(Project)
36 | experiment = relay.Node.Field(Experiment)
37 | metrics = relay.Node.Field(Metrics)
38 | directory = relay.Node.Field(Directory)
39 | file = relay.Node.Field(File)
40 |
41 | glob = Field(List(File), cwd=String(required=True), query=String(), start=Int(), stop=Int(),
42 | resolver=bind_args(find_files_by_query))
43 |
44 |
45 | class Mutation(ObjectType):
46 | # todo: create_file
47 | # done: edit_file
48 | # done: remove_file
49 | # todo: move_file
50 | # todo: copy_file
51 |
52 | # do we need to have separate deleteDirectory? (look up relay client-side macros)
53 |
54 | delete_file = DeleteFile.Field()
55 | delete_directory = DeleteDirectory.Field()
56 | # update_text = EditText.Field()
57 | update_text = MutateTextFile.Field()
58 | update_json = MutateJSONFile.Field()
59 | update_yaml = MutateYamlFile.Field()
60 |
61 |
62 | schema = Schema(query=Query, mutation=Mutation)
63 |
--------------------------------------------------------------------------------
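A note on ml_dash/schema/__init__.py above: schema is a plain graphene Schema, so it can be exercised without the Sanic server at all. A minimal sketch, assuming Args.logdir points at a logging root laid out as <user>/<project>/... (which is what the users resolver expects); the path and username below are illustrative:

    # minimal sketch: run a GraphQL query against the schema without the HTTP server.
    # the logdir path is illustrative; it should contain one directory per user.
    from ml_dash.config import Args
    from ml_dash.schema import schema

    Args.logdir = "/tmp/ml-logger-root"

    result = schema.execute("query { users { username } }")
    print(result.errors)   # None on success
    print(result.data)     # e.g. {"users": [{"username": "geyang"}, ...]}
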
/ml_dash/schema/directories.py:
--------------------------------------------------------------------------------
1 | from os import listdir
2 | from os.path import isfile, join, split
3 | from graphene import ObjectType, relay, String, Field
4 | from ml_dash import schema
5 |
6 |
7 | class Directory(ObjectType):
8 | class Meta:
9 | interfaces = relay.Node,
10 |
11 | name = String(description='name of the directory')
12 | path = String(description='absolute path of the directory')
13 |
14 | readme = Field(lambda: schema.files.File)
15 |
16 | def resolve_readme(self, info, *args, **kwargs):
17 | # note: keep it simple, just use README for now.
18 | readmes = schema.files.find_files_by_query(cwd=self.id, query="README.md")
19 | return readmes[0] if readmes else None
20 |
21 | # deprecate this
22 | dash_configs = relay.ConnectionField(lambda: schema.files.FileConnection)
23 |
24 | def resolve_dash_configs(self, info, *args, **kwargs):
25 | return schema.files.find_files_by_query(cwd=self.id, query="*.dashcfg")
26 |
27 | charts = relay.ConnectionField(lambda: schema.files.FileConnection)
28 |
29 | def resolve_charts(self, info, *args, **kwargs):
30 | return schema.files.find_files_by_query(cwd=self.id, query="**/*.chart.yml")
31 |
32 | experiments = relay.ConnectionField(lambda: schema.experiments.ExperimentConnection)
33 |
34 | def resolve_experiments(self, info, first=None, **kwargs):
35 | if first is not None:
36 | return schema.experiments.find_experiments(cwd=self.id, stop=first)
37 | return schema.experiments.find_experiments(cwd=self.id)
38 |
39 | directories = relay.ConnectionField(lambda: schema.directories.DirectoryConnection)
40 |
41 | def resolve_directories(self, info, **kwargs):
42 | from ml_dash.config import Args
43 | root_dir = join(Args.logdir, self.id[1:])
44 | return [get_directory(join(self.id, _))
45 | for _ in listdir(root_dir) if not isfile(join(root_dir, _))]
46 |
47 | files = relay.ConnectionField(lambda: schema.files.FileConnection)
48 |
49 | def resolve_files(self, info, **kwargs):
50 | from ml_dash.config import Args
51 | root_dir = join(Args.logdir, self.id[1:])
52 | return [schema.files.File(id=join(self.id, _), name=_)
53 | for _ in listdir(root_dir) if isfile(join(root_dir, _))]
54 |
55 | @classmethod
56 | def get_node(cls, info, id):
57 | return get_directory(id)
58 |
59 |
60 | class DirectoryConnection(relay.Connection):
61 | class Meta:
62 | node = Directory
63 |
64 |
65 | def get_directory(id):
66 | _id = id.rstrip('/')
67 | return Directory(id=_id, name=split(_id[1:])[-1], path=_id)
68 |
--------------------------------------------------------------------------------
/ml_dash/schema/experiments.py:
--------------------------------------------------------------------------------
1 | from os import listdir
2 | from os.path import isfile, join, basename, realpath, isabs, split
3 |
4 | from graphene import ObjectType, relay, String, Field
5 | from ml_dash import schema
6 | from ml_dash.schema import files
7 | from ml_dash.schema.files.file_helpers import find_files
8 | from ml_dash.schema.files.metrics import find_metrics
9 | from ml_dash.schema.files.parameters import find_parameters
10 |
11 |
12 | class Experiment(ObjectType):
13 | class Meta:
14 | interfaces = relay.Node,
15 |
16 | name = String(description='name of the directory')
17 | path = String(description="path to the experiment")
18 |
19 | readme = Field(lambda: schema.files.File)
20 | parameters = Field(lambda: files.parameters.Parameters, )
21 | metrics = Field(lambda: files.metrics.Metrics)
22 |
23 | def resolve_readme(self, info, *args, **kwargs):
24 | # note: keep it simple, just use README for now.
25 | readmes = schema.files.find_files_by_query(cwd=self.id, query="README.md")
26 | return readmes[0] if readmes else None
27 |
28 | def resolve_parameters(self, info):
29 |         # note: when called with a wrong path, this is painfully
30 |         # slow b/c it lists all metric files.
31 | for p in find_parameters(self.id):
32 | return p
33 | return None
34 |
35 | def resolve_metrics(self, info):
36 |         # note: when called with a wrong path, this is painfully
37 |         # slow b/c it lists all metric files.
38 | for m in find_metrics(self.id):
39 | return m
40 | return None
41 |
42 | directories = relay.ConnectionField(lambda: schema.directories.DirectoryConnection)
43 | files = relay.ConnectionField(lambda: schema.files.FileConnection)
44 |
45 | def resolve_directories(self, info, **kwargs):
46 | from ml_dash.config import Args
47 | root_dir = join(Args.logdir, self.id[1:])
48 | return [schema.directories.get_directory(join(self.id, _))
49 | for _ in listdir(root_dir) if not isfile(join(root_dir, _))]
50 |
51 | def resolve_files(self, info, **kwargs):
52 | from ml_dash.config import Args
53 | root_dir = join(Args.logdir, self.id[1:])
54 | return [schema.files.File(id=join(self.id, _), name=_)
55 | for _ in listdir(root_dir) if isfile(join(root_dir, _))]
56 |
57 | @classmethod
58 | def get_node(cls, info, id):
59 | return get_experiment(id)
60 |
61 |
62 | class ExperimentConnection(relay.Connection):
63 | class Meta:
64 | node = Experiment
65 |
66 |
67 | def find_experiments(cwd, stop=None, **kwargs):
68 | """
69 | find all experiments
70 |
71 | :param cwd:
72 | :param start:
73 | :param stop:
74 | :return:
75 | """
76 | from ml_dash.config import Args
77 |     assert isabs(cwd), "the current work directory needs to be an absolute path."
78 | _cwd = realpath(join(Args.logdir, cwd[1:])).rstrip('/')
79 |     parameter_files = find_files(_cwd, "**/parameters.pkl", stop=None if stop is None else stop + 1, **kwargs)
80 | return [
81 | # note: not sure about the name.
82 | Experiment(id=join(cwd.rstrip('/'), p['dir']),
83 | name=basename(p['dir']) or ".",
84 | path=join(cwd.rstrip('/'), p['dir']),
85 | parameters=join(cwd.rstrip('/'), p['path']), )
86 | for p in parameter_files
87 | ]
88 |
89 |
90 | def get_experiment(id):
91 | _id = id.rstrip('/')
92 | return Experiment(id=_id, name=split(_id[1:])[-1], path=_id)
93 |
--------------------------------------------------------------------------------
/ml_dash/schema/files/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | from os.path import split, isabs, realpath, join, basename, dirname
3 | from graphene import ObjectType, relay, String, Int, Mutation, ID, Field, Node, Boolean
4 | from graphene.types.generic import GenericScalar
5 | from graphql_relay import from_global_id
6 | from ml_dash.schema.files.file_helpers import find_files
7 |
8 | from . import parameters, metrics
9 |
10 |
11 | class File(ObjectType):
12 | class Meta:
13 | interfaces = relay.Node,
14 |
15 | dir = String(description="the parent directory")
16 | name = String(description='name of the file')
17 | stem = String(description="stem of the file name")
18 | path = String(description='path to the file')
19 |
20 | def resolve_dir(self, info):
21 | return os.path.dirname(self.id)
22 |
23 | def resolve_stem(self, info, ):
24 | return self.name.split('.')[0]
25 |
26 | def resolve_path(self, info):
27 | return self.id
28 |
29 | text = String(description='text content of the file',
30 | start=Int(required=False, default_value=0),
31 | stop=Int(required=False, default_value=None))
32 |
33 | def resolve_text(self, info, start=0, stop=None):
34 | from ml_dash.config import Args
35 | try:
36 | with open(join(Args.logdir, self.id[1:]), "r") as f:
37 | lines = list(f)[start: stop]
38 | return "".join(lines)
39 | except FileNotFoundError:
40 | return None
41 |
42 | json = GenericScalar(description="the json content of the file")
43 |
44 | def resolve_json(self, info):
45 | import json
46 | try:
47 | from ml_dash.config import Args
48 | with open(join(Args.logdir, self.id[1:]), "r") as f:
49 | return json.load(f)
50 | except FileNotFoundError:
51 | return None
52 |
53 | yaml = GenericScalar(description="the content of the file using yaml")
54 |
55 | def resolve_yaml(self, info):
56 | import ruamel.yaml
57 | if ruamel.yaml.version_info < (0, 15):
58 | yaml = ruamel.yaml
59 | load_fn = yaml.safe_load
60 | else:
61 | from ruamel.yaml import YAML
62 | yaml = YAML()
63 |             yaml.explicit_start = True
64 | load_fn = yaml.load
65 |
66 | from ml_dash.config import Args
67 | try:
68 | with open(join(Args.logdir, self.id[1:]), "r") as f:
69 | return load_fn('\n'.join(f))
70 | except FileNotFoundError:
71 | return None
72 |
73 | @classmethod
74 | def get_node(cls, info, id):
75 | return get_file(id)
76 |
77 |
78 | class FileConnection(relay.Connection):
79 | class Meta:
80 | node = File
81 |
82 |
83 | def get_file(id):
84 | return File(id=id, name=basename(id), path=id)
85 |
86 |
87 | def find_files_by_query(cwd, query="**/*.*", **kwargs):
88 | from ml_dash.config import Args
89 |     assert isabs(cwd), "the current work directory needs to be an absolute path."
90 | _cwd = realpath(join(Args.logdir, cwd[1:])).rstrip('/')
91 | parameter_files = find_files(_cwd, query, **kwargs)
92 | return [
93 | # note: not sure about the name.
94 | File(id=join(cwd.rstrip('/'), p['path']),
95 | name=basename(p['path']),
96 | path=join(cwd.rstrip('/'), p['path']))
97 | for p in parameter_files
98 | ]
99 |
100 |
101 | def save_text_to_file(path, text):
102 | from ml_dash.config import Args
103 | assert isabs(path), "the path has to be absolute path."
104 | _path = join(Args.logdir, path[1:])
105 | with open(_path, "w") as f:
106 | f.write(text)
107 | return get_file(path)
108 |
109 |
110 | def save_yaml_to_file(path, data):
111 | from ml_dash.config import Args
112 | assert isabs(path), "the path has to be absolute path."
113 | _path = join(Args.logdir, path[1:])
114 | # note: assume all text format
115 | with open(_path, "w+") as f:
116 | import yaml
117 | _ = yaml.dump(data, f)
118 | return get_file(path)
119 |
120 |
121 | def save_json_to_file(path, data):
122 | from ml_dash.config import Args
123 | assert isabs(path), "the path has to be absolute path."
124 | _path = join(Args.logdir, path[1:])
125 | # note: assume all text format
126 | with open(_path, "w+") as f:
127 | import json
128 | _ = json.dumps(data, sort_keys=True, indent=2)
129 | f.write(_)
130 | return get_file(path)
131 |
132 |
133 | def remove_file(path):
134 | """remove does not work with directories"""
135 | from ml_dash.config import Args
136 | assert isabs(path), "the path has to be absolute path."
137 | _path = join(Args.logdir, path[1:])
138 | os.remove(_path)
139 |
140 |
141 | def remove_directory(path):
142 | """rmtree does not work with files"""
143 | import shutil
144 | from ml_dash.config import Args
145 | assert isabs(path), "the path has to be absolute path."
146 | _path = join(Args.logdir, path[1:])
147 | shutil.rmtree(_path)
148 |
149 |
150 | class MutateTextFile(relay.ClientIDMutation):
151 | class Input:
152 | id = ID()
153 | text = String(required=True)
154 |
155 | file = Field(File)
156 |
157 | @classmethod
158 | def mutate_and_get_payload(cls, root, info, id, text, client_mutation_id):
159 | _type, path = from_global_id(id)
160 | return MutateTextFile(file=save_text_to_file(path, text))
161 |
162 |
163 | class MutateYamlFile(relay.ClientIDMutation):
164 | """
165 | serializes the data to a yaml file format
166 | """
167 |
168 | class Input:
169 | id = ID()
170 | data = GenericScalar()
171 |
172 | file = Field(File)
173 |
174 | @classmethod
175 | def mutate_and_get_payload(self, root, info, id, data, client_mutation_id):
176 | _type, path = from_global_id(id)
177 | return MutateYamlFile(file=save_yaml_to_file(path, data))
178 |
179 |
180 | class MutateJSONFile(relay.ClientIDMutation):
181 | """
182 | serializes the data to a json file format
183 | """
184 |
185 | class Input:
186 | id = ID()
187 | data = GenericScalar()
188 |
189 | file = Field(File)
190 |
191 | @classmethod
192 | def mutate_and_get_payload(self, root, info, id, data, client_mutation_id):
193 | _type, path = from_global_id(id)
194 | return MutateJSONFile(file=save_json_to_file(path, data))
195 |
196 |
197 | class DeleteFile(relay.ClientIDMutation):
198 | class Input:
199 | id = ID()
200 |
201 | ok = Boolean()
202 | id = ID()
203 |
204 | @classmethod
205 | def mutate_and_get_payload(cls, root, info, id, client_mutation_id):
206 | _type, path = from_global_id(id)
207 | try:
208 | remove_file(path)
209 | return DeleteFile(ok=True, id=id)
210 | except FileNotFoundError:
211 | return DeleteFile(ok=False)
212 |
213 |
214 | class DeleteDirectory(relay.ClientIDMutation):
215 | class Input:
216 | id = ID()
217 |
218 | ok = Boolean()
219 | id = ID()
220 |
221 | @classmethod
222 | def mutate_and_get_payload(cls, root, info, id, client_mutation_id):
223 | _type, path = from_global_id(id)
224 | try:
225 | remove_directory(path)
226 | return DeleteDirectory(ok=True, id=id)
227 | except FileNotFoundError:
228 | return DeleteDirectory(ok=False)
229 |
--------------------------------------------------------------------------------
/ml_dash/schema/files/file_helpers.py:
--------------------------------------------------------------------------------
1 | import pathlib
2 | from glob import iglob
3 | from os import stat
4 | from os.path import basename, join, realpath, dirname
5 |
6 | from ml_dash.file_handlers import cwdContext
7 |
8 |
9 | def file_stat(file_path, no_stat=True):
10 | """
11 | getting the stats of the file.
12 |
13 | no_stat turns the stat call off.
14 |
15 | :param file_path:
16 | :param no_stat:
17 | :return:
18 | """
19 |     # note: this is very slow when looped over. Fine for a small list of files though.
20 | if no_stat:
21 | return dict(
22 | name=basename(file_path),
23 | path=file_path,
24 | dir=dirname(file_path),
25 | )
26 |
27 | stat_res = stat(file_path)
28 | sz = stat_res.st_size
29 | return dict(
30 | name=basename(file_path),
31 | path=file_path,
32 | dir=dirname(file_path),
33 | time_modified=stat_res.st_mtime,
34 | time_created=stat_res.st_ctime,
35 | # type=ft,
36 | size=sz,
37 | )
38 |
39 |
40 | def fast_glob(query, wd, skip_children=False):
41 | """
42 | ignore subtree when file is found under a certain directory.
43 |     :param skip_children:
44 | :return:
45 | """
46 | raise NotImplementedError()
47 |
48 |
49 | def find_files(cwd, query, start=None, stop=None, no_stat=True, show_progress=False):
50 | """
51 | find files by iGlob.
52 |
53 | :param cwd: the context folder for the glob, excluded from returned path list.
54 | :param query: glob query
55 | :param start: starting index for iGlob.
56 | :param stop: ending index for iGlob
57 | :param no_stat: boolean flag to turn off the file_stat call.
58 | :return:
59 | """
60 | from itertools import islice
61 |
62 | # https://stackoverflow.com/a/58126417/1560241
63 | if query.endswith('**'):
64 | query += "/*"
65 |
66 | with cwdContext(cwd):
67 | _ = islice(pathlib.Path(".").glob(query), start, stop)
68 | if show_progress:
69 | from tqdm import tqdm
70 | _ = tqdm(_, desc="@find_files")
71 | for i, file in enumerate(_):
72 | print(str(file))
73 | yield file_stat(str(file), no_stat=no_stat)
74 |
75 |
76 | def read_dataframe(path, k=None):
77 | from ml_logger.helpers import load_pickle_as_dataframe
78 | try:
79 | return load_pickle_as_dataframe(path, k)
80 | except FileNotFoundError:
81 | return None
82 |
83 |
84 | def read_records(path, k=200):
85 | from ml_logger.helpers import load_pickle_as_dataframe
86 | df = load_pickle_as_dataframe(path, k)
87 | return df.to_json(orient="records")
88 |
89 |
90 | def read_log(path, k=200):
91 | from ml_logger.helpers import load_pickle_as_dataframe
92 | df = load_pickle_as_dataframe(path, k)
93 | return df.to_json(orient="records")
94 |
95 |
96 | def read_pikle(path):
97 | from ml_logger.helpers import load_from_pickle
98 | data = [_ for _ in load_from_pickle(path)]
99 | return data
100 |
101 |
102 | def read_pickle_for_json(path):
103 | """convert non JSON serializable types to string"""
104 | from ml_logger.helpers import load_from_pickle, regularize_for_json
105 | data = [regularize_for_json(_) for _ in load_from_pickle(path)]
106 | return data
107 |
108 |
109 | def read_text(path, start, stop):
110 | from itertools import islice
111 | with open(path, 'r') as f:
112 | text = ''.join([l for l in islice(f, start, stop)])
113 | return text
114 |
115 |
116 | def read_binary():
117 | raise NotImplementedError()
118 | # todo: check the file handling here. Does this use correct
119 | # mimeType for text files?
120 | # res = await response.file(path)
121 | # if as_attachment:
122 | # res.headers['Content-Disposition'] = 'attachment'
123 |
--------------------------------------------------------------------------------
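A note on ml_dash/schema/files/file_helpers.py above: find_files is the workhorse behind find_experiments, find_metrics and find_files_by_query; it globs relative to a context directory and yields small stat dictionaries. A minimal sketch of calling it directly, with an illustrative directory:

    # minimal sketch: glob for parameter files under an (illustrative) log directory.
    from ml_dash.schema.files.file_helpers import find_files

    for entry in find_files("/tmp/ml-logger-root/geyang/demo-project",
                            "**/parameters.pkl", stop=10):
        # with no_stat=True (the default) each entry only carries name, path and dir
        print(entry["dir"], entry["name"])
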
/ml_dash/schema/files/images.py:
--------------------------------------------------------------------------------
1 | from os.path import split
2 | from graphene import ObjectType, relay, String
3 | from ml_dash import schema
4 |
5 |
6 | class File(ObjectType):
7 | class Meta:
8 | interfaces = relay.Node,
9 |
10 | name = String(description='name of the directory')
11 |
12 | # description = String(description='string serialized data')
13 | # experiments = List(lambda: schema.Experiments)
14 |
15 | @classmethod
16 | def get_node(cls, info, id):
17 | return get_file(id)
18 |
19 |
20 | class FileConnection(relay.Connection):
21 | class Meta:
22 | node = File
23 |
24 |
25 | def get_file(id):
26 | # path = os.path.join(Args.logdir, id[1:])
27 | return File(id=id, name=split(id[1:])[1])
28 |
--------------------------------------------------------------------------------
/ml_dash/schema/files/metrics.py:
--------------------------------------------------------------------------------
1 | from os.path import split, realpath, join, splitext, basename
2 | from graphene import relay, ObjectType, String, List, JSONString, Int
3 | from graphene.types.generic import GenericScalar
4 | from ml_dash.config import Args
5 | from ml_dash.schema.files.file_helpers import find_files, read_records, read_dataframe
6 |
7 |
8 | class Metrics(ObjectType):
9 | class Meta:
10 | interfaces = relay.Node,
11 |
12 | path = String(description="path to the file")
13 |     name = String(description="name of the file")
14 | stem = String(description="stem of the file name")
15 |
16 | def resolve_path(self, info):
17 | return self.id
18 |
19 | def resolve_stem(self, info, ):
20 | return self.name.split('.')[0]
21 |
22 | keys = List(String, description="list of keys for the metrics")
23 |
24 | # value = List(GenericScalar, description="the raw value")
25 | value = GenericScalar(description="The value of the metrics file",
26 | keys=List(String),
27 | k=Int(required=False),
28 | last=Int(required=False),
29 | window=Int(required=False))
30 |
31 | def resolve_keys(self, info):
32 | df = read_dataframe(join(Args.logdir, self.id[1:]))
33 | keys = df.keys()
34 | return list(keys)
35 |
36 | # todo: add more complex queries.
37 | def resolve_value(self, info, keys=None, k=None, last=None, window=None):
38 | path = join(Args.logdir, self.id[1:])
39 | realpath(path)
40 | _ = read_dataframe(path)
41 | if keys:
42 | df = _[keys].dropna()
43 | return {k: df[k].values.tolist() for k in keys}
44 | else:
45 | df = _.dropna()
46 | return {k: v.values.tolist() for k, v in df.items()}
47 |
48 | @classmethod
49 | def get_node(cls, info, id):
50 | return Metrics(id, )
51 |
52 |
53 | class MetricsConnection(relay.Connection):
54 | class Meta:
55 | node = Metrics
56 |
57 |
58 | def get_metrics(id):
59 | return Metrics(id=id, name=basename(id), path=id)
60 |
61 |
62 | def find_metrics(cwd, **kwargs):
63 | from ml_dash.config import Args
64 | _cwd = realpath(join(Args.logdir, cwd[1:]))
65 | parameter_files = find_files(_cwd, "**/metrics.pkl", **kwargs)
66 | for p in parameter_files:
67 | yield Metrics(id=join(cwd, p['path']), name="metrics.pkl")
68 |
--------------------------------------------------------------------------------
/ml_dash/schema/files/parameters.py:
--------------------------------------------------------------------------------
1 | from functools import reduce
2 | from os.path import split, join as pJoin, basename, realpath
3 | from graphene import ObjectType, relay, String, List
4 | from graphene.types.generic import GenericScalar
5 | from ml_dash.config import Args
6 | from ml_dash.schema.files.file_helpers import find_files, read_pickle_for_json
7 | from ml_dash.schema.helpers import assign, dot_keys, dot_flatten
8 |
9 |
10 | class Parameters(ObjectType):
11 | class Meta:
12 | interfaces = relay.Node,
13 |
14 |     name = String(description="The file name of the parameter file. Internal use only")
15 | path = String(description="The true path to the parameter file. Internal use only")
16 | keys = List(String, description="list of parameter keys")
17 | value = GenericScalar(description="the json value for the parameters")
18 | raw = GenericScalar(description="the raw data object for the parameters")
19 | flat = GenericScalar(description="the raw data object for the parameters")
20 |
21 | def resolve_name(self, info):
22 | return basename(self.id)
23 |
24 | def resolve_path(self, info):
25 | return self.id
26 |
27 | def resolve_keys(self, info):
28 | value = reduce(assign, read_pickle_for_json(pJoin(Args.logdir, self.id[1:])) or [{}])
29 | return dot_keys(value)
30 |
31 | def resolve_value(self, info, **kwargs):
32 | return reduce(assign, read_pickle_for_json(pJoin(Args.logdir, self.id[1:])) or [{}])
33 |
34 | def resolve_raw(self, info, **kwargs):
35 | return read_pickle_for_json(pJoin(Args.logdir, self.id[1:]))
36 |
37 | def resolve_flat(self, info, **kwargs):
38 |         # note: this always gives a truncated some-folder/parameters.pkl path.
39 | value = reduce(assign, read_pickle_for_json(pJoin(Args.logdir, self.id[1:])) or [{}])
40 | return dot_flatten(value)
41 |
42 | # description = String(description='string serialized data')
43 | # experiments = List(lambda: schema.Experiments)
44 |
45 | @classmethod
46 | def get_node(cls, info, id):
47 | return get_parameters(id)
48 |
49 |
50 | class ParameterConnection(relay.Connection):
51 | class Meta:
52 | node = Parameters
53 |
54 |
55 | def get_parameters(id):
56 | return Parameters(id=id)
57 |
58 |
59 | def find_parameters(cwd, **kwargs):
60 | from ml_dash.config import Args
61 | _cwd = realpath(pJoin(Args.logdir, cwd[1:]))
62 | parameter_files = find_files(_cwd, "parameters.pkl", **kwargs)
63 | for p in parameter_files:
64 | yield Parameters(id=pJoin(cwd, p['path']))
65 |
--------------------------------------------------------------------------------
/ml_dash/schema/files/series.py:
--------------------------------------------------------------------------------
1 | from os.path import join, isabs
2 |
3 | import numpy as np
4 | import pandas as pd
5 | from graphene import relay, ObjectType, String, List, ID, Int, Float
6 | from graphene.types.generic import GenericScalar
7 | from ml_dash.config import Args
8 | from ml_dash.schema.files.file_helpers import read_dataframe
9 |
10 |
11 | def get_column(df, key, stat_key):
12 | try:
13 | return df[key][stat_key].to_numpy().tolist()
14 | except:
15 | return []
16 |
17 |
18 | def get_columns(df, keys, stat_key):
19 | return {k: get_column(df, k, stat_key) for k in keys}
20 |
21 |
22 | class Series(ObjectType):
23 | class Meta:
24 | interfaces = relay.Node,
25 |
26 | path = String(description="the file path for the configuration file")
27 |
28 | prefix = String(description='path prefix for the metrics files')
29 | metrics_files = List(ID, description="List of metrics file IDs that we use to aggregate this series")
30 |
31 | _df = GenericScalar(description='the processed dataframe object that aggregates all metrics files.')
32 |
33 | window = Float(description="the window for the rolling average")
34 |
35 |     label = String(description="the label for the series")
36 | x_key = String(description="key for the x")
37 | y_key = String(description="key for the y axis")
38 | y_keys = List(String, description="list of keys for the y axis")
39 |
40 | # stype = SeriesTyes(description="the type of series data")
41 |
42 | # resolved from dataset
43 | x_data = GenericScalar(description="x data")
44 | y_mean = GenericScalar(description="y data from the mean of the window")
45 | # y_mode = GenericScalar(description="y data as from mode of the window")
46 |     y_median = GenericScalar(description="y data from the median of the window")
47 | y_min = GenericScalar(description="min in each bin")
48 | y_max = GenericScalar(description="max in each bin")
49 |     y_25pc = GenericScalar(description="25th percentile in each bin")
50 |     y_75pc = GenericScalar(description="75th percentile in each bin")
51 |     y_95pc = GenericScalar(description="95th percentile in each bin")
52 |     y_05pc = GenericScalar(description="5th percentile in each bin")
53 | # Note: confidence level only applies to mean. So only group has it.
54 | # y_c95 = GenericScalar(description="95th confidence")
55 | y_count = GenericScalar(description="the number of datapoints used to compute each tick")
56 |
57 | warning = String(description="Warning Message")
58 |
59 | # todo: start time
60 |
61 | # todo: need to move the keys out, so that we can dropnan on the joint table.
62 | # Otherwise the different data columns would not necessarily be the same length.
63 | def resolve_x_data(self, info):
64 | # note: new in 0.24.1.
65 | # ~> df.value.dtype does NOT work for categorical data.
66 | _ = self._df['__x'].to_numpy()
67 | if np.issubdtype(_.dtype, np.datetime64):
68 | return (_.astype(int) / 1000).tolist()
69 | elif np.issubdtype(_.dtype, np.timedelta64):
70 | return (_.astype(int) / 1000).tolist()
71 | return _.tolist()
72 |
73 | def resolve_y_mean(self, info):
74 | if self.y_key is not None:
75 | return get_column(self._df, self.y_key, 'mean')
76 | return get_columns(self._df, self.y_keys, 'mean')
77 |
78 | # def resolve_y_mode(self, info):
79 | # if self.y_key is not None:
80 | # return self._df[self.y_key]['mode'].to_numpy().tolist()
81 | # return {k: self._df[k]['mode'].to_numpy().tolist() for k in self.y_keys}
82 |
83 | def resolve_y_min(self, info):
84 | if self.y_key is not None:
85 | return get_column(self._df, self.y_key, 'min')
86 | return get_columns(self._df, self.y_keys, 'min')
87 |
88 | def resolve_y_max(self, info):
89 | if self.y_key is not None:
90 | return get_column(self._df, self.y_key, 'max')
91 | return get_columns(self._df, self.y_keys, 'max')
92 |
93 | def resolve_y_median(self, info):
94 | if self.y_key is not None:
95 | return get_column(self._df, self.y_key, '50%')
96 | return get_columns(self._df, self.y_keys, '50%')
97 |
98 | def resolve_y_25pc(self, info):
99 | if self.y_key is not None:
100 | return get_column(self._df, self.y_key, '25%')
101 | return get_columns(self._df, self.y_keys, '25%')
102 |
103 | def resolve_y_75pc(self, info):
104 | if self.y_key is not None:
105 | return get_column(self._df, self.y_key, '75%')
106 | return get_columns(self._df, self.y_keys, '75%')
107 |
108 | def resolve_y_95pc(self, info):
109 | if self.y_key is not None:
110 | return get_column(self._df, self.y_key, '95%')
111 | return get_columns(self._df, self.y_keys, '95%')
112 |
113 | def resolve_y_05pc(self, info):
114 | if self.y_key is not None:
115 | return get_column(self._df, self.y_key, '5%')
116 | return get_columns(self._df, self.y_keys, '5%')
117 |
118 | def resolve_y_count(self, info):
119 | if self.y_key is not None:
120 | return get_column(self._df, self.y_key, 'count')
121 | return get_columns(self._df, self.y_keys, 'count')
122 |
123 | @classmethod
124 | def get_node(cls, info, id):
125 | return Series(id)
126 |
127 |
128 | def get_series(metrics_files=tuple(),
129 | prefix=None,
130 | head=None,
131 | tail=None,
132 | x_low=None,
133 | x_high=None,
134 |                x_edge=None,  # OneOf('left', 'right', 'mean')
135 | k=None,
136 | x_align=None, # OneOf(int, 'left', 'right')
137 | x_key=None,
138 | y_key=None,
139 | y_keys=None,
140 | label=None):
141 | warning = None
142 |     assert not y_key or not y_keys, "yKey and yKeys can not both be set at the same time"
143 |     assert y_key or y_keys, "one of yKey and yKeys has to be set."
144 |     assert head is None or tail is None, "head and tail can not both be set at the same time"
145 | if not prefix:
146 | for id in metrics_files:
147 |             assert isabs(id), f"metricsFiles need to be absolute paths when prefix is {prefix}. Got {id} instead."
148 |
149 | ids = [join(prefix or "", id) for id in metrics_files]
150 | dfs = [read_dataframe(join(Args.logdir, _id[1:])) for _id in ids]
151 |
152 | y_keys = y_keys or [y_key]
153 | join_keys = [k for k in {x_key, *y_keys} if k is not None]
154 |
155 | dataframes = []
156 | for df in dfs:
157 | if df is None:
158 | continue
159 | elif x_key is not None:
160 | df.set_index(x_key)
161 | if x_align is None:
162 | pass
163 | elif x_align == "start": # todo: this needs to be part of the join
164 | df[x_key] -= df[x_key][0]
165 | elif x_align == "end":
166 | df[x_key] -= df[x_key][-1]
167 | else:
168 | df[x_key] -= x_align
169 | else:
170 | df = df[y_keys]
171 | df['index'] = df.index
172 | df.set_index('index')
173 |
174 | # todo: maybe apply tail and head *after* dropna??
175 | if tail is not None:
176 | df = df.tail(tail)
177 | if head is not None:
178 | df = df.head(head)
179 | inds = True
180 | if x_low is not None:
181 | inds &= df[x_key or "index"] >= x_low
182 | # print("x_low >>>", inds)
183 | if x_high is not None:
184 | inds &= df[x_key or "index"] <= x_high
185 | # print("x_high >>>", inds)
186 | if inds is not True:
187 | df = df.loc[inds]
188 |
189 | # todo: only dropna if we are not using ranges.
190 | try:
191 | column = df[join_keys]
192 | if head is None and tail is None:
193 | dataframes.append(column.dropna())
194 | else:
195 | dataframes.append(column)
196 | except KeyError as e:
197 | raise KeyError(f"{join_keys} contain keys that is not in the dataframe. "
198 | f"Keys available include {df.keys()}") from e
199 |
200 | if not dataframes: # No dataframe, return `null`.
201 | return None
202 |
203 | all = pd.concat(dataframes)
204 |
205 | if x_key:
206 | all = all.set_index(x_key)
207 |
208 | all.rank(method='first')
209 |
210 | if k is not None:
211 | bins = pd.qcut(all.index, k, duplicates='drop')
212 | grouped = all.groupby(bins)
213 | # df
214 | else:
215 | grouped = all.groupby(level=0)
216 |
217 | # treat all numbers in bin as equal. For raw (not averaged, or averaged)
218 | grouped[y_keys].agg(['count', 'mean', 'min', 'max'])
219 | df = grouped[y_keys].describe(percentiles=[0.25, 0.75, 0.5, 0.05, 0.95]).reset_index()
220 |
221 | if k is not None:
222 | if 'index' not in df:
223 | df['index'] = df.index
224 | if x_edge == "right" or x_edge is None:
225 | df['__x'] = df['index'].apply(lambda r: r.right)
226 | elif x_edge == "left":
227 | df['__x'] = df['index'].apply(lambda r: r.left)
228 | elif x_edge == "mean":
229 | df['__x'] = df['index'].apply(lambda r: 0.5 * (r.left + r.right))
230 | # todo: use mode of each bin
231 | else:
232 |                 raise KeyError(f"x_edge {[x_edge]} should be OneOf['left', 'right', 'mean']")
233 | else:
234 | df['__x'] = df.index
235 |
236 | return Series(metrics_files,
237 | _df=df.sort_values(by="__x"),
238 | metrics_files=metrics_files,
239 | prefix=prefix,
240 | x_key=x_key or "index",
241 | y_key=y_key,
242 | y_keys=y_keys,
243 | label=label,
244 | warning=warning)
245 |
246 |
247 | SeriesArguments = dict(
248 | metrics_files=List(String, required=True),
249 | prefix=String(description="prefix to the metricFiles.", required=False),
250 | head=Int(description="the number of datapoints (for each metrics file) to take from the head-end"),
251 | tail=Int(description="the number of datapoints (for each metrics file) to take from the tail-end"),
252 | x_low=Float(description="the (inclusive) lower end of the x column"),
253 | x_high=Float(description="the (inclusive) higher end of the x column"),
254 | k=Int(required=False, description='the number of datapoints to return.'),
255 | x_align=String(description="a number (anchor point), 'start', 'end'"),
256 | x_key=String(),
257 |     y_key=String(description="You can leave out the xKey, but the yKey is required."),
258 | y_keys=List(String, description="Alternatively you can pass a list of keys to yKey*s*."),
259 | label=String(),
260 | warning=String(),
261 | )
262 |
263 | # if __name__ == "__main__":
264 |
--------------------------------------------------------------------------------
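A note on ml_dash/schema/files/series.py above: get_series loads one dataframe per metrics file, joins them on x_key (or the row index), optionally bins the x axis into k quantile bins, and returns a Series whose resolvers read the aggregated percentiles out of _df. A minimal sketch of calling it directly; the logdir, file paths and column names are illustrative:

    # minimal sketch: aggregate two metrics.pkl files into one binned series.
    # the logdir, file paths and column names below are illustrative only.
    from ml_dash.config import Args
    from ml_dash.schema.files.series import get_series

    Args.logdir = "/tmp/ml-logger-root"

    series = get_series(
        metrics_files=["/geyang/demo-project/run-0/metrics.pkl",
                       "/geyang/demo-project/run-1/metrics.pkl"],
        x_key="epoch",      # column shared across runs, used as the x axis
        y_key="loss",       # single y column; pass y_keys=[...] for several
        k=50,               # number of quantile bins along x
    )

    if series is not None:  # get_series returns None when no dataframe could be loaded
        print(series.resolve_x_data(None)[:5])
        print(series.resolve_y_mean(None)[:5])
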
/ml_dash/schema/files/videos.py:
--------------------------------------------------------------------------------
1 | from os.path import split
2 | from graphene import ObjectType, relay, String
3 | from ml_dash import schema
4 |
5 |
6 | class File(ObjectType):
7 | class Meta:
8 | interfaces = relay.Node,
9 |
10 | name = String(description='name of the directory')
11 |
12 | # description = String(description='string serialized data')
13 | # experiments = List(lambda: schema.Experiments)
14 |
15 | @classmethod
16 | def get_node(cls, info, id):
17 | return get_file(id)
18 |
19 |
20 | class FileConnection(relay.Connection):
21 | class Meta:
22 | node = File
23 |
24 |
25 | def get_file(id):
26 | # path = os.path.join(Args.logdir, id[1:])
27 | return File(id=id, name=split(id[1:])[1])
28 |
--------------------------------------------------------------------------------
/ml_dash/schema/helpers.py:
--------------------------------------------------------------------------------
1 | from typing import List, Any
2 |
3 |
4 | def assign(d1, d2):
5 | if not d2:
6 | return d1
7 | for k, v in d2.items():
8 | if isinstance(d1.get(k, None), dict):
9 | d1[k] = assign(d1[k], v)
10 | else:
11 | d1[k] = v
12 | return d1
13 |
14 |
15 | if __name__ == "__main__":
16 | object1 = {"a": 1, "b": 2, "c": 3}
17 | object2 = assign({"c": 4, "d": 5}, object1)
18 | assert object2['c'] == 3
19 | assert object2['d'] == 5
20 |
21 |
22 | def idot_keys(d, strict=True):
23 | for k, v in d.items():
24 | if isinstance(v, dict):
25 | if not strict:
26 | yield k
27 | for _ in idot_keys(v, strict):
28 | yield k + "." + _
29 | else:
30 | yield k
31 |
32 |
33 | def dot_keys(d, strict=True):
34 | return [*idot_keys(d, strict)]
35 |
36 |
37 | if __name__ == "__main__":
38 | object = {"a": 1, "b": 2, "c": 3, "child": {"a": 3, "grandchild": {'d': 8}}}
39 | assert dot_keys(object) == ['a', 'b', 'c', 'child.a', 'child.grandchild.d']
40 | assert dot_keys(object, strict=False) == ['a', 'b', 'c', 'child', 'child.a', 'child.grandchild',
41 | 'child.grandchild.d']
42 |
43 |
44 | def idot_flatten(d, ancestors: List[Any] = tuple()):
45 | """
46 | returns a flattened dictionary with the keys of the nexted dictionaries converted into dot-separated keys.
47 |
48 | :param d: map
49 | :return: flat map
50 | """
51 | for k, v in d.items():
52 | if isinstance(v, dict):
53 | for _k, _v in idot_flatten(v):
54 | yield k + "." + _k, _v
55 | else:
56 | yield k, v
57 |
58 |
59 | def dot_flatten(d):
60 | # note: dictionaries are ordered by default in python 3.7.
61 | return dict(idot_flatten(d))
62 |
63 |
64 | if __name__ == "__main__":
65 | object = {"a": 1, "b": 2, "c": 3, "child": {"a": 3, "grandchild": {'d': 8}}}
66 | assert list(dot_flatten(object).keys()) == ['a', 'b', 'c', 'child.a', 'child.grandchild.d']
67 |
--------------------------------------------------------------------------------
/ml_dash/schema/projects.py:
--------------------------------------------------------------------------------
1 | from os import listdir
2 | from os.path import isfile, join, split
3 |
4 | from graphene import ObjectType, relay, String, List
5 | from ml_dash import schema
6 |
7 |
8 | class Project(ObjectType):
9 | class Meta:
10 | interfaces = relay.Node,
11 |
12 | name = String(description='name of the project')
13 |
14 | # description = String(description='string serialized data')
15 | # experiments = List(lambda: schema.Experiments)
16 |
17 | experiments = relay.ConnectionField(lambda: schema.experiments.ExperimentConnection)
18 |
19 | def resolve_experiments(self, info, before=None, after=None, first=None, last=None):
20 | # todo: add support for before after and last
21 | if first is not None:
22 | return schema.experiments.find_experiments(cwd=self.id, stop=first)
23 | return schema.experiments.find_experiments(cwd=self.id)
24 |
25 | directories = relay.ConnectionField(lambda: schema.directories.DirectoryConnection)
26 |
27 | def resolve_directories(self, info, before=None, after=None, first=None, last=None):
28 | from ml_dash.config import Args
29 | root_dir = join(Args.logdir, self.id[1:])
30 | return [schema.Directory(id=join(self.id, _), name=_)
31 | for _ in listdir(root_dir) if not isfile(join(root_dir, _))]
32 |
33 | files = relay.ConnectionField(lambda: schema.files.FileConnection)
34 |
35 | def resolve_files(self, info, before=None, after=None, first=None, last=None):
36 | from ml_dash.config import Args
37 | root_dir = join(Args.logdir, self.id[1:])
38 | return [schema.Directory(id=join(self.id, _), name=_)
39 | for _ in listdir(root_dir) if isfile(join(root_dir, _))]
40 |
41 | @classmethod
42 | def get_node(cls, info, id):
43 | return get_project(id)
44 |
45 |
46 | class ProjectConnection(relay.Connection):
47 | class Meta:
48 | node = Project
49 |
50 |
51 | def get_projects(username):
52 | import os
53 | from ml_dash.config import Args
54 | user_root = join(Args.logdir, username)
55 | return [Project(name=_, id=join('/', username, _))
56 |             for _ in os.listdir(user_root) if not isfile(join(user_root, _))]
57 |
58 |
59 | def get_project(id):
60 | from ml_dash.config import Args
61 | path = join(Args.logdir, id[1:])
62 | return Project(id=id, name=split(id[1:])[1], _path=path)
63 |
--------------------------------------------------------------------------------
/ml_dash/schema/schema_helpers.py:
--------------------------------------------------------------------------------
1 | def bind(fn):
2 | """
3 | Binds the function to the class.
4 |
5 | :param fn:
6 | :return: bound_fn
7 | """
8 | return lambda _, *args, **kwargs: fn(*args, **kwargs)
9 |
10 |
11 | def bind_args(fn):
12 | """
13 | Binds args after info.
14 |
15 | :param fn:
16 | :return: bound_fn
17 | """
18 | return lambda _, info, *args, **kwargs: fn(*args, **kwargs)
19 |
20 |
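21 | if __name__ == "__main__":
22 |     # a minimal usage sketch (not part of the schema): `bind` drops the implicit
23 |     # parent argument that graphene passes to a resolver; `bind_args` also drops
24 |     # the `info` argument, so a plain function can serve as the resolver body.
25 |     def add(a, b):
26 |         return a + b
27 | 
28 |     assert bind(add)("parent", 1, 2) == 3
29 |     assert bind_args(add)("parent", "info", 1, 2) == 3
30 | 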
--------------------------------------------------------------------------------
/ml_dash/schema/users.py:
--------------------------------------------------------------------------------
1 | from os.path import isfile, join
2 | from graphene import ObjectType, relay, String
3 | from ml_dash import schema
4 |
5 |
6 | class User(ObjectType):
7 | class Meta:
8 | interfaces = relay.Node,
9 |
10 | @classmethod
11 | def get_node(_, info, id):
12 | print(info, id)
13 | return get_user(id)
14 |
15 |     username = String(description='username of the user')
16 |     name = String(description='display name of the user')
17 |
18 | projects = relay.ConnectionField(lambda: schema.projects.ProjectConnection)
19 |
20 | def resolve_projects(self, info, before=None, after=None, first=None, last=None):
21 | # todo: figure out a good way for pagination.
22 | # note: project does not support first, last
23 | return schema.projects.get_projects(self.username)
24 |
25 | # teams = List(lambda: schema.Team)
26 |
27 |
28 | def get_users(ids=None):
29 | import os
30 | from ml_dash.config import Args
31 |     return [User(username=_, name="Ge Yang") for _ in os.listdir(Args.logdir) if not isfile(join(Args.logdir, _))]
32 |
33 |
34 | def get_user(username):
35 | return User(username=username, name="Ge Yang", id=username)
36 |
--------------------------------------------------------------------------------
/ml_dash/server.py:
--------------------------------------------------------------------------------
1 | from sanic import Sanic, views
2 | from sanic_cors import CORS
3 | from sanic_graphql import GraphQLView
4 |
5 | from ml_dash.schema import schema
6 |
7 | # register FETCH and OPTIONS as allowed HTTP methods (needed for CORS preflight on the GraphQL routes).
8 | views.HTTP_METHODS += ('FETCH', 'OPTIONS')
9 |
10 | app = Sanic("ml_dash.server")
11 | # CORS(app)
12 | CORS(app, resources={r"/*": {"origins": "*"}}, automatic_options=True)
13 |
14 | # NOTE: disabled out of laziness. Should enable it in the future.
15 | # @app.listener('before_server_start')
16 | # def init_graphql(app, loop):
17 | # app.add_route(GraphQLView.as_view(schema=schema, executor=AsyncioExecutor(loop=loop)), '/graphql')
18 |
19 | # new graphQL endpoints
20 | app.add_route(GraphQLView.as_view(schema=schema, graphiql=True), '/graphql',
21 | methods=['GET', 'POST', 'FETCH', 'OPTIONS'])
22 | app.add_route(GraphQLView.as_view(schema=schema, batch=True), '/graphql/batch',
23 | methods=['GET', 'POST', 'FETCH', 'OPTIONS'])
24 |
25 |
26 | @app.listener('before_server_start')
27 | def setup_static(app, loop):
28 | from ml_dash import config
29 | from os.path import expanduser
30 | app.static('/files', expanduser(config.Args.logdir),
31 | use_modified_since=True, use_content_range=True, stream_large_files=True)
32 |
33 |
34 | # note: currently disabled, file events API.
35 | # from .file_events import file_events, setup_watch_queue
36 | # app.add_route(file_events, '/file-events', methods=['GET', 'OPTIONS'])
37 | # app.add_route(file_events, '/file-events/', methods=['GET', 'OPTIONS'])
38 | # app.listener('before_server_start')(setup_watch_queue)
39 |
40 |
41 | def run(logdir=None, **kwargs):
42 | from ml_dash import config
43 | from termcolor import cprint
44 |
45 | if logdir:
46 | config.Args.logdir = logdir
47 |
48 | cprint("launched server with config:", "green")
49 | cprint("Args:", 'yellow')
50 | print(vars(config.Args))
51 | cprint("Sanic Server Args:", 'yellow')
52 | print(vars(config.ServerArgs))
53 | cprint("SSL Args:", 'yellow')
54 | print(vars(config.SSLArgs))
55 |
56 | config.ServerArgs._update(**kwargs)
57 | if config.SSLArgs.cert:
58 | app.run(**vars(config.ServerArgs), ssl=vars(config.SSLArgs))
59 | else:
60 | app.run(**vars(config.ServerArgs))
61 |
62 |
63 | if __name__ == "__main__":
64 | # see: https://sanic.readthedocs.io/en/latest/sanic/deploying.html
65 |     # call this as `python -m ml_dash.server`
66 | run()
67 |
--------------------------------------------------------------------------------
/ml_dash/sse.py:
--------------------------------------------------------------------------------
1 | # SSE "protocol" is described here: http://mzl.la/UPFyxY
2 | class ServerSentEvent(object):
3 |
4 | def __init__(self, data):
5 | self.data = data
6 | self.event = None
7 | self.id = None
8 |         self.desc_map = {
9 |             "data": self.data,
10 |             "event": self.event,
11 |             "id": self.id,
12 |         }
13 | 
14 |     def __str__(self):
15 |         if not self.data:
16 |             return ""
17 |         lines = [f"{k}: {v}" for k, v in self.desc_map.items() if v]
18 |         return "%s\n\n" % "\n".join(lines)
19 |
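20 | if __name__ == "__main__":
21 |     # a minimal sketch of the wire format: a payload renders as a single SSE
22 |     # frame terminated by a blank line; empty payloads render as "".
23 |     assert str(ServerSentEvent("hello")) == "data: hello\n\n"
24 |     assert str(ServerSentEvent("")) == ""
25 | 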
--------------------------------------------------------------------------------
/notes/ML-Dash Enhancement Plans.md:
--------------------------------------------------------------------------------
1 | # ML-Dash Enhancement Plans
2 |
3 | Main Objectives
4 | 1. run-management: monitor, kill, relaunch runs.
5 | 2. better UI (lean, no-bullshit, exploratory)
6 |
7 | ## Todo List
8 |
9 | - [ ] comment out the
10 | - [ ] Add keyword glob to the keys
11 | - [ ] parallel coordinates
12 |
--------------------------------------------------------------------------------
/notes/README.md:
--------------------------------------------------------------------------------
1 | # Implementation Plans
2 |
3 | 1. build schema
4 | 2. make single dataview
5 | 3. make file-list view
6 | 4. make diff view
7 | 5. make layout
8 | 6. make chart key view
9 | 7. single experiment view?
10 |
11 | ## ToDos
12 |
13 |
14 | ### Parallel Coordinates
15 |
16 | - [ ] redirect to profile if no user profile exists
17 |
18 | 1. show nested children
19 | click on row to expand
20 |
21 | right now the biggest problem is that we don't know how to render only the row on the right.
22 | Can make-do with some kind of DOM away container, but it is not ideal.
23 |
24 | hidden column:
25 | 1. be able to select columns
26 | 2. be able to add those to hide
27 | 3. show hidden columns
28 | 4. select hidden
29 | 5. allow unhide
30 | - column head: expand hidden (not a good idea b/c )
31 |
32 | allow resizing columns:
33 | 1. select column
34 | 2. resize
35 | or
36 | 1. mouse over
37 | allow reordering of columns:
38 | 1. move the header on top
39 |
40 | group rows:
41 | 1. select row 1
42 | 2. select row 2
43 | 3. panel opens on the right, show keys that are different
44 | ```yaml
45 | columns: key, exp1, exp2
46 | ```
47 |
48 | select multiple rows:
49 | 1. select row 1
50 | 2. select row 2
51 | 3. panel opens on the right showing different keys
52 | 4. select row 3
53 | 5. select row 4
54 | 6. panel on the right changes to
55 | ```yaml
56 | key 1, key 2, key 3
57 | exp 1,
58 | exp 2, ...
59 | ```
60 | 7. panel on the right turn into a parallel coordinate plot
61 |
62 | - [ ] add multiple folder selection in the navbar to show experiments from those folders.
63 |
64 | feature parity:
65 |
66 | multi-key support for the `LineChart`? Need to be able to see all of the lines before selecting one or a few
67 |
68 | so this is wild-card support in keys. In the input box, we put in a search query. It finds relevant keys
69 | in the metric file before generating the plots.
70 |
71 | - [ ] show grouped rows, organized by groupBy groupIgnore
72 | - [ ] show
73 | - [x] fixed column width, use `hidden` for horizontal scroll inside cells
74 | - [x] Drag and Drop column position
75 | - [ ] organize the state, save column width to configuration.
76 | - widths: {key, width}
77 | - [ ] resize column
78 | - [ ] hide column
79 | - [ ] add column (button, click and select/type and select)
80 | - [ ]
81 | - [ ] order by column
82 | - [ ] need to save column position
83 | - [ ] minor point: editors need to support folding.
84 | - [ ] add column width to keys
85 | - [ ] column drag and drop:
86 | https://github.com/ant-design/ant-design/issues/4639#issuecomment-316351389
87 |
88 | - [x] chart row
89 | - checkbox
90 | - add and remove columns from table, with dropdown
91 | - remove front page (redirect to /profiles)
92 | - add image support
93 | - compare results
94 |
95 | New features:
96 | - add header
97 | - remove header
98 | - order by column
99 | - group by column
100 |
101 |
102 | - resize
103 | - row render
104 | - infinite scroll
105 |
106 | - [ ] window for adding the keys
107 | - [ ] add multiKey support?? does it work now?
108 |     - multi-key support already exists on the backend.
109 | - [x] need to add support in line chart.
110 | - [ ] **Reach Feature Parity!!!!!!!**
111 | - [ ] package for deployment
112 | - start vis app
113 | - start vis server (existing connection: local)
114 | - add configuration to turn off local config
115 | - add config for vis-only.
116 | - add file API
117 |
118 | ## Grid View
119 |
120 |
121 | typical workflow:
122 | > search for parameters, add filters to search to condense. (logger add start time and end time!)
123 | - parameter search
124 | - add experiment start and update time to parameters
125 | - add view for experiment quick view
126 |
127 | - [x] change toggle to hide first
128 |
129 | ### Urgent ToDos
130 |
131 | - [ ] RC-table
132 | - scrollable support
133 | - full colSpan support
134 | - header click handle
135 | - column width
136 |
137 |
138 | - allow removal of column names
139 | - [ ] Need to add `startTime`, `endTime`, `length` to create
140 | - always add during experiment registration
141 | - for endTime, use metrics file __timestamp
142 |     - If endTime is more than 5 min old, show the run as stopped.
143 |     - add title bar to charts
144 | - Allow adding charts to exp
145 |
146 |
147 | ### Rest Usability Issues
148 |
149 | - [ ] add dates to directories (no need for now)
150 | - [ ] add `load more` button to Directories
151 | - allow reorder of column names via drag and drop
152 | - add DATES to the NavBar
153 | - add order (ascending/descending) button to directories
154 | - add delete button to Directories
155 | - add rename button to Directories
156 | - allow ordering by column
157 | - allow grouping of columns (shift select)
158 | - add `load more...` to the folders and files on the left.
159 | - add search to `Directories (find)` and `Files (find)`. For `Experiments`,
160 |
161 | - Do not know what keys there are
162 | - Do not know what metrics there are
163 | - Would be good if we could order all experiments by date, starting from year and unfolding by start date
164 |
165 | - At Project root, there are a lot of folders. Navbar only shows a few of them
166 | - Need to be able to order the project folders
167 | - Need to be able to navigate back to parent folder
168 | - Need to support Images
169 | - Need to support summary in table
170 | - Want to be able to search
171 | - Need to support sorting in table
172 | - Need to add `startTime`, `endTime`, `length` to table for sorting
173 | - use infinite list for table
174 | - use draggable column width for table
175 |
176 |
177 | - [ ] fix the trace ordering issue
178 | - [ ] Change UI, to add table columns, with typeahead
179 | - [ ] add title bar to chart
180 | - [ ] next to title, add (...) button for modifying the chart config
181 | - [ ] add in-line block to create chart
182 | - [ ] add yaml encoding write
183 |
184 | - [ ] table show metric results [need metrics query api, better to be single call]
185 | - [ ] simple table component
186 | - [ ] launch new vis server backend
187 | - [ ] select charts to compare with each other
188 | - [ ] `summary {
189 | metricFiles: []
190 | prefix: string!
191 | key: string
192 | tail (n) {mean pc25 pc75 pc95 pc05 pc84 pc16 stddev mode}
193 | last
194 | }`
195 |
196 | - **default values for `prefix`** key.
197 |   the value for the prefix key should be the same as the current context
198 | and the relative paths should require a prefix key on the API side.
199 | The powerful thing is that we can encapsulate these as react components.
200 |
201 |
202 | - [ ] default unhide 3 experiments in list view
203 |
204 |
205 | - [x] implement end-point configuration
206 | - [ ] deploy new visualization graphql server
207 | - [ ] serve from the `./build` folder
208 | - [ ] file container:
209 | - search in loaded list of files.
210 | - or search via query
211 | - [ ] image scroller component
212 | - [ ] video scroller component
213 | - [ ] chart component with title
214 |
215 | - [ ] **Comparison View**: Multiple Selection
216 | - [ ] `parameters` field:
217 | - [ ] hide row (place to the end?)
218 | - [ ] order by column
219 | - [ ] add selection to table
220 | - [ ] add chart to table row
221 | - [ ] add row selector
222 | - [ ] add delete button
223 | - [ ] add comparison
224 | - [ ] add image
225 | - [ ] add video
226 |
227 | - [ ]
228 |
229 | I need to focus on getting this dashboard to production ready.
230 | - [ ] add range to plots
231 | - [ ] comparison view (is this slow?)
232 | - [ ] show figures in expanded list view
233 | - [ ] chart builder
234 | - in narrow viewport size, path#file? collapses into just file view. handle this inside the Dash module
235 | - [x] write chart library in hooks
236 | - tab bar:
237 | - [x] create dash config
238 | - [x] get dash config
239 | - [x] edit dash config
240 | - [x] change to relay mutation
241 | - [x] get readme
242 | - [x] display Readme note
243 | - [x] show readme
244 | - [x] write readme
245 | - [ ] order by value (on client side)
246 | - [ ] make single file view
247 | - [ ] make first version of chart def.
248 | - [ ] show the first chart
249 | - [ ] show content of the dashboard config file
250 | - [ ] create readme (client side only)
251 | - [ ] change readme filename (more involved)
252 | - [ ] Where to put the button for the README?
253 |
254 | - on the right: hamburger button to expand the dash configs
255 | - need `default.dashcfg` support
256 | - show `some_config.dashcfg`
257 |
258 | dash config supports multiple fines in the same yml configuration file.
259 |
260 | - if is experiment: show experiment view in tab bar? or the same as the dash?
261 | - search container
262 | > directory id
263 | > search query
264 | > experiments
265 |
266 | How are the experiments listed at the moment? (list *all* experiments under directory)
267 |
268 | - [ ] result table (get aggregated slice from an averaged metric)
269 | - parameter keys
270 | - metric keys
271 | - [ ] image component: epoch number
272 | - [ ] video component: epoch number
273 | - [ ] parameter table: define the API
274 | - [ ] file-serving api: for image, video, and text.
275 |
276 | > package these nicely, into some stand-alone component that I can use.
277 | - [ ] Advanced React Patterns:
278 | - ConnectionContainer: need to add search
279 | - ContextContainer
280 | - FragmentContainer
281 | - Getting Query Container to work with `found`
282 | - [ ] Show an averaged plot
283 | - [ ] frontend layout
284 | - [ ] different views
285 | - [ ] get bindr to work
286 | - [ ] think of front end design
287 | - [ ] add minimal front end
288 | - [ ] get parallel coordinate to work
289 | - [ ] get single chart to work
290 |
291 | do these after the frontend is working
292 | - [ ] actually return aggregated metric series
293 | - [ ] add rolling average window
294 |
295 | not working on.
296 | - [ ] unify `project`, `directory`, and `experiment` with the same master type
297 | - [ ] make the sample experiment 500MB large.
298 | - [ ] navigate to child context
299 | - [ ] make file explorer
300 | - [ ] setting up flow
301 | - [ ] add mutation (for files)
302 | - [ ] view queries in GraphiQL
303 | - [ ] make chart container
304 | - [ ] copy experiment data
305 |
306 | ### Done
307 |
308 | - [x] get first schema to run (file)
309 | - [x] finish schema v0
310 | - [x] generate sample experiment records
311 | - [x] finish relay tutorial
312 | - [x] add routing
313 | > now with `farce` and `found`.
314 |
315 | - [x] add routing, start with user, expand to projects
316 | > main project, fork project etc.
317 |
318 | - [x] break routes down
319 | - [x] Add home page
320 | - [x] add link to your own username
321 | - [x] get user by username
322 | - [x] add test?
323 | - [x] list projects under user
324 | - [x] add id to project
325 | - [x] list directories under project
326 | - [x] list directory under directory
327 | - [x] list files under directory
328 | - [x] list experiments under project
329 | - [x] list directory under experiment
330 | - [x] list files under experiment
331 | - [x] list files under current context
332 | - [x] flat keys for parameters
333 | - [x] flat dictionary of parameters
334 | - [x] metric files
335 | - [x] metric file keys
336 | - [x] metric file value query with keys
337 | - [x] fix id to be absolute w.r.t `Args.logdir`
338 | > right now it is the server absolute path.
339 |
340 |   Now everything uses `join(Args.logdir, self.id)`. We could replace this with a helper function.
341 | - [x] add metric query to root query.
342 | - [x] support queries with multiple metric files
343 | - [x] CLEANUP: remove Starship starter code from the backend
344 | - [x] add shim EditText mutation
345 | - [x] Make sure that the client still works
346 | - [x] wrap up and take a break.
347 | - [x] break is over!
348 | - [x] Parallel Coordinates
349 |   Visualizations like this are not a magic bullet. I will still need to code up
350 | the rest of the front-end to feature-parity.
351 | - [x] show current project name
352 | - [x] show current directory name
353 | - [x] connection container directories
354 | - [x] connection container for experiments
355 | - [x] Build view components
356 |
357 |
358 | ---
359 | - [x] File Browser
360 | - [x] Experiment Row
361 | - [x] Parameter Key Tags (and expanded view)
362 | > save as default.parameters
363 | > parameters: ["Args.lr", dict(name="some", domain=['some'])]
364 | > charts: ["some", "chart:chart-name"]
365 | **Aggregate**: choose `seed` to average? Need to key by *other* params first.
366 | - [x] Show list of directories under current directory
367 | - [x] Show list of experiments under current directory
368 |
369 |
370 | - [x] make charts from table
371 | - [x] grid view
372 |
373 | - [x] change ExperimentDash to react hook
374 | - [x] add selections box
375 | - [x] get all selected rows
376 | - [x] add toggle editDash
377 | - [x] add toggle editReadme
378 | - [x] add toggle showReadme
379 |
380 | - [x] add **Inline Charts**
381 |
--------------------------------------------------------------------------------
/notes/client design doc.md:
--------------------------------------------------------------------------------
1 | ### Pages:
2 |
3 | - Profile
4 | - Project
5 | - Dash
6 |
7 |
8 | #### Dash View Specifications
9 |
10 | - [ ] Left Bar
11 | 1. quick selection
12 | Show number of experiments
13 | 2. dash
14 | 3. experiments
15 |
16 | - [ ] Experiment View (Originally List View)
17 | - Experiment Name
18 | - tab bar
19 | Choosing from Multiple Parameter Key configs
20 | `[ > config name | key_1 | key_2 ... | input-box | ⬇️ ]`
21 | - parallel coordinates?
22 | - experiment list
23 |
24 | You can expand the experiment list to show details of all of the experiments
25 | - [ ] Details Pane
26 | - tab bar (the name of the view gets saved)
27 | - Details of the visualization
28 |
29 | ---
30 |
31 | - [ ] Need to offer a good way for people to explore the folder.
32 | 1. User comes in, with a few filter options available. These are
33 | - date
34 | - time (to minute)
35 | - run script
36 |
37 | The left bar should show these things. Where should we save it?
38 |
39 | - [ ] Experiment View (Originally List View)
40 | 0. default view:
41 | - left nav bar collapses into 56 px wide, also being used as selection bar
42 | - Initially just 800 px wide. Expand once more charts are added.
43 | - Initially Grid View, List View After.
44 | - Grid View sub charts arranged in views
45 | - selection box on left, entire vertical span
46 | > **old idea** show parameter table, animate expand to full list view, show keys containing `loss`,
47 |        `success`, `accuracy` then others by default
48 | 1. On selection:
49 | When an experiment is selected, show (add to quick selection) in the tab bar (top)
50 | - Show "Selection" in tab bar
51 | - flash "quick selection" in nav bar.
52 | - quick selection is the global bin, saved under project root.
53 | To view quick selections, go to project root
54 | - show parameter table in expanded tab bar
55 |
56 | When two experiments are selected, expand tab, highlight `selection` tab, show plot overlay
57 |
58 | - Show (expand view) on the right of each experiment block/row. Allow detailed view on the right split-view
59 | 2. Tab View:
60 | - Show filter fields (used to specify parallel coordinates)
61 | - Underneath, show parallel coordinates
62 |
63 | - [ ] Choosing from Multiple Parameter Key configs
64 | `[ > config name | key_1 | key_2 ... | input-box | ⬇️ ]`
65 | - [ ] Project View:
66 | - Now a list of experiments/dashboards
67 | - + quick selection
68 | - [ ] Experiment View (Originally List View)
69 | 0. default view:
70 | - left nav bar collapses into 56 px wide, also being used as selection bar
71 | - Initially just 800 px wide. Expand once more charts are added.
72 | - Initially Grid View, List View After.
73 | - Grid View sub charts arranged in views
74 | - selection box on left, entire vertical span
75 | > **old idea** show parameter table, animate expand to full list view, show keys containing `loss`,
76 |        `success`, `accuracy` then others by default
77 | 1. On selection:
78 | When an experiment is selected, show (add to quick selection) in the tab bar (top)
79 | - Show "Selection" in tab bar
80 | - flash "quick selection" in nav bar.
81 | - quick selection is the global bin, saved under project root.
82 | To view quick selections, go to project root
83 | - show parameter table in expanded tab bar
84 |
85 | When two experiments are selected, expand tab, highlight `selection` tab, show plot overlay
86 |
87 | - Show (expand view) on the right of each experiment block/row. Allow detailed view on the right split-view
88 | 2. Tab View:
89 | - Show filter fields (used to specify parallel coordinates)
90 | - Underneath, show parallel coordinates
91 | - [ ] Single Experiment View
92 | 1. No params vis on top
93 | 2. Show different dashboard (inherit from top, allow override)
94 | Expand out to full grid
95 | 3. save view in `default.dashcfg`
96 | - [ ] Left Bar
97 | 1. quick selection
98 | Show number of experiments
99 | 2. dash
100 | 3. experiments
101 |
--------------------------------------------------------------------------------
/notes/dashboard design doc.md:
--------------------------------------------------------------------------------
1 |
2 | example dash config
3 |
4 | ```yaml
5 | charts:
6 | - series:
7 | - metricFiles: ['experiment_00/metrics.pkl', 'experiment_01/metrics.pkl']
8 | prefix: 'episodeyang/playground/mdp'
9 | xKey: __timestamp
10 | yKey: sine
11 | interpolation: null
12 | k: 100 # the number of points to return
13 | ```
14 |
15 | ```yaml
16 | - parameters:
17 | - Args.seed: "sum"
18 | - Args.lr: "=10"
19 | - Args.learn_mode: "in ['mdp', 'passive']"
20 | cursor:
21 | epoch: 10
22 | charts:
23 | - name: "Learning Rate"
24 | type: "series"
25 | x_key: "epoch"
26 | y_key: "lr/mean"
27 | y_label: "learning rate (mean)"
28 | label: "{Args.env_id} {Args.lr}"
29 | - name: "epsilon greedy"
30 | type: "series"
31 | x_key: "__timestamps"
32 | x_label: "Wall Time"
33 | y_key: "lr/mean"
34 | y_label: "Learning Rate (mean)"
35 | label: "{Args.env_id} {Args.lr}"
36 | - name: "Policy"
37 | type: "video"
38 | filePath: "videos/q_{cursor.epoch:04d}.mp4"
39 | - name: "Value Map"
40 | type: "image"
41 | filePath: "videos/q_{cursor.epoch:04d}.mp4"
42 | - experiments: # these are the filtered experiments
43 | - "some-id-1"
44 | - "some-id-2"
45 | ```
46 |
47 | Views
48 |
49 | ```yaml
50 |
51 | ```
52 |
53 | Modifying the grid specs
54 |
--------------------------------------------------------------------------------
/notes/setting_up_dash_server.md:
--------------------------------------------------------------------------------
1 | # Setting Up Dash Server
2 |
3 |
4 |
5 | ```bash
6 | openssl genrsa 2048 > host.key
7 | chmod 400 host.key
8 | openssl req -new -x509 -nodes -sha256 -days 365 -key host.key -out host.cert
9 | ```
10 |
11 |
12 |
13 |
14 |
15 | To Launch the Server
16 |
17 |
18 |
19 | ```bash
20 | SANIC_REQUEST_MAX_SIZE=5000000000 SANIC_REQUEST_TIMEOUT=3600 SANIC_RESPONSE_TIMEOUT=3600 screen -dm python -m ml_logger.server --logdir ~/runs --port 8080 --host 0.0.0.0 --workers 16
21 | ```
22 |
23 |
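24 | To serve over HTTPS with the certificate generated above, the server can also be
25 | launched from Python. This is a minimal sketch, not the canonical entry point: it
26 | assumes `config.SSLArgs` carries the `cert`/`key` paths that Sanic expects, and that
27 | `host` and `port` are fields of `config.ServerArgs`; check `ml_dash/config.py` for
28 | the exact option names.
29 | 
30 | ```python
31 | from os.path import expanduser
32 | from ml_dash import config
33 | from ml_dash.server import run
34 | 
35 | # point the SSL config at the self-signed certificate generated above
36 | config.SSLArgs.cert = "host.cert"
37 | config.SSLArgs.key = "host.key"
38 | 
39 | # extra kwargs are forwarded to config.ServerArgs and then to Sanic's app.run
40 | run(logdir=expanduser("~/runs"), host="0.0.0.0", port=8080)
41 | ```
42 | 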
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | tqdm
2 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from os import path
2 | from setuptools import setup, find_packages
3 |
4 | cwd = path.dirname(__file__)
5 | with open(path.join(cwd, 'README'), encoding='utf-8') as f:
6 | long_description = f.read()
7 |
8 | with open(path.join(cwd, 'VERSION'), encoding='utf-8') as f:
9 | version = f.read()
10 |
11 | setup(name="ml-dash",
12 | description="A Beautiful Visualization Dashboard For Machine Learning",
13 | long_description=long_description,
14 | version=version,
15 | url="https://github.com/dash-ml/dash-server",
16 | author="Ge Yang",
17 | author_email="ge.ike.yang@gmail.com",
18 | license=None,
19 | keywords=["ml_logger",
20 | "ml-logger",
21 | "ml dash",
22 | "ml-dash",
23 | "ml_dash",
24 | "dashboard",
25 | "machine learning",
26 | "vis_server",
27 | "logging",
28 | "debug",
29 | "debugging"],
30 | classifiers=[
31 | "Development Status :: 4 - Beta",
32 | "Intended Audience :: Science/Research",
33 | "Programming Language :: Python :: 3"
34 | ],
35 | packages=[p for p in find_packages() if p != "tests"],
36 | include_package_data=True,
37 | install_requires=[
38 | "cloudpickle==3.1.1",
39 | 'dill',
40 | "graphene==2.1.3",
41 | "graphql-core==2.1",
42 | "graphql-relay==0.4.5",
43 | "graphql-server-core==1.1.1",
44 | "multidict",
45 | "numpy",
46 | 'pandas',
47 | "params_proto>=2.10.5",
48 | "requests",
49 | "requests_futures",
50 | 'ruamel.yaml',
51 | 'sanic==20.9.0',
52 | 'sanic-cors',
53 | 'Sanic-GraphQL',
54 | "termcolor",
55 | "typing"
56 | ])
57 |
--------------------------------------------------------------------------------