├── .eslintignore
├── .eslintrc.yaml
├── .flake8
├── .git-pre-commit
├── .github
└── workflows
│ ├── docs.yaml
│ ├── lint.yaml
│ └── test.yaml
├── .gitignore
├── .pep8speaks.yml
├── .pre-commit-config.yaml
├── Makefile
├── README.md
├── __init__.py
├── app
├── __init__.py
├── access.py
├── app_server.py
├── config.py
├── custom_exceptions.py
├── env.py
├── flow.py
├── handlers
│ ├── __init__.py
│ ├── auth.py
│ ├── base.py
│ ├── mainpage.py
│ ├── profile.py
│ └── socket_auth.py
├── json_util.py
├── model_util.py
├── models.py
├── psa.py
└── test_util.py
├── conf
└── supervisor
│ └── supervisor.conf.template
├── config.yaml.defaults
├── doc
├── Makefile
├── conf.py
├── dev.md
├── extending.md
├── index.rst
├── setup.md
└── usage.md
├── github_deploy_key.enc
├── log.py
├── requirements.docs.txt
├── requirements.txt
├── services
├── app
│ ├── app.py
│ └── supervisor.conf.template
├── cron
│ ├── cron.py
│ └── supervisor.conf
├── dask
│ └── supervisor.conf.template
├── external_logging
│ ├── external_logging.py
│ └── supervisor.conf
├── fake_oauth2
│ ├── fake_oauth2.py
│ └── supervisor.conf
├── message_proxy
│ ├── message_proxy.py
│ └── supervisor.conf
├── migration_manager
│ ├── migration_manager.py
│ └── supervisor.conf
├── nginx
│ ├── mime.types
│ ├── nginx.conf.template
│ └── supervisor.conf
├── rspack
│ ├── rspack.py
│ └── supervisor.conf
├── status_server
│ ├── status_server.py
│ └── supervisor.conf
└── websocket_server
│ ├── supervisor.conf
│ └── websocket_server.py
├── static
├── favicon.ico
├── img
│ ├── logo.png
│ └── logo.svg
└── js
│ ├── API.js
│ ├── MessageHandler.js
│ ├── components
│ ├── Notifications.jsx
│ └── WebSocket.jsx
│ ├── cookies.js
│ └── reconnecting-websocket.js
└── tools
├── check_app_environment.py
├── check_js_deps.sh
├── check_js_updates.sh
├── db_init.py
├── env_summary.py
├── fill_conf_values.py
├── junitxml_report.py
├── makefile_to_help.py
├── pip_install_requirements.py
├── setup_services.py
├── silent_monitor.py
├── status.py
├── supervisor_status.py
├── test_frontend.py
├── update_eslint.sh
└── watch_logs.py
/.eslintignore:
--------------------------------------------------------------------------------
1 | doc/*
2 | static/js/cookies.js
3 | static/js/reconnecting-websocket.js
4 |
--------------------------------------------------------------------------------
/.eslintrc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | env:
3 | browser: true
4 | extends: ["airbnb", "plugin:react/recommended"]
5 | plugins: ["react"]
6 |
7 | rules:
8 | import/no-unresolved: 0
9 | camelcase: off
10 | comma-dangle: [1, only-multiline]
11 | no-else-return: 0
12 | no-param-reassign: 0
13 | quotes: 0
14 | space-infix-ops: 0
15 | no-underscore-dangle: [2, { "allowAfterThis": true }]
16 | object-curly-newline: [2, { "consistent": true }]
17 | operator-linebreak: [1, after]
18 | jsx-a11y/click-events-have-key-events: 0
19 | jsx-a11y/label-has-associated-control: 0
20 | jsx-a11y/control-has-associated-label: 0
21 | jsx-a11y/no-static-element-interactions: 0
22 |
23 | settings:
24 | react:
25 | version: "17"
26 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | # See:
2 | #
3 | # https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes (E, W)
4 | # https://flake8.pycqa.org/en/latest/user/error-codes.html (F)
5 | # https://github.com/PyCQA/flake8-bugbear
6 | #
7 | # for error codes. And
8 | #
9 | # https://flake8.pycqa.org/en/latest/user/violations.html#selecting-violations-with-flake8
10 | #
11 | # for error classes selected below.
12 |
13 | [flake8]
14 | max-line-length = 80
15 | select = C,E,F,W,B,B950
16 | ignore = E501, W503, E203
17 |
--------------------------------------------------------------------------------
/.git-pre-commit:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Based on https://gist.github.com/dahjelle/8ddedf0aebd488208a9a7c829f19b9e8
3 |
4 | for file in $(git diff --diff-filter=d --cached --name-only | grep -E '\.(js|jsx)$')
5 | do
6 | git show ":$file" | npx eslint -c .eslintrc.yaml --stdin --stdin-filename "$file" # we only want to lint the staged changes, not any un-staged changes
7 | if [ $? -ne 0 ]; then
8 | echo "ESLint failed on staged file '$file'. Please check your code and try again. You can run ESLint manually via make lint."
9 | exit 1 # exit with failure status
10 | fi
11 | done
12 |
13 | echo "-> Javascript code passed linter"
14 |
15 | UNPINNED=$(cat package.json | grep '\^\|~' | sed 's/,*$//g')
16 | if [[ -n $UNPINNED ]]; then
17 | echo "Some Javascript packages are not pinned to a specific version:"
18 | echo
19 | echo "$UNPINNED"
20 | echo
21 | echo Please pin their versions by, e.g., removing operators such as \~ and ^.
22 | exit 1
23 | else
24 | echo "-> Javascript versions correctly pinned"
25 | fi
26 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yaml:
--------------------------------------------------------------------------------
1 | name: Build and deploy documentation
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 |
9 | jobs:
10 | build-and-deploy-docs:
11 | runs-on: ubuntu-latest
12 | timeout-minutes: 10
13 |
14 | if: github.repository_owner == 'cesium-ml'
15 |
16 | steps:
17 | - uses: actions/cache@v4
18 | with:
19 | path: |
20 | ~/.cache/pip
21 | ~/.cache/sphinx
22 | ~/.ccache
23 | ~/.local
24 | ~/.npm
25 | key: ${{ runner.os }}-${{ hashFiles('**/lockfiles') }}
26 |
27 | - uses: actions/setup-python@v5
28 | with:
29 | python-version: "3.10"
30 |
31 | - uses: actions/checkout@v4
32 |
33 | - name: Install dependencies
34 | run: |
35 | python -m pip install --upgrade pip
36 | pip install -r requirements.docs.txt
37 |
38 | - name: Build docs
39 | run: |
40 | cd doc
41 | make html
42 | touch _build/html/.nojekyll
43 |
44 | - name: Install SSH Client 🔑
45 | if: github.ref == 'refs/heads/main'
46 | uses: webfactory/ssh-agent@v0.4.1
47 | with:
48 | ssh-private-key: ${{ secrets.CI_DEPLOY_KEY }}
49 |
50 | - name: Deploy docs
51 | if: github.ref == 'refs/heads/main'
52 | uses: JamesIves/github-pages-deploy-action@releases/v3
53 | with:
54 | FOLDER: doc/_build/html
55 | REPOSITORY_NAME: cesium-ml/baselayer
56 | BRANCH: gh-pages
57 | SSH: true
58 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yaml:
--------------------------------------------------------------------------------
1 | name: Lint
2 |
3 | on: [push, pull_request]
4 |
5 | permissions:
6 | contents: read
7 |
8 | jobs:
9 | pre-commit:
10 | name: pre-commit-hooks
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | - uses: actions/setup-python@v5
15 | - uses: pre-commit/action@646c83fcd040023954eafda54b4db0192ce70507 # v3.0.0
16 |
--------------------------------------------------------------------------------
/.github/workflows/test.yaml:
--------------------------------------------------------------------------------
1 | name: Test Baselayer
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 |
9 | defaults:
10 | run:
11 | working-directory: ..
12 |
13 | jobs:
14 | test:
15 | name: Test Baselayer
16 | runs-on: ubuntu-latest
17 |
18 | services:
19 | postgres:
20 | image: postgres
21 | env:
22 | POSTGRES_USER: baselayer
23 | POSTGRES_PASSWORD: anything
24 | ports:
25 | - 5432:5432
26 | # needed because the postgres container does not provide a
27 | # healthcheck
28 | options: >-
29 | --health-cmd pg_isready
30 | --health-interval 10s
31 | --health-timeout 5s
32 | --health-retries 5
33 |
34 | steps:
35 | - uses: actions/setup-python@v5
36 | with:
37 | python-version: "3.10"
38 |
39 | - uses: actions/setup-node@v4
40 | with:
41 | node-version: 20
42 |
43 | - name: Checkout
44 | uses: actions/checkout@v4
45 |
46 | - name: Install template app
47 | run: |
48 | git clone https://github.com/cesium-ml/baselayer_template_app
49 | cp -rf baselayer baselayer_template_app/
50 |
51 | - uses: actions/cache@v4
52 | with:
53 | path: |
54 | ~/.npm
55 | key: ${{ runner.os }}-npm-${{ hashFiles('baselayer_template_app/package.json') }}
56 |
57 | - uses: actions/cache@v4
58 | with:
59 | path: |
60 | ~/.cache/pip
61 | key: ${{ runner.os }}-${{ hashFiles('**/requirements*.txt') }}
62 |
63 | - uses: browser-actions/setup-geckodriver@latest
64 | with:
65 | token: ${{ secrets.GITHUB_TOKEN }}
66 |
67 | - name: Install system dependencies
68 | run: |
69 | cd baselayer
70 |
71 | sudo apt update -y
72 |
73 | ### firefox installation
74 | sudo snap remove firefox
75 | sudo add-apt-repository ppa:mozillateam/ppa
76 | printf 'Package: *\nPin: release o=LP-PPA-mozillateam\nPin-Priority: 1001' | sudo tee /etc/apt/preferences.d/mozilla-firefox
77 |
78 | sudo apt install -y wget nodejs unzip firefox
79 |
80 | # if nginx is already installed, remove it
81 | sudo apt remove -y nginx nginx-common nginx-core nginx-full
82 | sudo apt purge -y nginx nginx-common nginx-core nginx-full
83 |
84 | # add the PPA repository with brotli support for nginx
85 | sudo add-apt-repository ppa:ondrej/nginx -y
86 | sudo apt update -y
87 | sudo apt install nginx libnginx-mod-http-brotli-static libnginx-mod-http-brotli-filter -y
88 |
89 |
90 | pip install --upgrade pip
91 | pip install wheel
92 |
93 | export NPM_PACKAGES="${HOME}/.npm-packages"
94 | export PATH=${NPM_PACKAGES}/bin:$PATH
95 | export NODE_PATH="$NPM_PACKAGES/lib/node_modules:$NODE_PATH"
96 |
97 | sudo npm -g install npm@latest
98 |
99 | which python; python --version
100 | echo npm $(npm --version)
101 | echo node $(node --version)
102 | nginx -v
103 | firefox --version
104 |
105 | - name: Install Geckodriver / Selenium
106 | run: |
107 | geckodriver --version
108 | pip install selenium==4.8.3
109 | python -c "import selenium; print(f'Selenium {selenium.__version__}')"
110 |
111 | - name: Write configuration & build DB
112 | run: |
113 | cd baselayer_template_app
114 |
115 | cat << EOF > config.yaml
116 | database:
117 | database: template_app
118 | user: baselayer
119 | host: localhost
120 | port: 5432
121 | password: anything
122 | EOF
123 |
124 | cat << EOF > test_config.yaml
125 | database:
126 | database: template_app_test
127 | user: baselayer
128 | host: localhost
129 | port: 5432
130 | password: anything
131 | EOF
132 |
133 | echo "localhost:5432:*:baselayer:anything" > ~/.pgpass
134 | chmod 600 ~/.pgpass
135 |
136 | createdb -h localhost -U baselayer template_app
137 | psql -U baselayer -h localhost -c "GRANT ALL PRIVILEGES ON DATABASE template_app TO baselayer;" template_app
138 |
139 | createdb -h localhost -U baselayer template_app_test
140 | psql -U baselayer -h localhost -c "GRANT ALL PRIVILEGES ON DATABASE template_app_test TO baselayer;" template_app_test
141 |
142 | make db_init
143 |
144 | - name: Test template app
145 | run: |
146 | cd baselayer_template_app
147 | make test_headless
148 |
149 | - name: Upload logs
150 | uses: actions/upload-artifact@v4
151 | if: ${{ always() }}
152 | with:
153 | name: logs
154 | path: baselayer_template_app/log
155 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .#*
2 | log/
3 | .cache
4 | static/build
5 | run/
6 | public/
7 | \#*
8 | *~
9 | *.swp
10 | *.swo
11 | .DS_Store
12 | __pycache__/
13 | node_modules/
14 | *.pyc
15 | doc/_build
16 |
17 | # These are built from templates
18 | conf/supervisor/supervisor.conf
19 | services/dask/supervisor.conf
20 | services/app/supervisor.conf
21 | services/nginx/nginx.conf
22 | conf/nginx.conf
23 |
--------------------------------------------------------------------------------
/.pep8speaks.yml:
--------------------------------------------------------------------------------
1 | scanner:
2 | diff_only: True # Only errors caused by the patch are shown, not errors for the whole file
3 |
4 | pycodestyle:
5 | ignore: # Errors and warnings to ignore
6 | - W391 # blank line at the end of file
7 | - E203 # whitespace before ,;:
8 | - W503 # newline before binary operator
9 |
10 | no_blank_comment: True # If True, no comment is made when the bot does not find any pep8 errors
11 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.5.0
4 | hooks:
5 | - id: check-yaml
6 | - id: end-of-file-fixer
7 | - id: trailing-whitespace
8 | - repo: https://github.com/asottile/pyupgrade
9 | rev: v3.15.0
10 | hooks:
11 | - id: pyupgrade
12 | args: [--py38-plus]
13 | - repo: https://github.com/pycqa/isort
14 | rev: 5.12.0
15 | hooks:
16 | - id: isort
17 | name: isort (python)
18 | args: ["--profile", "black"]
19 | - repo: https://github.com/python/black
20 | rev: 23.12.0
21 | hooks:
22 | - id: black
23 | pass_filenames: true
24 | exclude: baselayer|node_modules|static
25 | - repo: https://github.com/pycqa/flake8
26 | rev: 6.1.0
27 | hooks:
28 | - id: flake8
29 | pass_filenames: true
30 | exclude: baselayer|node_modules|static|__init__.py
31 | - repo: https://github.com/pre-commit/mirrors-prettier
32 | rev: v3.1.0 # Use the sha or tag you want to point at
33 | hooks:
34 | - id: prettier
35 | pass_filenames: true
36 | - repo: https://github.com/pre-commit/mirrors-eslint
37 | rev: 4bfe10ad902d472399bcacf83cba41500542718a # v8.55.0
38 | hooks:
39 | - id: eslint
40 | additional_dependencies:
41 | # Keep up to date with baselayer_template_app/package.json
42 | - eslint@8.33.0
43 | - eslint-config-airbnb@18.2.0
44 | - eslint-plugin-import@2.22.1
45 | - eslint-plugin-jsx-a11y@6.3.1
46 | - eslint-plugin-react@7.22.0
47 | - eslint-plugin-react-hooks@4.2.0
48 | files: \.[j]sx?$
49 | types: [file]
50 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | SHELL = /bin/bash
2 | ESLINT=npx eslint
3 |
4 | .DEFAULT_GOAL := help
5 |
6 | # Use `config.yaml` by default, unless overridden by user
7 | # through setting FLAGS environment variable
8 | FLAGS:=$(if $(FLAGS),$(FLAGS),--config=config.yaml)
9 |
10 | PYTHON=PYTHONPATH=. python
11 | ENV_SUMMARY=$(PYTHON) baselayer/tools/env_summary.py $(FLAGS)
12 |
13 | # Flags are propagated to supervisord via the FLAGS environment variable
14 | # Inside of supervisord configuration files, you may reference them using
15 | # %(ENV_FLAGS)s
16 | SUPERVISORD_CFG=baselayer/conf/supervisor/supervisor.conf
17 | SUPERVISORD=$(PYTHON) -m supervisor.supervisord -s -c $(SUPERVISORD_CFG)
18 | SUPERVISORCTL=$(PYTHON) -m supervisor.supervisorctl -c $(SUPERVISORD_CFG)
19 |
20 | LOG=@$(PYTHON) -c "from baselayer.log import make_log; spl = make_log('baselayer'); spl('$1')"
21 |
22 | # Bold
23 | B=\033[1m
24 | # Normal
25 | N=\033[0m
26 |
27 | bundle = static/build/main.bundle.js
28 | rspack = npx rspack
29 |
30 | # NOTE: These targets are meant to be *included* in the parent app
31 | # Makefile. See end of this file for baselayer specific targets.
32 |
33 | .PHONY: clean dependencies db_init db_clear bundle bundle-watch paths
34 | .PHONY: fill_conf_values log run run_production run_testing monitor attach
35 | .PHONY: stop status test_headless test test_report check-js-updates lint-install
36 | .PHONY: lint lint-unix lint-githook baselayer_doc_reqs html
37 | .PHONY: system_setup service_setup
38 | .PHONY: $(bundle) bundle bundle-watch
39 |
40 | help:
41 | @python ./baselayer/tools/makefile_to_help.py $(MAKEFILE_LIST)
42 |
43 | dependencies: README.md
44 | @PYTHONPATH=. pip install packaging
45 | @baselayer/tools/check_app_environment.py
46 | @PYTHONPATH=. python baselayer/tools/pip_install_requirements.py baselayer/requirements.txt requirements.txt
47 | @./baselayer/tools/silent_monitor.py baselayer/tools/check_js_deps.sh
48 |
49 | db_init: ## Initialize database and models.
50 | db_init: dependencies
51 | @echo -e "\nInitializing database:"
52 | @PYTHONPATH=. baselayer/tools/db_init.py $(FLAGS)
53 |
54 | db_clear: ## Delete all data from the database.
55 | db_clear: dependencies
56 | @PYTHONPATH=. baselayer/tools/silent_monitor.py baselayer/tools/db_init.py -f $(FLAGS)
57 |
58 | $(bundle): rspack.config.js package.json
59 | @$(rspack)
60 |
61 | bundle: $(bundle)
62 |
63 | bundle-watch:
64 | $(rspack) -w
65 |
66 | paths:
67 | @mkdir -p log run tmp
68 | @mkdir -p ./log/sv_child
69 |
70 | fill_conf_values:
71 | @find -L . -name '[^.]*.template' | grep -v "node_modules" | PYTHONPATH=. xargs ./baselayer/tools/fill_conf_values.py $(FLAGS)
72 |
73 | system_setup: | paths dependencies fill_conf_values service_setup
74 |
75 | service_setup:
76 | @PYTHONPATH=. python ./baselayer/tools/setup_services.py $(FLAGS)
77 |
78 | log: ## Monitor log files for all services.
79 | log: paths
80 | @PYTHONPATH=. PYTHONUNBUFFERED=1 baselayer/tools/watch_logs.py
81 |
82 | run: ## Start the web application.
83 | run: FLAGS:=$(FLAGS) --debug
84 | run: system_setup
85 | @echo
86 | $(call LOG, Starting micro-services)
87 | @echo
88 | @echo " - Run \`make log\` in another terminal to view logs"
89 | @echo " - Run \`make monitor\` in another terminal to restart services"
90 | @echo
91 | @echo "The server is in debug mode:"
92 | @echo
93 | @echo " JavaScript and Python files will be reloaded upon change."
94 | @echo
95 | @export FLAGS="$(FLAGS)" && \
96 | $(ENV_SUMMARY) && echo && \
97 | echo "Press Ctrl-C to abort the server" && \
98 | echo && \
99 | $(SUPERVISORD)
100 |
101 | run_production: ## Run the web application in production mode (no dependency checking).
102 | run_production:
103 | @echo "[!] Production run: not automatically installing dependencies."
104 | @echo
105 | @export FLAGS="$(FLAGS)" && \
106 | $(ENV_SUMMARY) && \
107 | $(SUPERVISORD)
108 |
109 | run_testing: FLAGS=--config=test_config.yaml # both this and the next FLAGS definition are needed
110 | run_testing: system_setup
111 | @echo -e "\n$(B)[baselayer] Launch app for testing$(N)"
112 | @export FLAGS="$(FLAGS) --debug" && \
113 | $(ENV_SUMMARY) && \
114 | $(SUPERVISORD)
115 |
116 | monitor: ## Monitor microservice status.
117 | @echo "Entering supervisor control panel."
118 | @echo
119 | @echo " - Type \`status\` to see microservice status"
120 | @echo
121 | @$(SUPERVISORCTL) -i
122 |
123 | attach: ## Attach to terminal of running webserver; useful to, e.g., use pdb.
124 | @echo "Run the following, replacing NN with the process number, e.g. 00, 11, etc.:"
125 | @echo
126 | @echo "$(SUPERVISORCTL) fg app:app_NN"
127 |
128 | clean:
129 | rm -f $(bundle)
130 |
131 | stop: ## Stop all running services.
132 | $(SUPERVISORCTL) stop all
133 |
134 | status:
135 | @PYTHONPATH='.' ./baselayer/tools/supervisor_status.py
136 |
137 | test_headless: ## Run tests headlessly
138 | test_headless: system_setup
139 | @PYTHONPATH='.' baselayer/tools/test_frontend.py --headless --xml
140 |
141 | test: ## Run tests.
142 | test: system_setup
143 | @PYTHONPATH='.' ./baselayer/tools/test_frontend.py --xml
144 |
145 | test_report: ## Print report on failed tests
146 | test_report:
147 | @PYTHONPATH='.' baselayer/tools/junitxml_report.py test-results/junit.xml
148 |
149 | # Call this target to see which Javascript dependencies are not up to date
150 | check-js-updates:
151 | ./baselayer/tools/check_js_updates.sh
152 |
153 | # Lint targets
154 | lint-install: ## Install ESLint and a git pre-commit hook.
155 | lint-install: cp-lint-yaml lint-githook
156 | @echo "Installing latest version of ESLint and AirBNB style rules"
157 | @./baselayer/tools/update_eslint.sh
158 |
159 | cp-lint-yaml: ## Copy eslint config file to parent app if not present
160 | @if ! [ -e .eslintrc.yaml ]; then \
161 | echo "No ESLint configuration found; copying baselayer's version of .eslintrc.yaml"; \
162 | cp baselayer/.eslintrc.yaml .eslintrc.yaml; \
163 | fi
164 |
165 | $(ESLINT): lint-install
166 |
167 | lint: ## Check JavaScript code style.
168 | $(ESLINT) --ext .jsx,.js -c .eslintrc.yaml static/js
169 |
170 | lint-unix:
171 | $(ESLINT) --ext .jsx,.js -c .eslintrc.yaml --format=unix static/js
172 |
173 | lint-githook:
174 | @if ! [ -e .git/hooks/pre-commit ]; then \
175 | echo "Installing ESLint pre-commit hook into \`.git/hooks/pre-commit\`"; \
176 | cp baselayer/.git-pre-commit .git/hooks/pre-commit; \
177 | fi
178 |
179 | # baselayer-specific targets
180 | # All other targets are run from the parent app. The following are related to
181 | # baselayer itself, and will be run from the baselayer repo root.
182 |
183 | # Documentation targets, run from the `baselayer` directory
184 | baselayer_doc_reqs:
185 | pip install -q -r requirements.docs.txt
186 |
187 | baselayer_html: | baselayer_doc_reqs
188 | export SPHINXOPTS=-W; make -C doc html
189 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Baselayer
6 |
7 |
8 |
9 |
10 | A Scientific Web Application Template
11 |
12 |
13 | Baselayer is a "batteries included" web application template that includes:
14 |
15 | - a Tornado-based Python web application template to customize to your liking
16 | - WebSockets
17 | - ES6+ JavaScript compilation (bundled via rspack), with Redux & React frontend
18 | - Process management via supervisord
19 | - Proxy configuration via nginx
20 | - Authentication (currently using Google) via Python Social Auth
21 | - Distributed task computation, via `dask` and `distributed`
22 |
23 | Please clone and try our example application at
24 |
25 | https://github.com/cesium-ml/baselayer_template_app
26 |
27 | Read more at http://cesium-ml.org/baselayer/
28 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cesium-ml/baselayer/8b2fae5229c6af08a6beca992ce2f60dfc64303a/__init__.py
--------------------------------------------------------------------------------
/app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cesium-ml/baselayer/8b2fae5229c6af08a6beca992ce2f60dfc64303a/app/__init__.py
--------------------------------------------------------------------------------
/app/access.py:
--------------------------------------------------------------------------------
1 | import functools
2 |
3 | import sqlalchemy as sa
4 | import tornado.web
5 | from sqlalchemy.orm import joinedload
6 |
7 | from baselayer.app.custom_exceptions import AccessError # noqa: F401
8 | from baselayer.app.models import DBSession, Role, Token, User # noqa: F401
9 |
10 |
def auth_or_token(method):
    """Ensure that a user is signed in.

    This is a decorator for Tornado handler `get`, `put`, etc. methods.

    Signing in happens via the login page, or by using an auth token.
    To use an auth token, the `Authorization` header has to be
    provided, and has to be of the form `token 123efghj`. E.g.:

      $ curl -v -H "Authorization: token 123efghj" http://localhost:5000/api/endpoint

    Raises
    ------
    tornado.web.HTTPError
        401 when no valid credentials are supplied; 403 when the
        authenticated user's account has expired.
    """

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        token_header = self.request.headers.get("Authorization", None)
        if token_header is not None and token_header.startswith("token "):
            # Strip only the leading "token " prefix. The previous
            # implementation used `token_header.replace("token", "")`,
            # which would also mangle token IDs that happen to contain
            # the substring "token".
            token_id = token_header[len("token "):].strip()
            with DBSession() as session:
                # Eagerly load the creator's ACLs and roles so later
                # permission checks don't issue extra queries.
                token = session.scalars(
                    sa.select(Token)
                    .options(
                        joinedload(Token.created_by).options(
                            joinedload(User.acls),
                            joinedload(User.roles),
                        )
                    )
                    .where(Token.id == token_id)
                ).first()
                if token is not None:
                    self.current_user = token
                    if not token.created_by.is_active():
                        raise tornado.web.HTTPError(403, "User account expired")
                else:
                    raise tornado.web.HTTPError(401)
                return method(self, *args, **kwargs)
        else:
            # No usable Authorization header: fall back to the session user.
            if self.current_user is not None:
                if not self.current_user.is_active():
                    raise tornado.web.HTTPError(403, "User account expired")
            else:
                # NOTE(review): this branch is also reached when no
                # Authorization header is present at all, not just when
                # it is malformed.
                raise tornado.web.HTTPError(
                    401,
                    'Credentials malformed; expected form "Authorization: token abc123"',
                )
            return tornado.web.authenticated(method)(self, *args, **kwargs)

    # Mark the wrapper so introspection can tell this handler requires auth.
    wrapper.__authenticated__ = True
    return wrapper
60 |
61 |
def permissions(acl_list):
    """Decorate methods with this to require that the current user have all the
    specified ACLs.

    "System admin" users bypass the ACL check entirely.
    """

    def check_acls(method):
        @auth_or_token
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            user_acls = self.current_user.permissions
            is_admin = "System admin" in user_acls
            has_required = set(acl_list).issubset(user_acls)
            if not (has_required or is_admin):
                raise tornado.web.HTTPError(401)
            return method(self, *args, **kwargs)

        # Expose the required ACLs for introspection.
        wrapper.__permissions__ = acl_list
        return wrapper

    return check_acls
82 |
--------------------------------------------------------------------------------
/app/app_server.py:
--------------------------------------------------------------------------------
1 | import tornado.web
2 |
3 | from .env import load_env
4 | from .handlers import (
5 | AuthHandler,
6 | CompleteHandler,
7 | DisconnectHandler,
8 | LogoutHandler,
9 | MainPageHandler,
10 | ProfileHandler,
11 | SocketAuthTokenHandler,
12 | )
13 |
env, cfg = load_env()


# Core Tornado application settings.
settings = {
    "template_path": "./static",
    "login_url": "/",
}

# Python Social Auth configuration: storage/strategy classes, available
# OAuth backends, and redirect behavior.
settings.update(
    {
        "SOCIAL_AUTH_USER_MODEL": "baselayer.app.models.User",
        "SOCIAL_AUTH_STORAGE": "baselayer.app.psa.TornadoStorage",
        "SOCIAL_AUTH_STRATEGY": "baselayer.app.psa.TornadoStrategy",
        "SOCIAL_AUTH_AUTHENTICATION_BACKENDS": (
            "social_core.backends.google.GoogleOAuth2",
        ),
        "SOCIAL_AUTH_LOGIN_URL": "/",
        "SOCIAL_AUTH_LOGIN_REDIRECT_URL": "/",  # on success
        "SOCIAL_AUTH_LOGIN_ERROR_URL": "/login-error/",
        "SOCIAL_AUTH_USER_FIELDS": ["username"],
        "SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL": True,
        "SOCIAL_AUTH_SESSION_EXPIRATION": True,
        "SOCIAL_AUTH_GOOGLE_OAUTH2_KEY": cfg["server.auth.google_oauth2_key"],
        "SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET": cfg["server.auth.google_oauth2_secret"],
        "SOCIAL_AUTH_REDIRECT_IS_HTTPS": cfg["server.ssl"],
        "SOCIAL_AUTH_URLOPEN_TIMEOUT": cfg["server.auth.google_oauth2_timeout"],
    }
)

# In debug-login mode, swap the real Google OAuth2 backend for a local
# fake so developers can sign in without Google credentials.
if cfg["server.auth.debug_login"]:
    settings["SOCIAL_AUTH_AUTHENTICATION_BACKENDS"] = (
        "baselayer.app.psa.FakeGoogleOAuth2",
    )
44 |
# Routes for Python Social Auth. The capture groups must be *named*:
# PSA's Tornado handlers receive the OAuth backend via the `backend`
# group. A bare `(?P...)` without a group name is invalid regex and
# would raise when Tornado compiles the routes.
SOCIAL_AUTH_ROUTES = [
    tornado.web.url(r"/login/(?P<backend>[^/]+)/?", AuthHandler, name="begin"),
    tornado.web.url(
        r"/complete/(?P<backend>[^/]+)/", CompleteHandler, name="complete"
    ),
    tornado.web.url(
        r"/disconnect/(?P<backend>[^/]+)/?", DisconnectHandler, name="disconnect"
    ),
    tornado.web.url(
        r"/disconnect/(?P<backend>[^/]+)/(?P<association_id>\d+)/?",
        DisconnectHandler,
        name="disconnect_individual",
    ),
]

handlers = SOCIAL_AUTH_ROUTES + [
    (r"/baselayer/socket_auth_token", SocketAuthTokenHandler),
    (r"/baselayer/profile", ProfileHandler),
    (r"/baselayer/logout", LogoutHandler),
    (r"/()", MainPageHandler),
    (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": "static/"}),
    # Escape the dot so only the literal filename matches.
    (r"/(favicon\.png)", tornado.web.StaticFileHandler, {"path": "static/"}),
]
66 |
--------------------------------------------------------------------------------
/app/config.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import os
3 | from pathlib import Path
4 |
5 | import yaml
6 |
7 | from ..log import make_log
8 |
9 | log = make_log("baselayer")
10 |
11 |
def recursive_update(d, u):
    """Merge mapping ``u`` into mapping ``d`` in place and return ``d``.

    Nested mappings are merged recursively; any other value in ``u``
    simply overwrites the corresponding entry in ``d``.

    Based on https://stackoverflow.com/a/3233356/214686
    """
    for key, value in u.items():
        if isinstance(value, collections.abc.Mapping):
            # `or {}` guards against an explicit None stored under `key`.
            existing = d.get(key, {}) or {}
            d[key] = recursive_update(existing, value)
        else:
            d[key] = value
    return d
21 |
22 |
def relative_to(path, root):
    """Return ``path`` relative to ``root`` when possible.

    If ``path`` does not lie under ``root``, return it unchanged
    (as a ``Path``).
    """
    candidate = Path(path)
    try:
        return candidate.relative_to(root)
    except ValueError:
        # Not under `root`; fall back to the path as given.
        return candidate
29 |
30 |
class Config(dict):
    """To simplify access, the configuration allows fetching nested
    keys separated by a period `.`, e.g.:

    >>> cfg['app.db']

    is equivalent to

    >>> cfg['app']['db']

    """

    def __init__(self, config_files=None):
        """Initialize, optionally loading a list of YAML config files.

        Files are applied in order, so later files override values from
        earlier ones.
        """
        dict.__init__(self)
        if config_files is not None:
            cwd = os.getcwd()
            config_names = [relative_to(c, cwd) for c in config_files]
            print(f" Config files: {config_names[0]}")
            for f in config_names[1:]:
                print(f" {f}")
            self["config_files"] = config_files
            for f in config_files:
                self.update_from(f)

    def update_from(self, filename):
        """Update configuration from YAML file"""
        if os.path.isfile(filename):
            # Use a context manager so the file handle is closed promptly;
            # the previous code left `open(filename)` to be garbage
            # collected.
            with open(filename) as f:
                more_cfg = yaml.full_load(f)
            recursive_update(self, more_cfg)

    def __getitem__(self, key):
        """Fetch a (possibly dotted) key; raises KeyError if any segment
        along the path is missing or not a dict."""
        keys = key.split(".")

        val = self
        for key in keys:
            if isinstance(val, dict):
                val = dict.__getitem__(val, key)
            else:
                raise KeyError(key)

        return val

    def get(self, key, default=None, /):
        """Like ``__getitem__``, but return ``default`` instead of raising."""
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def show(self):
        """Print configuration"""
        print()
        print("=" * 78)
        print("Configuration")
        for key in self:
            print("-" * 78)
            print(key)

            if isinstance(self[key], dict):
                # Use a distinct loop variable; the original reused `key`,
                # shadowing the outer loop variable.
                for subkey, val in self[key].items():
                    print(" ", subkey.ljust(30), val)

        print("=" * 78)
93 |
94 |
def load_config(config_files=None):
    """Load configuration, layering user config files over the defaults.

    Parameters
    ----------
    config_files : list of str, optional
        Extra configuration files to load, in order of increasing
        priority.  Defaults to no extra files.

    Returns
    -------
    Config
        The merged configuration.
    """
    # Avoid the mutable-default-argument pitfall; treat None as "no
    # extra files".
    if config_files is None:
        config_files = []

    basedir = Path(os.path.dirname(__file__)) / ".."
    missing = [f for f in config_files if not os.path.isfile(f)]
    if missing:
        log(f'Missing config files: {", ".join(missing)}; continuing.')
        if "config.yaml" in missing:
            log(
                "Warning: You are running on the default configuration. To configure your system, "
                "please copy `config.yaml.defaults` to `config.yaml` and modify it as you see fit."
            )

    # Always load the default configuration values first, and override
    # with values in user configuration files
    all_configs = [
        Path(basedir / "config.yaml.defaults"),
        Path(basedir / "../config.yaml.defaults"),
    ] + config_files
    all_configs = [c for c in all_configs if os.path.exists(os.path.normpath(c))]
    all_configs = [os.path.abspath(Path(c).absolute()) for c in all_configs]

    cfg = Config(all_configs)

    return cfg
118 |
--------------------------------------------------------------------------------
/app/custom_exceptions.py:
--------------------------------------------------------------------------------
1 | import tornado.web
2 |
3 |
class AccessError(tornado.web.HTTPError):
    """HTTP error raised when a user lacks access to a resource.

    Parameters
    ----------
    reason : str
        Human-readable description of the failure.
    status_code : int, optional
        HTTP status code to send (default 400).
    """

    def __init__(self, reason, status_code=400):
        # Bug fix: the `status_code` argument was previously ignored
        # (400 was always passed through).
        tornado.web.HTTPError.__init__(
            self, reason=reason, status_code=status_code
        )

    def __str__(self):
        return self.reason
10 |
--------------------------------------------------------------------------------
/app/env.py:
--------------------------------------------------------------------------------
1 | """
2 | Parse environment flags, and load the app configuration.
3 | """
4 |
5 | import argparse
6 | import textwrap
7 |
8 | from .config import load_config
9 |
10 | # Cache loading of environment
11 | _cache = {}
12 |
13 | parser = argparse.ArgumentParser(description="Launch web app")
14 | parser.add_argument("-C", "--config", action="append")
15 | parser.add_argument("--debug", action="store_true")
16 |
17 |
def load_env():
    """Parse command-line flags and load the configuration.

    The result is computed only once per session; subsequent calls
    return the cached ``(env, cfg)`` pair.

    Supported flags:

    --config  Extra configuration files layered on top of the defaults
              (`baselayer/config.yaml.defaults` and
              `./config.yaml.defaults`); may be given multiple times.

    --debug   Debug mode: Tornado reloads changed files automatically,
              and SQLAlchemy logging becomes more verbose.
    """
    if _cache:
        return _cache["env"], _cache["cfg"]

    parsed, _unknown = parser.parse_known_args()
    config = load_config(config_files=(parsed.config or []))

    _cache.update({"file": parsed.config, "env": parsed, "cfg": config})

    # Once configuration is loaded, lock the parser so that late imports
    # cannot silently register new arguments.
    def _refuse_new_args(*args, **kwargs):
        raise RuntimeError(
            textwrap.dedent(
                """
                Trying to add argument after `load_env` has already been called.
                This typically happens when one of your imports calls
                `load_env`. To avoid this error, move your imports until after
                adding new arguments to the parser.
                """
            )
        )

    parser.add_argument = _refuse_new_args

    return _cache["env"], _cache["cfg"]
58 |
--------------------------------------------------------------------------------
/app/flow.py:
--------------------------------------------------------------------------------
1 | import zmq
2 |
3 | from ..log import make_log
4 | from .env import load_env
5 | from .json_util import to_json
6 |
# Parse flags/config once at import; `cfg` supplies the websocket bus path.
env, cfg = load_env()
# Module-level logger tagged "flow".
log = make_log("flow")
9 |
10 |
class Flow:
    """Send messages through websocket to frontend"""

    def __init__(self, socket_path=None):
        """Connect a ZeroMQ PUSH socket to the websocket message bus.

        Parameters
        ----------
        socket_path : str, optional
            Address of the inbound websocket bus. Defaults to
            ``cfg["ports.websocket_path_in"]``.
        """
        # Resolve the default lazily, per instantiation, instead of baking
        # the config lookup into the signature at import time.
        if socket_path is None:
            socket_path = cfg["ports.websocket_path_in"]
        self._socket_path = socket_path
        self._ctx = zmq.Context.instance()
        self._bus = self._ctx.socket(zmq.PUSH)
        self._bus.connect(self._socket_path)

    def push(self, user_id, action_type, payload=None):
        """Push action to specified user over websocket.

        Parameters
        ----------
        user_id : int or str
            User to push websocket message to. If '*', target all users.
        action_type : str
            Action label for the message; a string identifier used by
            the frontend to distinguish between different types of
            messages. Example: `cesium/RELOAD_FRONTPAGE`.
        payload : dict, optional
            Payload forwarded to the frontend. This may contain small
            pieces of data. Larger result sets should be fetched via
            an API call.

        """
        # Avoid a shared mutable default argument.
        if payload is None:
            payload = {}
        log(f"Pushing action {action_type} to user {user_id}")
        message = [
            str(user_id),
            to_json(
                {"user_id": user_id, "actionType": action_type, "payload": payload}
            ),
        ]
        self._bus.send_multipart([m.encode("utf-8") for m in message])
45 |
--------------------------------------------------------------------------------
/app/handlers/__init__.py:
--------------------------------------------------------------------------------
1 | from ..custom_exceptions import AccessError
2 | from .auth import AuthHandler, CompleteHandler, DisconnectHandler
3 | from .base import BaseHandler
4 | from .mainpage import MainPageHandler
5 | from .profile import LogoutHandler, ProfileHandler
6 | from .socket_auth import SocketAuthTokenHandler
7 |
--------------------------------------------------------------------------------
/app/handlers/auth.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 |
3 | from social_core.actions import do_auth, do_complete, do_disconnect
4 | from social_core.backends.utils import get_backend
5 | from social_core.utils import get_strategy, setting_name
6 |
7 | from baselayer.app.handlers.base import BaseHandler
8 |
# Fallback PSA (Python Social Auth) storage/strategy implementations, used
# when the Tornado application settings do not override them.
DEFAULTS = {
    "STORAGE": "baselayer.app.psa.TornadoStorage",
    "STRATEGY": "baselayer.app.psa.TornadoStrategy",
}
13 |
14 |
def get_helper(request_handler, name):
    """Look up the PSA setting `name` on the handler, falling back to DEFAULTS."""
    key = setting_name(name)
    return request_handler.settings.get(key, DEFAULTS.get(name, None))
17 |
18 |
def load_strategy(request_handler):
    """Instantiate the configured PSA strategy with its storage backend."""
    return get_strategy(
        get_helper(request_handler, "STRATEGY"),
        get_helper(request_handler, "STORAGE"),
        request_handler,
    )
23 |
24 |
def load_backend(request_handler, strategy, name, redirect_uri):
    """Resolve the auth backend called `name` and bind it to `strategy`."""
    available = get_helper(request_handler, "AUTHENTICATION_BACKENDS")
    backend_cls = get_backend(available, name)
    return backend_cls(strategy, redirect_uri)
28 | return Backend(strategy, redirect_uri)
29 |
30 |
def psa(redirect_uri=None):
    """Decorator that attaches a PSA strategy and backend to the handler.

    Before the wrapped method runs, `self.strategy` and `self.backend` are
    populated; a non-absolute `redirect_uri` is first resolved through
    `reverse_url`.
    """

    def decorator(func):
        @wraps(func)
        def wrapper(self, backend, *args, **kwargs):
            uri = (
                self.reverse_url(redirect_uri, backend)
                if redirect_uri and not redirect_uri.startswith("/")
                else redirect_uri
            )
            self.strategy = load_strategy(self)
            self.backend = load_backend(self, self.strategy, backend, uri)
            return func(self, backend, *args, **kwargs)

        return wrapper

    return decorator
45 |
46 |
class AuthHandler(BaseHandler):
    """Kick off the OAuth login flow for the requested backend."""

    @psa("complete")
    def _auth(self, backend):
        do_auth(self.backend)

    def get(self, backend):
        self._auth(backend)

    def post(self, backend):
        self._auth(backend)
57 |
58 |
class CompleteHandler(BaseHandler):
    """Finish the OAuth flow: exchange the code and log the user in."""

    def get(self, backend):
        self._complete(backend)

    def post(self, backend):
        self._complete(backend)

    @psa("complete")
    def _complete(self, backend):
        # PSA invokes this callback with (backend, user, social_user);
        # only the user is needed to establish the session.
        def _do_login(backend, user, social_user):
            return self.login_user(user)

        do_complete(
            self.backend,
            login=_do_login,
            user=self.get_current_user(),
        )
73 |
74 |
class DisconnectHandler(BaseHandler):
    """Disconnect a social-auth association (POST only); delegates to PSA."""

    def post(self):
        do_disconnect()
78 |
--------------------------------------------------------------------------------
/app/handlers/base.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | from contextlib import contextmanager
4 | from json.decoder import JSONDecodeError
5 |
6 | # The Python Social Auth base handler gives us:
7 | # user_id, get_current_user, login_user
8 | #
9 | # `get_current_user` is needed by tornado.authentication,
10 | # and provides a cached version, `current_user`, that should
11 | # be used to look up the logged in user.
12 | import sqlalchemy
13 | import tornado.escape
14 | from tornado.log import app_log
15 | from tornado.web import RequestHandler
16 |
17 | from ...log import make_log
18 |
19 | # Initialize PSA tornado models
20 | from .. import psa
21 | from ..custom_exceptions import AccessError
22 | from ..env import load_env
23 | from ..flow import Flow
24 | from ..json_util import to_json
25 | from ..models import DBSession, User, VerifiedSession, bulk_verify, session_context_id
26 |
# Parse flags/config once at import; `cfg` is read below for app settings
# (e.g. `cfg["app"]`, `cfg["ports.dask"]`).
env, cfg = load_env()
# Module-level logger tagged "basehandler".
log = make_log("basehandler")
29 |
30 | # Python Social Auth documentation:
31 | # https://python-social-auth.readthedocs.io/en/latest/backends/implementation.html#auth-apis
32 |
33 |
class NoValue:
    """Sentinel used by `BaseHandler.get_query_argument` to detect whether
    a default was explicitly supplied (None is a legitimate default)."""

    pass
36 |
37 |
class PSABaseHandler(RequestHandler):
    """
    Mixin used by Python Social Auth

    Provides `user_id`, `get_current_user`, and `login_user` on top of
    Tornado's RequestHandler. `get_current_user` is what
    `tornado.web.authenticated` consults (cached as `self.current_user`).
    """

    def user_id(self):
        # Value of the secure "user_id" cookie (bytes), or None when absent.
        return self.get_secure_cookie("user_id")

    def get_current_user(self):
        """Return the logged-in User, or None.

        A user is returned only when the session cookies are present and
        either (a) no SocialAuth record exists for the user (treated as a
        machine-generated account), or (b) the stored OAuth uid cookie
        matches the SocialAuth record's uid.
        """
        if self.user_id() is None:
            return
        user_id = int(self.user_id())
        oauth_uid = self.get_secure_cookie("user_oauth_uid")
        if user_id and oauth_uid:
            with DBSession() as session:
                try:
                    user = session.scalars(
                        sqlalchemy.select(User).where(User.id == user_id)
                    ).first()
                    if user is None:
                        return None
                    sa = session.scalars(
                        sqlalchemy.select(psa.TornadoStorage.user).where(
                            psa.TornadoStorage.user.user_id == user_id
                        )
                    ).first()
                    if sa is None:
                        # No SocialAuth entry; probably machine generated user
                        return user
                    # Cookie value is bytes; compare against the encoded uid.
                    if sa.uid.encode("utf-8") == oauth_uid:
                        return user
                except Exception as e:
                    # Roll back so a failed query doesn't poison the session.
                    session.rollback()
                    log(f"Could not get current user: {e}")
                # uid mismatch or DB error: treat as not logged in.
                return None
        else:
            return None

    def login_user(self, user):
        """Set the secure session cookies for `user`.

        Also stores the user's OAuth uid (when a SocialAuth record exists)
        so `get_current_user` can validate the session on later requests.
        """
        with DBSession() as session:
            try:
                self.set_secure_cookie("user_id", str(user.id))
                user = session.scalars(
                    sqlalchemy.select(User).where(User.id == user.id)
                ).first()
                if user is None:
                    return
                sa = session.scalars(
                    sqlalchemy.select(psa.TornadoStorage.user).where(
                        psa.TornadoStorage.user.user_id == user.id
                    )
                ).first()
                if sa is not None:
                    self.set_secure_cookie("user_oauth_uid", sa.uid)
            except Exception as e:
                session.rollback()
                log(f"Could not login user: {e}")

    def write_error(self, status_code, exc_info=None):
        """Render uncaught errors via the login error template."""
        if exc_info is not None:
            err_cls, err, traceback = exc_info
        else:
            err = "An unknown error occurred"
        self.render("loginerror.html", app=cfg["app"], error_message=str(err))

    def log_exception(self, typ=None, value=None, tb=None):
        """Log expected auth-related errors briefly; log everything else
        with a full traceback."""
        expected_exceptions = [
            "Authentication Error:",
            "User account expired",
            "Credentials malformed",
            "Method Not Allowed",
            "Unauthorized",
        ]
        v_str = str(value)
        if any(exception in v_str for exception in expected_exceptions):
            log(f"Error response returned by [{self.request.path}]: [{v_str}]")
        else:
            app_log.error(
                "Uncaught exception %s\n%r",
                self._request_summary(),
                self.request,
                exc_info=(typ, value, tb),
            )

    def on_finish(self):
        # Return the scoped session to the registry after each request.
        DBSession.remove()
124 |
125 |
class BaseHandler(PSABaseHandler):
    @contextmanager
    def Session(self):
        """
        Generate a scoped session that also has knowledge
        of the current user, so when commit() is called on it
        it will also verify that all rows being committed
        are accessible to the user.
        The current user is taken from the handler's `current_user`.
        This is a shortcut method to `models.Session`
        that saves the need to manually input the user object.

        Returns
        -------
        A scoped session object that can be used in a context
        manager to access the database; verification against the
        current user is applied before every commit.

        """
        with VerifiedSession(self.current_user) as session:
            # must merge the user object with the current session
            # ref: https://docs.sqlalchemy.org/en/14/orm/session_basics.html#adding-new-or-existing-items
            session.add(self.current_user)
            session.bind = DBSession.session_factory.kw["bind"]
            yield session

    def verify_permissions(self):
        """Check that the current user has permission to create, read,
        update, or delete rows that are present in the session. If not,
        raise an AccessError (causing the transaction to fail and the API to
        respond with 401).
        """

        # items to be inserted
        new_rows = list(DBSession().new)

        # items to be updated
        updated_rows = [
            row for row in DBSession().dirty if DBSession().is_modified(row)
        ]

        # items to be deleted
        deleted_rows = list(DBSession().deleted)

        # items that were only read
        read_rows = list(
            set(DBSession().identity_map.values())
            - (set(updated_rows) | set(new_rows) | set(deleted_rows))
        )

        # need to check delete permissions before flushing, as deleted records
        # are not present in the transaction after flush (thus can't be used in
        # joins). Read permissions can be checked here or below as they do not
        # change on flush.
        for mode, collection in zip(
            ["read", "update", "delete"],
            [read_rows, updated_rows, deleted_rows],
        ):
            bulk_verify(mode, collection, self.current_user)

        # update transaction state in DB, but don't commit yet. this updates
        # or adds rows in the database and uses their new state in joins,
        # for permissions checking purposes.
        DBSession().flush()
        bulk_verify("create", new_rows, self.current_user)

    def verify_and_commit(self):
        """Verify permissions on the current database session and commit if
        successful, otherwise raise an AccessError.
        """
        self.verify_permissions()
        DBSession().commit()

    def prepare(self):
        """Per-request setup: bind config and Flow, tag the DB session
        context, normalize path arguments, and wait for the database
        connection to become available."""
        self.cfg = self.application.cfg
        self.flow = Flow()
        session_context_id.set(uuid.uuid4().hex)

        # Remove slash prefixes from arguments
        if self.path_args:
            self.path_args = [
                arg.lstrip("/") if arg is not None else None for arg in self.path_args
            ]
            self.path_args = [arg if (arg != "") else None for arg in self.path_args]

        # If there are no arguments, make it explicit, otherwise
        # get / post / put / delete all have to accept an optional kwd argument
        if len(self.path_args) == 1 and self.path_args[0] is None:
            self.path_args = []

        # Wait (up to N attempts, 5 s apart) for the database to be bound.
        # TODO Refactor to be a context manager or utility function
        N = 5
        for i in range(1, N + 1):
            try:
                assert DBSession.session_factory.kw["bind"] is not None
            except Exception as e:
                if i == N:
                    raise e
                log("Error connecting to database, sleeping for a while")
                time.sleep(5)
            else:
                # Connected: stop retrying. (Previously the check was
                # needlessly repeated N times even when it succeeded.)
                break

        return super().prepare()

    def push(self, action, payload=None):
        """Broadcast a message to current frontend user.

        Parameters
        ----------
        action : str
            Name of frontend action to perform after API success. This action
            is sent to the frontend over WebSocket.
        payload : dict, optional
            Action payload. This data accompanies the action string
            to the frontend.
        """
        # Avoid a shared mutable default argument.
        if payload is None:
            payload = {}
        # Don't push messages if current user is a token
        if hasattr(self.current_user, "username"):
            self.flow.push(self.current_user.id, action, payload)

    def push_all(self, action, payload=None):
        """Broadcast a message to all frontend users.

        Use this functionality with care for two reasons:

        - It emits many messages, and if those messages trigger a response from
          frontends, it can result in many incoming API requests
        - Any information included in the message will be seen by everyone; and
          everyone will know it was sent. Do not, e.g., send around a message
          saying "secret object XYZ was updated; fetch the latest version".
          Even though the user won't be able to fetch the object, they'll
          know that it exists, and that it was modified.

        Parameters
        ----------
        action : str
            Name of frontend action to perform after API success. This action
            is sent to the frontend over WebSocket.
        payload : dict, optional
            Action payload. This data accompanies the action string
            to the frontend.
        """
        if payload is None:
            payload = {}
        self.flow.push("*", action, payload=payload)

    def get_json(self):
        """Decode the request body as a JSON object (empty body yields {}).

        Raises
        ------
        Exception
            If the body is not valid JSON, or decodes to a non-dict value.
        """
        if len(self.request.body) == 0:
            return {}
        try:
            json = tornado.escape.json_decode(self.request.body)
            if not isinstance(json, dict):
                raise Exception("Please ensure posted data is of type application/json")
            return json
        except JSONDecodeError:
            raise Exception(
                f"JSON decode of request body failed on {self.request.uri}."
                " Please ensure all requests are of type application/json."
            )

    def error(self, message, data=None, status=400, extra=None):
        """Push an error message to the frontend via WebSocket connection.

        The return JSON has the following format::

            {
              "status": "error",
              "data": ...,
              ...extra...
            }

        Parameters
        ----------
        message : str
            Description of the error.
        data : dict, optional
            Any data to be included with error message.
        status : int, optional
            HTTP status code. Defaults to 400 (bad request).
            See https://www.restapitutorial.com/httpstatuscodes.html for a full
            list.
        extra : dict
            Extra fields to be included in the response.
        """
        # Avoid shared mutable default arguments.
        data = {} if data is None else data
        extra = {} if extra is None else extra
        self.set_header("Content-Type", "application/json")
        self.set_status(status)
        self.write({"status": "error", "message": message, "data": data, **extra})

    def action(self, action, payload=None):
        """Push an action to the frontend via WebSocket connection.

        Parameters
        ----------
        action : str
            Name of frontend action to perform after API success. This action
            is sent to the frontend over WebSocket.
        payload : dict, optional
            Action payload. This data accompanies the action string
            to the frontend.
        """
        # `push` normalizes a None payload to {}.
        self.push(action, payload)

    def success(self, data=None, action=None, payload=None, status=200, extra=None):
        """Write data and send actions on API success.

        The return JSON has the following format::

            {
              "status": "success",
              "data": ...,
              ...extra...
            }

        Parameters
        ----------
        data : dict, optional
            The JSON returned by the API call in the `data` field.
        action : str, optional
            Name of frontend action to perform after API success. This action
            is sent to the frontend over WebSocket.
        payload : dict, optional
            Action payload. This data accompanies the action string
            to the frontend.
        status : int, optional
            HTTP status code. Defaults to 200 (OK).
            See https://www.restapitutorial.com/httpstatuscodes.html for a full
            list.
        extra : dict
            Extra fields to be included in the response.
        """
        # Avoid shared mutable default arguments.
        data = {} if data is None else data
        extra = {} if extra is None else extra
        if action is not None:
            self.action(action, payload)

        self.set_header("Content-Type", "application/json")
        self.set_status(status)
        self.write(to_json({"status": "success", "data": data, **extra}))

    def write_error(self, status_code, exc_info=None):
        """Render uncaught exceptions as the standard JSON error envelope."""
        if exc_info is not None:
            err_cls, err, traceback = exc_info
            # Bug fix: `err_cls` is the exception *class*, so the previous
            # `isinstance(err_cls, AccessError)` was always False and access
            # errors were never mapped to 401. Use issubclass instead.
            if issubclass(err_cls, AccessError):
                status_code = 401
        else:
            err = "An unknown error occurred"

        self.error(str(err), status=status_code)

    async def _get_client(self, timeout=5):
        """Connect asynchronously to the local Dask scheduler (port taken
        from `cfg["ports.dask"]`) and return the client."""
        IP = "127.0.0.1"
        PORT_SCHEDULER = self.cfg["ports.dask"]

        # Imported lazily so handlers that never touch Dask don't pay for it.
        from distributed import Client

        client = await Client(
            f"{IP}:{PORT_SCHEDULER}", asynchronous=True, timeout=timeout
        )

        return client

    def push_notification(self, note, notification_type="info"):
        """Show a notification popup on the current user's frontend."""
        self.push(
            action="baselayer/SHOW_NOTIFICATION",
            payload={"note": note, "type": notification_type},
        )

    def get_query_argument(self, value, default=NoValue, **kwargs):
        """Like Tornado's get_query_argument, but coerces the result to a
        bool when the supplied default is itself a bool."""
        if default is not NoValue:
            kwargs["default"] = default
        arg = super().get_query_argument(value, **kwargs)
        if isinstance(kwargs.get("default", None), bool):
            arg = str(arg).lower() in ["true", "yes", "t", "1"]
        return arg
404 |
--------------------------------------------------------------------------------
/app/handlers/mainpage.py:
--------------------------------------------------------------------------------
1 | from baselayer.app.handlers.base import BaseHandler
2 |
3 |
class MainPageHandler(BaseHandler):
    """Serve the app entry point: the login page for anonymous visitors,
    the main index for authenticated users."""

    def get(self):
        template = "index.html" if self.current_user else "login.html"
        self.render(template)
10 |
--------------------------------------------------------------------------------
/app/handlers/profile.py:
--------------------------------------------------------------------------------
1 | import tornado.web
2 |
3 | from baselayer.app.handlers.base import BaseHandler
4 |
5 |
class ProfileHandler(BaseHandler):
    """Expose the logged-in user's profile (currently just the username)."""

    @tornado.web.authenticated
    def get(self):
        user = self.current_user
        return self.success({"username": user.username})
10 |
11 |
class LogoutHandler(BaseHandler):
    """Log the user out, then redirect to the main page."""

    @tornado.web.authenticated
    def get(self):
        # Dropping the `user_id` cookie invalidates the session; see
        # PSABaseHandler.get_current_user.
        self.clear_cookie("user_id")
        self.redirect("/")
17 |
--------------------------------------------------------------------------------
/app/handlers/socket_auth.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | import jwt
4 | import tornado.web
5 |
6 | from baselayer.app.handlers.base import BaseHandler
7 |
8 | # !!!
9 | # This API call should **only be callable by logged in users**
10 | # !!!
11 |
12 |
class SocketAuthTokenHandler(BaseHandler):
    """Issue a short-lived JWT used to authenticate the websocket.

    This API call should **only be callable by logged in users** — enforced
    by the `tornado.web.authenticated` decorator.
    """

    @tornado.web.authenticated
    def get(self):
        user = self.current_user
        if user is None:
            raise RuntimeError(
                "No current user while authenticating socket. "
                "This should NEVER happen."
            )

        secret = self.cfg["app.secret_key"]
        # Use a timezone-aware UTC timestamp: `datetime.utcnow()` is
        # deprecated (Python 3.12+) and naive datetimes are error-prone
        # for the JWT `exp` claim.
        expiry = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
            minutes=15
        )
        token = jwt.encode(
            {
                "exp": expiry,
                "user_id": str(user.id),
            },
            secret,
        )
        self.success({"token": token})
32 |
--------------------------------------------------------------------------------
/app/json_util.py:
--------------------------------------------------------------------------------
1 | from datetime import date, datetime
2 |
3 | import simplejson as json
4 | from arrow.arrow import Arrow
5 | from sqlalchemy_utils import PhoneNumber
6 |
# Map of basic Python types to their string names; used by `Encoder` to
# serialize bare *type objects* (e.g. `int` itself) appearing in payloads.
data_types = {
    int: "int",
    float: "float",
    bool: "bool",
    dict: "dict",
    str: "str",
    list: "list",
}
15 |
16 |
class Encoder(json.JSONEncoder):
    """Extends json.JSONEncoder with additional capabilities/configurations.

    Handles datetimes/dates/Arrow objects, bytes, SQLAlchemy models, numpy
    arrays, pandas DataFrames, phone numbers, Mongo ObjectIds, and bare
    type objects (serialized to their names via `data_types`).
    """

    def default(self, o):
        if isinstance(o, (datetime, Arrow, date)):
            return o.isoformat()

        elif isinstance(o, bytes):
            return o.decode("utf-8")

        elif hasattr(o, "__table__"):  # SQLAlchemy model
            return o.to_dict()

        elif type(o).__name__ == "ndarray":  # avoid numpy import
            return o.tolist()

        elif type(o).__name__ == "DataFrame":  # avoid pandas import
            o.columns = o.columns.droplevel("channel")  # flatten MultiIndex
            return o.to_dict(orient="index")

        elif isinstance(o, PhoneNumber):
            return o.e164

        elif o.__class__.__name__ == "ObjectId":
            return str(o)

        # Bare type objects (e.g. `int`, `float`) serialize to their names.
        # This branch also covers the previously separate `o is int` and
        # `o is float` checks, which returned the same values and were
        # therefore redundant.
        elif type(o) is type and o in data_types:
            return data_types[o]

        return json.JSONEncoder.default(self, o)
53 |
54 |
def to_json(obj, **kwargs):
    """Serialize `obj` with the custom Encoder (NaN -> null; indent
    defaults to 2 unless overridden via kwargs)."""
    kwargs.setdefault("indent", 2)
    return json.dumps(obj, cls=Encoder, ignore_nan=True, **kwargs)
58 |
--------------------------------------------------------------------------------
/app/model_util.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import sqlalchemy as sa
4 |
5 | from baselayer.app import models
6 |
7 | # Do not remove this "unused" import; it is required for
8 | # psa to initialize the Tornado models
9 | from . import psa # noqa: F401
10 |
11 |
def drop_tables():
    """Reflect the connected database and drop every table it contains."""
    engine = models.DBSession.session_factory.kw["bind"]
    print(f"Dropping tables on database {engine.url.database}")
    metadata = sa.MetaData()
    metadata.reflect(bind=engine)
    metadata.drop_all(bind=engine)
18 |
19 |
def create_tables(retry=5, add=True):
    """Create tables for all models, retrying at 3 second intervals if the
    database is not reachable.

    Parameters
    ----------
    retry : int
        Number of connection attempts before giving up.
    add : bool
        Whether to add tables if some tables already exist. This is
        convenient during development, but will cause problems
        for installations that depend on migrations to create new
        tables.

    """
    # (Removed an unused `conn` assignment that preceded this check.)
    tables = models.Base.metadata.sorted_tables
    if tables and not add:
        print("Existing tables found; not creating additional tables")
        return

    for i in range(1, retry + 1):
        try:
            conn = models.DBSession.session_factory.kw["bind"]
            print(f"Creating tables on database {conn.url.database}")
            models.Base.metadata.create_all(conn)

            table_list = ", ".join(list(models.Base.metadata.tables.keys()))
            print(f"Refreshed tables: {table_list}")

            return

        except Exception as e:
            if i == retry:
                # Bare raise preserves the original traceback.
                raise
            print("Could not connect to database...sleeping 3")
            print(f"  > {e}")
            time.sleep(3)
59 |
60 |
def clear_tables():
    """Drop all existing tables, then recreate them from the models."""
    drop_tables()
    create_tables()
64 |
65 |
def recursive_to_dict(obj):
    """Recursively convert nested containers (and SQLAlchemy models, via
    their `to_dict`) into plain dicts and lists; tuples become lists."""
    if isinstance(obj, dict):
        return {key: recursive_to_dict(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [recursive_to_dict(item) for item in obj]
    if hasattr(obj, "__table__"):  # SQLAlchemy model
        return recursive_to_dict(obj.to_dict())
    return obj
74 |
--------------------------------------------------------------------------------
/app/test_util.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | from selenium import webdriver
5 | from selenium.common.exceptions import (
6 | ElementClickInterceptedException,
7 | JavascriptException,
8 | NoSuchElementException,
9 | StaleElementReferenceException,
10 | TimeoutException,
11 | )
12 | from selenium.webdriver.common.action_chains import ActionChains
13 | from selenium.webdriver.common.by import By
14 | from selenium.webdriver.support import expected_conditions
15 | from selenium.webdriver.support.ui import WebDriverWait
16 | from seleniumrequests.request import RequestsSessionMixin
17 |
18 | from baselayer.app import models
19 | from baselayer.app.config import load_config
20 |
# Load the configuration once at import time; used below for the browser
# download directory preference.
cfg = load_config()
22 |
23 |
def set_server_url(server_url):
    """Set web driver server URL using value loaded from test config file.

    Parameters
    ----------
    server_url : str
        Base URL that `MyCustomWebDriver.get` prefixes onto relative URIs.
    """
    MyCustomWebDriver.server_url = server_url
27 |
28 |
class MyCustomWebDriver(RequestsSessionMixin, webdriver.Firefox):
    """Firefox WebDriver with `requests` support plus wait/click helpers.

    `server_url` must be set (via `set_server_url`) before `get` is used;
    `get` takes a URI relative to that base. The `wait_for_*` helpers wrap
    Selenium's explicit waits; `scroll_to_element_and_click` retries with
    progressively cruder click strategies.
    """

    @property
    def server_url(self):
        if not hasattr(self, "_server_url"):
            raise NotImplementedError(
                "Please first set the web driver URL" " using `set_server_url`"
            )
        return self._server_url

    @server_url.setter
    def server_url(self, value):
        self._server_url = value

    def get(self, uri):
        """Navigate to `server_url + uri`; if the page has a websocket
        status widget, wait until it reports a connection."""
        webdriver.Firefox.get(self, self.server_url + uri)
        try:
            self.find_element(By.ID, "websocketStatus")
            self.wait_for_xpath(
                "//*[@id='websocketStatus' and contains(@title,'connected')]"
            )
        except NoSuchElementException:
            # Page has no websocket indicator; nothing to wait for.
            pass

    def wait_for_xpath(self, xpath, timeout=10):
        return WebDriverWait(self, timeout).until(
            expected_conditions.presence_of_element_located((By.XPATH, xpath))
        )

    def wait_for_css(self, css, timeout=10):
        return WebDriverWait(self, timeout).until(
            expected_conditions.presence_of_element_located((By.CSS_SELECTOR, css))
        )

    def wait_for_xpath_to_appear(self, xpath, timeout=10):
        return WebDriverWait(self, timeout).until_not(
            expected_conditions.invisibility_of_element((By.XPATH, xpath))
        )

    def wait_for_xpath_to_disappear(self, xpath, timeout=10):
        return WebDriverWait(self, timeout).until(
            expected_conditions.invisibility_of_element((By.XPATH, xpath))
        )

    def wait_for_css_to_disappear(self, css, timeout=10):
        return WebDriverWait(self, timeout).until(
            expected_conditions.invisibility_of_element((By.CSS_SELECTOR, css))
        )

    def wait_for_xpath_to_be_clickable(self, xpath, timeout=10):
        return WebDriverWait(self, timeout).until(
            expected_conditions.element_to_be_clickable((By.XPATH, xpath))
        )

    def wait_for_xpath_to_be_unclickable(self, xpath, timeout=10):
        return WebDriverWait(self, timeout).until_not(
            expected_conditions.element_to_be_clickable((By.XPATH, xpath))
        )

    def wait_for_css_to_be_clickable(self, css, timeout=10):
        return WebDriverWait(self, timeout).until(
            expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, css))
        )

    def wait_for_css_to_be_unclickable(self, css, timeout=10):
        return WebDriverWait(self, timeout).until_not(
            expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, css))
        )

    def scroll_to_element(self, element, scroll_parent=False):
        """Bring `element` into view: scroll its container when
        `scroll_parent` is set, otherwise center it in the viewport."""
        scroll_script = (
            """
            arguments[0].scrollIntoView();
            """
            if scroll_parent
            else """
            const viewPortHeight = Math.max(document.documentElement.clientHeight, window.innerHeight || 0);
            const elementTop = arguments[0].getBoundingClientRect().top;
            window.scrollBy(0, elementTop - (viewPortHeight / 2));
            """
        )
        self.execute_script(scroll_script, element)

    def scroll_to_element_and_click(self, element, timeout=10, scroll_parent=False):
        """Scroll to `element` and click it, falling back from a native
        click to a JS click to a raw mouse click at the cursor."""
        self.scroll_to_element(element, scroll_parent=scroll_parent)
        ActionChains(self).move_to_element(element).perform()

        try:
            return element.click()
        except ElementClickInterceptedException:
            pass
        except StaleElementReferenceException:
            pass

        # Native click failed (overlay or re-render); try clicking via JS.
        try:
            return self.execute_script("arguments[0].click();", element)
        except JavascriptException:
            pass
        except StaleElementReferenceException:
            pass

        # Tried to click something that's not a button, try sending
        # a mouse click to that coordinate
        ActionChains(self).click().perform()

    def click_xpath(self, xpath, wait_clickable=True, timeout=10, scroll_parent=False):
        if wait_clickable:
            element = self.wait_for_xpath_to_be_clickable(xpath, timeout=timeout)
        else:
            element = self.wait_for_xpath(xpath)
        return self.scroll_to_element_and_click(element, scroll_parent=scroll_parent)

    def click_css(self, css, timeout=10, scroll_parent=False):
        element = self.wait_for_css_to_be_clickable(css, timeout=timeout)
        return self.scroll_to_element_and_click(element, scroll_parent=scroll_parent)
143 |
144 |
@pytest.fixture(scope="session")
def driver(request):
    """Session-scoped Firefox driver: configures download handling and
    optional headless mode, logs the test user in, and closes the browser
    at teardown."""
    import shutil

    from selenium import webdriver
    from webdriver_manager.firefox import GeckoDriverManager

    options = webdriver.FirefoxOptions()
    if "BASELAYER_TEST_HEADLESS" in os.environ:
        # NOTE(review): `options.headless` is deprecated in Selenium 4.x in
        # favor of `options.add_argument("-headless")` — confirm against the
        # pinned selenium version.
        options.headless = True
    options.set_preference("devtools.console.stdout.content", True)
    # Download files silently into the configured downloads folder.
    options.set_preference("browser.download.manager.showWhenStarting", False)
    options.set_preference("browser.download.folderList", 2)
    options.set_preference(
        "browser.download.dir", os.path.abspath(cfg["paths.downloads_folder"])
    )
    options.set_preference(
        "browser.helperApps.neverAsk.saveToDisk",
        (
            "text/csv,text/plain,application/octet-stream,"
            "text/comma-separated-values,text/html"
        ),
    )

    # Prefer a geckodriver already on PATH; otherwise download one.
    executable_path = shutil.which("geckodriver")
    if executable_path is None:
        executable_path = GeckoDriverManager().install()
    service = webdriver.firefox.service.Service(executable_path=executable_path)

    driver = MyCustomWebDriver(options=options, service=service)
    driver.set_window_size(1920, 1200)
    login(driver)

    yield driver

    driver.close()
181 |
182 |
def login(driver):
    """Log the test user in via the (fake) Google OAuth flow.

    No-op if the test user is already logged in; raises TimeoutException
    (including the page source for debugging) if the username never
    appears after the login attempt.
    """
    username_xpath = '//*[contains(string(),"testuser-cesium-ml-org")]'

    driver.get("/")
    try:
        driver.wait_for_xpath(username_xpath, 0.25)
        return  # Already logged in
    except TimeoutException:
        pass

    # Click the OAuth login link if the login page is showing.
    try:
        element = driver.wait_for_xpath(
            '//a[contains(@href,"/login/google-oauth2")]', 20
        )
        element.click()
    except TimeoutException:
        pass

    try:
        driver.wait_for_xpath(username_xpath, 5)
    except TimeoutException:
        raise TimeoutException("Login failed:\n" + driver.page_source)
205 |
206 |
@pytest.fixture(scope="function", autouse=True)
def reset_state(request):
    """Roll back any uncommitted DB changes after every test."""
    request.addfinalizer(lambda: models.DBSession().rollback())
213 |
--------------------------------------------------------------------------------
/conf/supervisor/supervisor.conf.template:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | loglevel=info
3 | logfile=log/supervisord.log
4 | pidfile=run/supervisord.pid
5 | nodaemon=true
6 | childlogdir=log/sv_child
7 |
8 | [supervisorctl]
9 | serverurl=unix://run/supervisor.sock
10 | username = dummy
11 | password = dummy
12 |
13 | [unix_http_server]
14 | file=run/supervisor.sock
15 | username = dummy
16 | password = dummy
17 |
18 | [rpcinterface:supervisor]
19 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
20 |
--------------------------------------------------------------------------------
/config.yaml.defaults:
--------------------------------------------------------------------------------
1 | app:
2 | title: Baselayer Example App
3 | secret_key: abc01234 # This secret key can be any random string of
4 | # characters.
5 | #
6 | # You should re-generate this for your application
7 | # using:
8 | #
9 | # base64.b64encode(os.urandom(50)).decode('ascii')
10 | factory:
11 |
12 | database:
13 | database:
14 | host: localhost
15 | port: 5432
16 | user:
17 | password:
18 |
19 | paths:
20 | downloads_folder: '/tmp'
21 |
22 | server:
23 | # Set this to true if the URL of your server is expected to be secure, i.e.
24 | # https://...
25 | # E.g., if you are using an HTTPS load balancer, this value will likely be true.
26 | #
27 | # Also see `ssl_certificate` for running nginx in SSL mode.
28 | ssl: False
29 |
30 | # If you are using certificates, you can specify them here
31 | # See http://nginx.org/en/docs/http/configuring_https_servers.html for details
32 | ssl_certificate:
33 | ssl_certificate_key:
34 |
35 | # According to the Tornado docs at
36 | # https://www.tornadoweb.org/en/stable/guide/running.html#processes-and-ports:
37 | #
38 | # Due to the Python GIL (Global Interpreter Lock), it is necessary
39 | # to run multiple Python processes to take full advantage of
40 | # multi-CPU machines. Typically it is best to run one process per
41 | # CPU.
42 | processes: 4
43 |
44 | # How many of the above processes should be dedicated to
45 | # frontend only (i.e., no token authorized API requests)
46 | dedicated_frontend_processes: 2
47 |
48 | # The max size of a request body in megabytes (M)
49 | max_body_size: 10
50 |
51 | # nginx parameter which sets both the time in seconds before which
52 | # the server is considered unavailable and the subsequent period of
53 | # time the server will be unavailable
54 | fail_timeout: 20
55 |
56 | # Rate limit: number of requests per second (see https://www.nginx.com/blog/rate-limiting-nginx/)
57 | rate_limit: 5
58 |
59 | # Rate limit burst size (https://www.nginx.com/blog/rate-limiting-nginx/#bursts)
60 | burst: 10
61 |
62 | # In addition to the local network, specify any IP addresses that are to be
63 | # exempt from API rate limiting
64 | whitelisted_ips: []
65 |
66 | # Specify IPs or address ranges (e.g., 130.211.0.0/22) that are associated
67 | # with upstream load balancing.
68 | # These trusted addresses are used to uncover the originating IP.
69 | loadbalancer_ips: []
70 |
71 | # From https://console.developers.google.com/
72 | #
73 | # - Create Client ID
74 | # - Javascript origins: https://localhost:5000
75 | # - Authorized redirect URLs: http://localhost:5000/complete/google-oauth2/
76 | #
77 | # You need to have Google+ API enabled; it takes a few minutes to activate.
78 | auth:
79 | debug_login: False
80 | google_oauth2_key:
81 | google_oauth2_secret:
82 | google_oauth2_timeout: 15
83 |
84 |
85 | services:
86 | paths:
87 | - ./baselayer/services
88 | - ./services
89 |
90 | # By default, all services other than dask are enabled.
91 | # Use this list to add services if 'disabled' is set to '*'
92 | enabled:
93 |
94 | # A list of service names, or '*'
95 | disabled:
96 | - dask
97 |
98 | # You can use disabled to disable specific services, or '*' to disable them all
99 |
100 |
101 | log:
102 | # if true, log all API calls, including successful ones, if false,
103 | # log only those that trigger a warning / error
104 | api_calls: False
105 |
106 | # SQL logging
107 | # see e.g. https://docs.sqlalchemy.org/en/20/core/engines.html#configuring-logging
108 |
109 | # if True, enable SQL echoing.
110 | database: False
111 |
112 | # if True, enable connection pool logging
113 | database_pool: False
114 |
115 | ports:
116 | websocket: 64000
117 | fake_oauth: 63000
118 | app: 5000
119 | app_http_proxy: 5001
120 | app_internal: 65000 # nginx forwards this port to ports:app
121 | dask: 63500
122 | websocket_path_in: 'ipc://run/message_flow_in'
123 | websocket_path_out: 'ipc://run/message_flow_out'
124 | status: 64500
125 | migration_manager: 64501
126 |
127 | external_logging:
128 | papertrail:
129 | # get an account at https://papertrailapp.com
130 | enabled: False
131 | # change url to the correct subdomain for your account
132 | # and change the port as well
133 | url:
134 | port:
135 | # which log files, if any do you not want to send over to the 3rd party?
136 | excluded_log_files: [""]
137 |
138 | security:
139 | strict: true
140 | slack:
141 | enabled: false
142 | url: null
143 |
144 |
145 | # You can schedule jobs to run at a certain time interval (given in minutes).
146 | #
147 | # If baselayer is not running at the time the job is supposed to run,
148 | # it will be run when fired up the next time.
149 | #
# If a "limit" is provided, the cron job will only be executed between
151 | # the start and end times given.
152 | #
153 | # cron:
154 | # - interval: 1
155 | # script: tools/1_minute.py
156 | # - interval: 5
157 | # script: tools/5_minute.py
158 | # limit: ["01:00", "02:00"]
159 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
21 |
22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
23 |
24 | help:
	@echo "Please use \`make <target>' where <target> is one of"
26 | @echo " html to make standalone HTML files"
27 | @echo " dirhtml to make HTML files named index.html in directories"
28 | @echo " singlehtml to make a single large HTML file"
29 | @echo " pickle to make pickle files"
30 | @echo " json to make JSON files"
31 | @echo " htmlhelp to make HTML files and a HTML help project"
32 | @echo " qthelp to make HTML files and a qthelp project"
33 | @echo " applehelp to make an Apple Help Book"
34 | @echo " devhelp to make HTML files and a Devhelp project"
35 | @echo " epub to make an epub"
36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
37 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
39 | @echo " text to make text files"
40 | @echo " man to make manual pages"
41 | @echo " texinfo to make Texinfo files"
42 | @echo " info to make Texinfo files and run them through makeinfo"
43 | @echo " gettext to make PO message catalogs"
44 | @echo " changes to make an overview of all changed/added/deprecated items"
45 | @echo " xml to make Docutils-native XML files"
46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
47 | @echo " linkcheck to check all external links for integrity"
48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
49 | @echo " coverage to run coverage check of the documentation (if enabled)"
50 |
51 | clean:
52 | rm -rf $(BUILDDIR)/*
53 | rm -rf api/
54 | rm -rf examples/*output* examples/*ipynb
55 | rm -rf auto_examples
56 | rm -rf modules/
57 |
58 | html:
59 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
60 | @echo
61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
62 |
63 | dirhtml:
64 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
65 | @echo
66 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
67 |
68 | singlehtml:
69 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
70 | @echo
71 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
72 |
73 | pickle:
74 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
75 | @echo
76 | @echo "Build finished; now you can process the pickle files."
77 |
78 | json:
79 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
80 | @echo
81 | @echo "Build finished; now you can process the JSON files."
82 |
83 | htmlhelp:
84 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
85 | @echo
86 | @echo "Build finished; now you can run HTML Help Workshop with the" \
87 | ".hhp project file in $(BUILDDIR)/htmlhelp."
88 |
89 | qthelp:
90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
91 | @echo
92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/baselayer.qhcp"
95 | @echo "To view the help file:"
96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/baselayer.qhc"
97 |
98 | applehelp:
99 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
100 | @echo
101 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
102 | @echo "N.B. You won't be able to view it unless you put it in" \
103 | "~/Library/Documentation/Help or install it in your application" \
104 | "bundle."
105 |
106 | devhelp:
107 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
108 | @echo
109 | @echo "Build finished."
110 | @echo "To view the help file:"
111 | @echo "# mkdir -p $$HOME/.local/share/devhelp/baselayer"
112 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/baselayer"
113 | @echo "# devhelp"
114 |
115 | epub:
116 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
117 | @echo
118 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
119 |
120 | latex:
121 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
122 | @echo
123 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
124 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
125 | "(use \`make latexpdf' here to do that automatically)."
126 |
127 | latexpdf:
128 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
129 | @echo "Running LaTeX files through pdflatex..."
130 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
131 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
132 |
133 | latexpdfja:
134 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
135 | @echo "Running LaTeX files through platex and dvipdfmx..."
136 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
137 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
138 |
139 | text:
140 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
141 | @echo
142 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
143 |
144 | man:
145 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
146 | @echo
147 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
148 |
149 | texinfo:
150 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
151 | @echo
152 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
153 | @echo "Run \`make' in that directory to run these through makeinfo" \
154 | "(use \`make info' here to do that automatically)."
155 |
156 | info:
157 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
158 | @echo "Running Texinfo files through makeinfo..."
159 | make -C $(BUILDDIR)/texinfo info
160 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
161 |
162 | gettext:
163 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
164 | @echo
165 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
166 |
167 | changes:
168 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
169 | @echo
170 | @echo "The overview file is in $(BUILDDIR)/changes."
171 |
172 | linkcheck:
173 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
174 | @echo
175 | @echo "Link check complete; look for any errors in the above output " \
176 | "or in $(BUILDDIR)/linkcheck/output.txt."
177 |
178 | doctest:
179 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
180 | @echo "Testing of doctests in the sources finished, look at the " \
181 | "results in $(BUILDDIR)/doctest/output.txt."
182 |
183 | coverage:
184 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
185 | @echo "Testing of coverage in the sources finished, look at the " \
186 | "results in $(BUILDDIR)/coverage/python.txt."
187 |
188 | xml:
189 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
190 | @echo
191 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
192 |
193 | pseudoxml:
194 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
195 | @echo
196 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
197 |
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # baselayer documentation build configuration file, created by
4 | # sphinx-quickstart on Thu Jul 23 13:00:45 2015 (copied from `cesium`).
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | import os
16 |
17 | # If extensions (or modules to document with autodoc) are in another directory,
18 | # add these directories to sys.path here. If the directory is relative to the
19 | # documentation root, use os.path.abspath to make it absolute, like shown here.
20 | # sys.path.insert(0, os.path.abspath('.'))
21 |
22 | # -- General configuration ------------------------------------------------
23 |
24 | # If your documentation needs a minimal Sphinx version, state it here.
25 | # needs_sphinx = '1.0'
26 |
27 | # Add any Sphinx extension module names here, as strings. They can be
28 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
29 | # ones.
30 | extensions = [
31 | "recommonmark",
32 | "sphinx.ext.mathjax",
33 | # 'sphinx.ext.autodoc',
34 | "sphinx.ext.autosummary",
35 | # 'sphinx.ext.pngmath',
36 | # 'numpydoc',
37 | # 'plot2rst',
38 | # 'sphinx.ext.intersphinx',
39 | # 'sphinx.ext.linkcode',
40 | # 'sphinx_gallery.gen_gallery'
41 | ]
42 |
43 | # Add any paths that contain templates here, relative to this directory.
44 | templates_path = ["_templates"]
45 |
46 | source_suffix = {
47 | ".rst": "restructuredtext",
48 | ".md": "markdown",
49 | }
50 |
51 | # The encoding of source files.
52 | # source_encoding = 'utf-8-sig'
53 |
54 | # The main toctree document.
55 | main_doc = "index"
56 |
57 | # General information about the project.
58 | project = "baselayer"
59 | copyright = "2017, The baselayer Team"
60 | author = "The baselayer Team"
61 |
62 | # The version info for the project you're documenting, acts as replacement for
63 | # |version| and |release|, also used in various other places throughout the
64 | # built documents.
65 | #
66 |
67 | # setup_lines = open('../baselayer/__init__.py').readlines()
68 | version = "vUndefined"
69 | # for l in setup_lines:
70 | # if l.startswith('__version__'):
71 | # version = l.split("'")[1]
72 | # break
73 |
74 | # The full version, including alpha/beta/rc tags.
75 | release = version
76 |
77 | # The language for content autogenerated by Sphinx. Refer to documentation
78 | # for a list of supported languages.
79 | #
80 | # This is also used if you do content translation via gettext catalogs.
81 | # Usually you set "language" from the command line for these cases.
82 | language = None
83 |
84 | # There are two options for replacing |today|: either, you set today to some
85 | # non-false value, then it is used:
86 | # today = ''
87 | # Else, today_fmt is used as the format for a strftime call.
88 | # today_fmt = '%B %d, %Y'
89 |
90 | # List of patterns, relative to source directory, that match files and
91 | # directories to ignore when looking for source files.
92 | exclude_patterns = ["_build"]
93 |
94 | # The reST default role (used for this markup: `text`) to use for all
95 | # documents.
96 | # default_role = None
97 |
98 | # If true, '()' will be appended to :func: etc. cross-reference text.
99 | # add_function_parentheses = True
100 |
101 | # If true, the current module name will be prepended to all description
102 | # unit titles (such as .. function::).
103 | # add_module_names = True
104 |
105 | # If true, sectionauthor and moduleauthor directives will be shown in the
106 | # output. They are ignored by default.
107 | # show_authors = False
108 |
109 | # The name of the Pygments (syntax highlighting) style to use.
110 | pygments_style = "sphinx"
111 |
112 | # A list of ignored prefixes for module index sorting.
113 | # modindex_common_prefix = []
114 |
115 | # If true, keep warnings as "system message" paragraphs in the built documents.
116 | # keep_warnings = False
117 |
118 | # If true, `todo` and `todoList` produce output, else they produce nothing.
119 | todo_include_todos = False
120 |
121 | # -- Options for HTML output ----------------------------------------------
122 |
123 | # The theme to use for HTML and HTML Help pages. See the documentation for
124 | # a list of builtin themes.
125 | # Per https://github.com/snide/sphinx_rtd_theme: specify theme if not on RTD
126 | on_rtd = os.environ.get("READTHEDOCS", None) == "True"
127 | if not on_rtd: # only import and set the theme if we're building docs locally
128 | import sphinx_rtd_theme # noqa: F401
129 |
130 | html_theme = "sphinx_rtd_theme"
131 |
132 | # Theme options are theme-specific and customize the look and feel of a theme
133 | # further. For a list of options available for each theme, see the
134 | # documentation.
135 | # html_theme_options = {}
136 |
137 | # Add any paths that contain custom themes here, relative to this directory.
138 | # html_theme_path = []
139 |
140 | # The name for this set of Sphinx documents. If None, it defaults to
141 | # " v documentation".
142 | # html_title = None
143 |
144 | # A shorter title for the navigation bar. Default is the same as html_title.
145 | # html_short_title = None
146 |
147 | # The name of an image file (relative to this directory) to place at the top
148 | # of the sidebar.
149 | # html_logo = None
150 |
151 | # The name of an image file (within the static path) to use as favicon of the
152 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
153 | # pixels large.
154 | # html_favicon = None
155 |
156 | # Add any paths that contain custom static files (such as style sheets) here,
157 | # relative to this directory. They are copied after the builtin static files,
158 | # so a file named "default.css" will overwrite the builtin "default.css".
159 | html_static_path = ["_static"]
160 |
161 | # Add any extra paths that contain custom files (such as robots.txt or
162 | # .htaccess) here, relative to this directory. These files are copied
163 | # directly to the root of the documentation.
164 | # html_extra_path = []
165 |
166 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
167 | # using the given strftime format.
168 | # html_last_updated_fmt = '%b %d, %Y'
169 |
170 | # If true, SmartyPants will be used to convert quotes and dashes to
171 | # typographically correct entities.
172 | # html_use_smartypants = True
173 |
174 | # Custom sidebar templates, maps document names to template names.
175 | # html_sidebars = {}
176 |
177 | # Additional templates that should be rendered to pages, maps page names to
178 | # template names.
179 | # html_additional_pages = {}
180 |
181 | # If false, no module index is generated.
182 | # html_domain_indices = True
183 |
184 | # If false, no index is generated.
185 | # html_use_index = True
186 |
187 | # If true, the index is split into individual pages for each letter.
188 | # html_split_index = False
189 |
190 | # If true, links to the reST sources are added to the pages.
191 | html_show_sourcelink = False
192 |
193 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
194 | # html_show_sphinx = True
195 |
196 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
197 | # html_show_copyright = True
198 |
199 | # If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
201 | # base URL from which the finished HTML is served.
202 | # html_use_opensearch = ''
203 |
204 | # This is the file name suffix for HTML files (e.g. ".xhtml").
205 | # html_file_suffix = None
206 |
207 | # Language to be used for generating the HTML full-text search index.
208 | # Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
211 | # html_search_language = 'en'
212 |
213 | # A dictionary with options for the search language support, empty by default.
214 | # Now only 'ja' uses this config value
215 | # html_search_options = {'type': 'default'}
216 |
217 | # The name of a javascript file (relative to the configuration directory) that
218 | # implements a search results scorer. If empty, the default will be used.
219 | # html_search_scorer = 'scorer.js'
220 |
221 | # Output file base name for HTML help builder.
222 | htmlhelp_basename = "baselayerdoc"
223 |
224 | # -- Options for LaTeX output ---------------------------------------------
225 |
226 | latex_elements = {
227 | # The paper size ('letterpaper' or 'a4paper').
228 | # 'papersize': 'letterpaper',
229 | # The font size ('10pt', '11pt' or '12pt').
230 | # 'pointsize': '10pt',
231 | # Additional stuff for the LaTeX preamble.
232 | # 'preamble': '',
233 | # Latex figure (float) alignment
234 | # 'figure_align': 'htbp',
235 | }
236 |
237 | # Grouping the document tree into LaTeX files. List of tuples
238 | # (source start file, target name, title,
239 | # author, documentclass [howto, manual, or own class]).
240 | latex_documents = [
241 | (
242 | main_doc,
243 | "baselayer.tex",
244 | "baselayer Documentation",
245 | "The baselayer Team",
246 | "manual",
247 | ),
248 | ]
249 |
250 | # The name of an image file (relative to this directory) to place at the top of
251 | # the title page.
252 | # latex_logo = None
253 |
254 | # For "manual" documents, if this is true, then toplevel headings are parts,
255 | # not chapters.
256 | # latex_use_parts = False
257 |
258 | # If true, show page references after internal links.
259 | # latex_show_pagerefs = False
260 |
261 | # If true, show URL addresses after external links.
262 | # latex_show_urls = False
263 |
264 | # Documents to append as an appendix to all manuals.
265 | # latex_appendices = []
266 |
267 | # If false, no module index is generated.
268 | # latex_domain_indices = True
269 |
270 |
271 | # -- Options for manual page output ---------------------------------------
272 |
273 | # One entry per manual page. List of tuples
274 | # (source start file, name, description, authors, manual section).
275 | man_pages = [(main_doc, "baselayer", "baselayer Documentation", [author], 1)]
276 |
277 | # If true, show URL addresses after external links.
278 | # man_show_urls = False
279 |
280 |
281 | # -- Options for Texinfo output -------------------------------------------
282 |
283 | # Grouping the document tree into Texinfo files. List of tuples
284 | # (source start file, target name, title, author,
285 | # dir menu entry, description, category)
286 | texinfo_documents = [
287 | (
288 | main_doc,
289 | "baselayer",
290 | "baselayer Documentation",
291 | author,
292 | "baselayer",
293 | "One line description of project.",
294 | "Miscellaneous",
295 | ),
296 | ]
297 |
298 | # Documents to append as an appendix to all manuals.
299 | # texinfo_appendices = []
300 |
301 | # If false, no module index is generated.
302 | # texinfo_domain_indices = True
303 |
304 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
305 | # texinfo_show_urls = 'footnote'
306 |
307 | # If true, do not generate a @detailmenu in the "Top" node's menu.
308 | # texinfo_no_detailmenu = False
309 |
310 |
def setup(app):
    """Sphinx extension hook: register the custom stylesheet for
    notebook output cells with the HTML builder."""
    app.add_css_file("output_cells.css")
313 |
--------------------------------------------------------------------------------
/doc/dev.md:
--------------------------------------------------------------------------------
1 | # Developer notes
2 |
3 | ## Testing
4 |
5 | To execute the test suite:
6 |
- Install Geckodriver from [https://github.com/mozilla/geckodriver/releases](https://github.com/mozilla/geckodriver/releases) (or let `webdriver-manager` download it automatically)
- Install Firefox
9 | - To run all tests: `make test`
- To run a single test: `./tools/test_frontend.py skportal/tests/frontend/<test_file>.py::test_<test_name>`
11 |
12 | On Linux, the tests can be run in "headless" mode (no browser display):
13 |
- Install Xvfb (`sudo apt-get install xvfb`)
15 | - `make test_headless`
16 |
17 | ## Debugging
18 |
19 | - Run `make log` to watch log output
20 | - Run `make stop` to stop any running web services.
21 | - Run `make attach` to attach to output of webserver, e.g. for use with `pdb.set_trace()`
22 | - Run `make check-js-updates` to see which Javascript packages are eligible for an upgrade.
23 |
24 | ## Database
25 |
26 | All interactions with the database are performed by way of SQLAlchemy using the
Psycopg2 backend. Some standard but not necessarily obvious usage patterns we
28 | make use of include:
29 |
30 | - Logic for connecting to the DB, refreshing tables, etc. is found in `baselayer/model_utils.py`:
31 |
32 | ```
33 | from baselayer.app.env import load_env
34 | from baselayer.models import DBSession, init_db
35 | env, cfg = load_env()
36 | init_db(**cfg['database'])
37 | ```
38 |
39 | - The session object controls various DB state operations:
40 |
41 | ```
42 | DBSession().add(obj) # add a new object into the DB
43 | DBSession().commit() # commit modifications to objects
44 | DBSession().rollback() # recover after a DB error
45 | ```
46 |
47 | - Generic logic applicable to any model is included in the base model class `baselayer.app.models.Base` (`to_dict`, `__str__`, etc.), but can be overridden within a specific model
48 | - Models can be selected directly (`User.query.all()`), or more specific queries can be constructed via the session object (`DBSession().query(User.id).all()`)
49 | - Convenience functionality:
  - Join relationships: some multi-step relationships are defined through joins using the `secondary` parameter to eliminate queries from the intermediate table; e.g., `User.acls` instead of `[r.acls for r in User.roles]`
51 | - [Association proxies](http://docs.sqlalchemy.org/en/latest/orm/extensions/associationproxy.html): shortcut to some attribute of a related object; e.g., `User.permissions` instead of `[a.id for a in User.acls]`
52 | - [Joined loads](http://docs.sqlalchemy.org/en/latest/orm/loading_relationships.html): this allows for a single query to also include child/related objects; often used in handlers when we know that information about related objects will also be needed.
53 | - `to_json()`: often from a handler we return an ORM object, which gets converted to JSON via `json_util.to_json(obj.to_dict())`. This also includes the attributes of any children that were loaded via `joinedload` or by accessing them directly. For example:
54 | - `User.query.first().to_dict()` will not contain information about the user's permissions
55 | - `u = User.query.first(); u.acls; u.to_dict()` does include a list of the user's ACLs
56 |
57 | ## New SQL Alchemy 2.0 style select statements
58 |
59 | To start a session without verification (i.e., when not committing to DB):
60 |
61 | ```
62 | with DBSession() as session:
63 | ...
64 | ```
65 |
66 | The context manager will make sure the connection is closed when exiting context.
67 |
68 | To use a verified session that checks all rows before committing them:
69 |
70 | ```
71 | with VerifiedSession(user_or_token) as session:
72 | ...
73 | session.commit()
74 | ```
75 |
76 | This does the same checks that are performed when calling `self.verify_and_commit()`.
77 | Each handler class can also call `self.Session()` as a stand-in for `VerifiedSession(self.current_user)`:
78 |
79 | ```
with self.Session() as session:
81 | ...
82 | session.commit()
83 | ```
84 |
85 | To quickly get rows from a table using the new "select" methods, use one of these (replace `User` with any class):
86 |
87 | ```
88 | user = User.get(id_or_list, user_or_token, mode='read', raise_if_none=False, options=[])
89 | all_users = User.get_all(user_or_token, mode='read', raise_if_none=False, options=[], columns=None)
stmt = User.select(user_or_token, mode='read', options=[], columns=None)
91 | ```
92 |
93 | The `get` and `get_all` functions open a session internally and retrieve the objects specified,
94 | if they are accessible to the user. In the case of `get`, if any of the IDs given (as a scalar or list)
are not accessible or do not exist in the DB, the function returns None, or raises an `AccessError`
96 | (if `raise_if_none=True` is specified). The `get_all` just retrieves all rows that are accessible from that table.
97 | Note that these two methods will produce an object _not associated with the external session, if any_.
98 | Thus, if the call is made while an external context is used,
99 | the object has to be added to that session before it can, e.g., load additional relationships,
100 | or be saved, or do any other operation that involves the database.
101 | As an example:
102 |
103 | ```
104 | with self.Session() as session:
105 | user = User.get(user_id, self.current_user, mode='read')
106 | session.add(user) # must have this to load additional relationships
107 | tokens = user.tokens # will fail if user is not in session
108 | ```
109 |
110 | On the other hand, the `select` function will return
111 | a select statement object that only selects rows that are accessible.
112 | This statement can be further filtered with `where()` and executed using the session:
113 |
114 | ```
115 | with VerifiedSession(user_or_token) as session:
116 | stmt = User.select(user_or_token).where(User.id == user_id)
117 | user = session.execute(stmt).scalars().first() # returns a tuple with one object
118 | # can also call session.scalars(stmt).first() to get the object directly
119 | user.name = new_name
120 | session.commit()
121 | ```
122 |
123 | If not using `commit()`, the call to `VerifiedSession(user_or_token)`
124 | can be replaced with `DBSession()` with no arguments.
125 |
126 | ## Standards
127 |
128 | We use ESLint to ensure that our JavaScript & JSX code is consistent and conforms with recommended standards.
129 |
130 | - Install ESLint using `make lint-install`. This will also install a git pre-commit hook so that any commit is linted before it is checked in.
131 | - Run `make lint` to perform a style check
132 |
133 | ## Upgrading Javascript dependencies
134 |
135 | The `./tools/check_js_updates.sh` script uses
136 | [`npm-check`](https://github.com/dylang/npm-check) to search updates
137 | for packages defined in `package.json`. It then provides an
138 | interactive interface for selecting new versions and performing the upgrade.
139 |
--------------------------------------------------------------------------------
/doc/extending.md:
--------------------------------------------------------------------------------
1 | # Extending baselayer
2 |
3 | ## Modifying the Tornado application
4 |
5 | A Python function can be specified in the configuration as
6 | `app.factory`, that will be used to create the Tornado
7 | application. This is often needed to add additional routes, or do
8 | certain setup procedures before the application is run.
9 |
10 | The function should have the following argument signature:
11 |
12 | ```
13 | def make_app(config, baselayer_handlers, baselayer_settings)
14 | ```
15 |
16 | The configuration is passed in as the first parameter, followed by
17 | baselayer-handlers (those should be appended to your Tornado handlers,
18 | to put in place system endpoints such as logging in). The last
19 | argument contains baselayer-specific Tornado configuration.
20 |
21 | A typical `make_app` could be:
22 |
23 | ```
24 | from baselayer.app.app_server import MainPageHandler
25 |
26 | def make_app(config, baselayer_handlers, baselayer_settings):
27 | handlers = baselayer_handlers + [
28 | (r'/my_page', MyPageHandler),
29 | (r'/.*', MainPageHandler)
30 | ]
31 |
32 | settings = baselayer_settings
33 | settings.update({
34 | 'tornado_config_key': 'tornado_config_value'
35 | }) # Specify any additional settings here
36 |
37 | app = tornado.web.Application(handlers, **settings)
38 | return app
39 | ```
40 |
41 | ## Templating
42 |
43 | Often, values inside your JavaScript code or engineering configuration
44 | files (nginx, supervisor, etc.) depend on settings inside
45 | `config.yaml`. To simplify propagating these values, `baselayer`
46 | provides templating functionality, applied to files named
47 | `*.template`, before running the application. The template engine used
48 | is [Jinja2](https://jinja.palletsprojects.com).
49 |
50 | The configuration file is injected into the template, so you can
51 | include their values as follows:
52 |
53 | ```
54 | The database port is {{ database.port }}.
55 | ```
56 |
57 | When you launch the `run` or `run_production` targets for baselayer,
58 | it will automatically fill out all template files. Alternatively, you
59 | can run the templating manually:
60 |
61 | ```
62 | ./baselayer/tools/fill_conf_values.py --config="config.yaml" static/js/component.jsx.template
63 | ```
64 |
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | Baselayer
2 | ---------
3 |
4 | .. toctree::
5 | :maxdepth: 2
6 |
7 | setup
8 | dev
9 | usage
10 | extending
11 |
12 | .. Indices and tables
13 | .. ==================
14 |
15 | .. * :ref:`genindex`
16 | .. * :ref:`modindex`
17 | .. * :ref:`search`
18 |
--------------------------------------------------------------------------------
/doc/setup.md:
--------------------------------------------------------------------------------
1 | # Setup
2 |
3 | This guide describes how to get a webapp, derived from `baselayer`, up
4 | and running. A simple example of such an application is provided as a
5 | [template application](https://github.com/cesium-ml/baselayer_template_app).
6 |
7 | Clone that application, and then proceed with the following instructions.
8 |
9 | ## Installation
10 |
11 | - A **Python 3.8** or later installation is required.
12 | - Install the following dependencies: Supervisor, NGINX, PostgreSQL, Node.JS
13 |
14 | ### On macOS
15 |
16 | - Using [Homebrew](http://brew.sh/): `brew install supervisor postgresql node`
17 | - If you want to use [brotli compression](https://en.wikipedia.org/wiki/Brotli) with NGINX (better compression rates for the frontend), you can install NGINX with the `ngx_brotli` module with this command: `brew tap denji/nginx && brew install nginx-full --with-brotli`. _If you already had NGINX installed, you may need to uninstall it first with `brew unlink nginx`._ Otherwise, you can install NGINX normally with `brew install nginx`.
18 | - Start the postgresql server:
19 | - to start automatically at login: `brew services start postgresql`
20 | - to start manually: `pg_ctl -D /usr/local/var/postgres start`
21 | - Using [MacPorts](https://www.macports.org): `port install nginx +realip postgresql13-server npm7`
22 | - Start the postgresql server: `port load postgresql13-server`
23 |
24 | #### Port Number Configuration with macOS
25 |
26 | The default port number used by the baselayer app is 5000, but this port is not available for use with all operating systems.
27 | Port 5000 is not free for the latest macOS version, Monterey.
28 |
29 | If 5000 is not available, you will need to modify the `config.yaml` file to use another port. For example, you may use:
30 |
31 | ```yaml
32 | ports:
33 | app: 5700
34 | ```
35 |
36 | See [below](#configuration) for more information on modifying the baselayer configuration file.
37 |
38 | ### On Linux
39 |
40 | - Using `apt-get`:
41 | `sudo apt-get install supervisor postgresql libpq-dev npm nodejs-legacy`
42 |
43 | If you want to use [brotli compression](https://en.wikipedia.org/wiki/Brotli) with NGINX (better compression rates for the frontend), you have to install NGINX and the brotli module from another source with:
44 |
45 | ```
46 | sudo apt remove -y nginx nginx-common nginx-core
47 | sudo add-apt-repository ppa:ondrej/nginx-mainline -y
48 | sudo apt update -y
49 | sudo apt install -y nginx libnginx-mod-brotli
50 | ```
51 |
52 | Otherwise, you can install NGINX normally with `sudo apt-get install nginx`.
53 |
54 | - It may be necessary to configure your database permissions: at
55 | the end of your `pg_hba.conf` (typically in `/etc/postgresql/13.3/main` or `/var/lib/pgsql/data`),
56 | add the following lines and restart PostgreSQL
57 | (`sudo service postgresql restart` or `systemctl reload postgresql`):
58 |
59 | ```
60 | # CONNECTION DATABASE USER ADDRESS METHOD
61 | host template_app template_app localhost trust
62 | host all postgres localhost trust
63 | ```
64 |
65 | Substitute the correct database name and user, as defined in your `config.yaml`.
66 |
67 | ## Building the baselayer database
68 |
69 | - Initialize the database with `make db_init` (also tests that your
70 | permissions have been properly configured).
71 |
72 | - Run `make` to start the server and navigate to `localhost:5000`. If you have modified the baselayer configuration to use a different app port, you should instead navigate to `localhost:PORTNUMBER`.
73 |
74 | ## Configuration
75 |
76 | - Customize `config.yaml` (see `config.yaml.defaults` for all options).
77 | - Always modify `secret_key` before deployment!
78 | - If you want other users to be able to log in:
79 | - Provide Google auth credentials, obtained as described in `config.yaml`.
80 |
81 | ## Launch
82 |
83 | Launch the app with `make run`.
84 |
85 | ## Deployment options
86 |
87 | The default configuration file used can be overridden by setting the
88 | FLAGS environment variable:
89 |
90 | ```
91 | FLAGS="--config=myconfig.yaml" make run
92 | ```
93 |
94 | ## Debug mode
95 |
96 | By default, `baselayer` runs in debug mode. In debug mode:
97 |
98 | - The server binds to localhost, not 0.0.0.0 (i.e., is not publicly
99 | accessible).
100 | - Authentication always succeeds, but does not connect to any oauth
101 | provider.
102 | - Code changes cause automatic reloads of the app, and recompilation
103 | of Javascript bundles.
104 |
105 |
106 | When switching to production mode (`debug` set to False in the config
107 | file):
108 |
109 | - The server binds to 0.0.0.0.
110 | - Javascript bundles are not compiled; they need to be pre-compiled
111 | using `make bundle`.
112 |
--------------------------------------------------------------------------------
/doc/usage.md:
--------------------------------------------------------------------------------
1 | # Usage
2 |
3 | The premiere application of `baselayer` is
4 | [SkyPortal](https://skyportal.io). Several pieces of functionality
5 | have been implemented there, but have not been backported to
6 | `baselayer` yet. Please refer to the SkyPortal documentation, and if
7 | you see a feature you'd like to use, file an issue so we can bring it
8 | in.
9 |
10 | ## Permissions
11 |
12 | Access to resources in Skyportal is controlled in two ways:
13 |
14 | - _Roles_ are sets of site-wide permissions (_ACLs_) that allow a user to perform certain actions: e.g, create a new user, upload spectra, post comments, etc.
15 | - _Groups_ are sets of sources that are accessible to members of that group
16 | - Members can also be made an _admin_ of the group, which gives them group-specific permissions to add new users, etc.
17 |   - The same source can belong to multiple groups
18 |
19 | ## Microservices
20 |
21 | Baselayer uses stand-alone micro-services whenever possible. These
22 | services are monitored by supervisord, and by default include nginx, the web
23 | app, a cron-job handler, the websocket server, etc.
24 |
25 | Services are configured in the `config.yaml` file (defaults in
26 | `config.yaml.defaults`), and are discovered by path:
27 |
28 | ```
29 | services:
30 | paths:
31 | - ./baselayer/services
32 | - ./services
33 | enabled:
34 | disabled:
35 | ```
36 |
37 | For example, the `cron` microservice lives in
38 | `./baselayer/services/cron`. In that directory, there is a
39 | `supervisor.conf` file and any other files that implement the
40 | microservice (in this case, `cron.py`).
41 |
42 | A microservice is loaded by injecting the `supervisor.conf` into the
43 | `supervisor.conf` file used by the entire system.
44 |
45 | By default, all discovered microservices are loaded, but this can be
46 | customized through the `services.enabled` and `services.disabled`
47 | configuration keys. `services.disabled` can be set to `'*'` to disable
48 | all services. E.g., to only load the `cron` service, you would do:
49 |
50 | ```
51 | services:
52 | paths:
53 | - ./baselayer/services
54 | - ./services
55 | enabled:
56 | - cron
57 | disabled: '*'
58 | ```
59 |
60 | Sometimes, the supervisor configuration needs information from the
61 | configuration file, therefore `supervisor.conf` can instead be
62 | provided as `supervisor.conf.template`, which will be compiled before
63 | launching. See, e.g., `services/dask`.
64 |
65 | ## Web Application
66 |
67 | Baselayer comes with a microservice capable of bundling a web application. A great example of this is given in the [template application](https://github.com/cesium-ml/baselayer_template_app). When building your own application on top of baselayer, you'll need to add your own `static` directory at the root of your project, as well as a `rspack.config.js` file to bundle your application. The `rspack.config.js` from the template application is a good starting point, and will be sufficient for most use cases. Instead of using the very popular [webpack](https://webpack.js.org/), we use [rspack](https://rspack.dev/) as a 1:1 replacement. It covers all the features needed by baselayer, but offers much faster build times in development and production modes. We've noticed a x2 speedup on average when building a complex & heavy web app such as [SkyPortal](https://github.com/skyportal/skyportal), and at least a x5 speedup when re-building the app in watch mode, which we use to update the web app in real-time when developing.
68 |
--------------------------------------------------------------------------------
/github_deploy_key.enc:
--------------------------------------------------------------------------------
1 | gAAAAABZ87Lf7OKQL3kFNU7SXQ0EDkJOWMXEou8SsmN1j4Z3ulYn2F2NxcYDJk8wWOK-HMpQvDoFbksfHv9omDBFuLWfyxSvgGapqmq8x0FChYl-PlgHYXO1cUymPwsRDdOf8NHMdiG0Gdkpm8-1JpUtO7CqzpXSO_p5uqwkRpf1Eu5U1sSOYfqOPx8qjic3R7qCM_BuuK5OyS-EpDqU_d9guM11Bhi7Uy0eqbFbQOg45gegeyLIn8RZd3OQLgar2Hvsxwb4SSCv6QeRJ9qqXmNRPUp1jMxWu8sIrUumoPRGVlF3GRpeylL3umDp5BjgAEx21XqAd_t_Jcgw5o7AePGosPBbl-7mYfCOxsMYmfWZX72jakl3RWBiJEZ1zrNcF1UBhEhuRoWH_E27FNHNkGtWR1ugkuzsMUWx9I8C9NKGy17XfRGv_AVNpkQ7byp7cqx6pwEby-ImC0DhnhodAuJCr-EN4onL5q-py7ntbwbu6od6Bf9TIhwtn4Zml7yLT46NvsvCQsDm_FQzvUHmfbuYPoBXNDiVBgJBJAgtR2nhflTq2vsDQX8ZUaLstIcZ7nzING3Fk8AmgPtM660HKIsQ-AvOedCpDHSMYJcCYckWxYPkeoNbso0KQjyp6HyHt2Uo5igN7OT-IPwPJ5cyEAYwIe5r7TEB-Tl9ZZ_e3QToZYKV6B8V8PDfbkKAB2NVR_dnZ-auEM2xz27JQLL-6iKnBDpzh4IfODg4b4BXwkMm73zvwJUZf5FCPTx0b8zrqdXBs1PPF5qd8RNNAy7AbonIMHGtA_U4MGO2HyeRSaMw7vGeautF4rF8zIPKCtloshCsnAKxPXxDgOdSqf7AjWEXWcH8xWA810S7RfAV_N6NmOCDmRSy4Li8AsLZDFjLsmcUt0VODDekgTE-n80665ADpUH2rn2hZ-aX3jcJ-OX2fMfrJzSvnmM_YENZXb26I9CT_X3fDACWq5QRxaAWuxJLn5W2wC-xulKjLQnHwvdmuEJeex1PUNxIrmmQ2lVhdjAaXOo5kTlOE_p58kcja0D3LYp7Lmr58sjoxdMFtCC6_w6JMoELQWgt38Bx922rfv36WQ6gcdBVQLZzYoai7WGGc3iNOYDKN_rRbTo5KpDKLBs4ai4KIt3Vc-uELyR0ICppNONeM_hOXo1LeHqA03NC3t5PhsgSTALtGg0fluRjm-MM-JMoLUXWDgZJM-LZRRXJA21ha9tzzAAU3zmo2VGaB-lQR_UTUNsw_GjykOh8zI9J-CnVWSSTtxRvaYrBjuNrijqbj_GdVjlHBY3VpFJ-UZIA2zTNEckBpVjuo4KENLSa3bznyjC-Cy0yDE8wa5L7eXisfifUtAOouyz2Ks1sWug48QMNofjxWs8uiWkwjJVCTjQujfjFdfi6VIoqLmp9sTsPMsoWRZVDE7n3Ml2LjzeMt-0XvsX7PaPs0J1E71hQcR9eW8sdfEf0NlGfDCcqyeZEXFAzQEWpGM5yBF4_02Xo0ho5o0jInTscAQvP_16Nm1I9iYtPtELDvU9wWOFaZMdJFOho0EbEuexHQM5lgnDrk2FB5rn3tBVOsalOTZSJHzDAmsZnAihfMuInIZyY5UPnZLOQ6qlBTjEL8H6IxLBqQgAJv2aJFzcOc2Rwh0mfmLuqn9SBejBJMl8TySSspYv9Df9IHE_8Do3u26DMab9uqAlJtUouih0J6dIvAEJxCB9ym1d6gCJvekKEf2QSogtYGxk5ludpZlD6Y1s1oxpODcZCVgNa63CfkSd2Dy4kDHmcO1mxSPa5VbG4yid_cy30S8fqK54FwOVUI-qgFyEWKL9akHM-VQ7xccgUJjSDTraow12hezi4dExs2c4q9OMCuYiuSNnjOBWTos14xZFSo_F02flRXKkOWj4-h07TlpD1lFlD6AP_m9wbMA1dOnBJV2pfd2zt1-UU-lIzsqKeZNlfYkKxmUZrjAJd-X6wyE90buYwqdPy4mDBsrFg56hW4Dka
y68rlygqKWBLTpsvcg_vX4S8BFdo2EpNwlhjIx-q2792_sSd9VVYCeDSy0sB8dYsnL47IvhdaOtie6i-Jx5YH-hRcyZHP5haEWigrzcuv_91XCq8t37PDzR4zbJ70bDH9pFex0I-r1VoornAPg0L6orXUE99PzMOQ1rXGBPjxd5CeS8_xpXvBaGES8aCVhdQyFjkia8uwuLMJm17ftBIaib5YzplV6JngLavQdvGrUBNRLLVyaCvf2UIhTUjgXFuyIPGCxP5Wuo0OZYWGilTDmDxlwlQeFjZI7yHJHpJJdOquhqHF7-3BApoXq1Zx42CzwXrMIpMyWg2k9mnk8tW3O53HL2ApgtBLdtl1ztCXyztLaemSz3VEbBByfE4QIqlzO7pP2P8sI_7pY6cJxvGfpe6a4XNbSB-5OcGZmtCOaJSqRya1WkrO3LirEFe_XQ1ZH2WJoXkfJZ7Vdfmyhgm5JTqqAQYalZvUIP4OQalHbz4yCFfV7TRBKZaqIoWO8iWPxrgjgOZbEmdnj1i7XCNUyui1rHvYMgQg8yNyO10RnQ7V5ox_iFIxnhpoEDEsh7-rzV_HnE8RGHmL52nCAfVOc13nMhq4Gk_eg0XFpuuY3K05_3154fq3jxuncm5IDFfhdsJxKzew_HbTbEZuOMAuiWtOvFcLF_llaj5Z8NYdE520sSPlh0ljR8a_XK47-y1SkkBcMcT0QlCcx3UG04fOI3oNGLkeyNbet5_yH0Zrmd0Y0rRlDu6JrZqCjYsQLDKDNgWfL2ZqCeXxUCuEhRC8hiF6KolXgKzKO7P1GJoiAwUCCm_urS-8_Ex81UZEKqyUAbzWwISvdzK0nHMIpz3UHqeY_dB2apvvzNZA5P_11CY7I2yXFJ-yTBZ5w4LcKEGoNlSwfqF6iF3kHkP-heFZXF2mj9Ie3EUSnXDx_NNL1VXwQkizj-t7fbjNCbAN4PIbqABh8S-fQFD44ASWt5c1NA2YippbJrub1yATPQQiWuNeh0U7G8roMf_2gsQRe73BQAZsl1AvOlty58IRxTD1YVotCMtlzcdD6L8CiWK4XxwJJSVc136s6N3zKvFOKz8Nbz7cEkLj29yY0O-r02aBWmlygTRQB-_Zynl9MKeHdc56MCuef5ohd1IdxdovCV_26AEnM_8thQrwbvc28BI1nwgEbNc_L34Vlx6tGOr7mKACH_EBwlrU4wd2ECUCRG0vx87CeXoYVP_lMbLqXki6ftdTrkoo_AwVJddklr4LHSQCMEOcOF4C_QShuJZelUXInqYjdmaAOFSa3CRBzukk-mE8mhoGTAYSAh9_rVrejA6NN-glFGB_Uk5dWl9sxIuZEig1kRL8rgx8V_k-fKNAgwENA_fli37JGAEbFQrSibnUSgbq2pSEfNCPUJAA7u_rF_q4_DMOUZlPB_Y22ODLA1a87r5W-atJiWVlxkxrpuXUzFPgM04KAAMizhopErwkflR3AKDjUouT6rjAO9pdi4wHXlCQDes72Do8o7pLQsfvNHOBrXfgzNvjlKTlTbfgURF2QMda7bVcuT4W8XwCCzbuvO4S23WDEVqEF6vZkiVqT42Db7uV-O45pj9qKCItwCTsPNXUEZPKn8t-x8Y5ZVANbCQf0lO87-DKx5WB4y71-DN9JfZqkMMmXF6n2zrfWLQYwD1eUjZUQStiu_LsGGdSDejkVOX0UeZ7-VKr2nH_alf_-mzdBRQBdfHw2VzAYMUoztK9Leukd6dj3PG1ZC6--rXPUR6BHiYdHZHwsK0e7nFL4Y3Yz2yav_Xy_e941-c0Be0MfU5hnH_0Zqs4aBdxUX-Jn9qteiyjoIT_WgD0cgKTAusyMEiqTkN62Bg6h9kRvxeGhSk8jZZ6UzWH5_EeolJNPSgbkqY-Fp1pjmKDNiYb6oy72giQRo0_P-nLMJwTHTSL9b1_f28Ug1Zlzzxbjplmrzv_KtAIWcEWbDYz8Jru3CgVzwZiCbqXp0lldfnMR23eIXk9lmw
XmQxThjfFYE2F6m2bRAUEx7vTpl7bZvzmARsomu8bGpnUE1ZpGU0L1wG0A9MhKDxLiemul3zE3OJ0Pw5RVmmoP6JbEMRUIkVv5AEMfwWtthJb8MTJn0IDVr27p8dppTL2RHlr8L8xm7dyUS9_z3_x7KGz5-Q7h_gWgrCrTF1qJWtG-_AUnR26fnNfDIyvwmCbrz-MIfwPKHAV0jdkyEarA_wRUrCrHXmmMPWxyJR6hiDOv9jit22LzXSRbj6jdWjnPVXdyeOspV0b-IVoFU_xep-IPdjUCl-u6kRfcAhirFk1lfOF8HYAx0pNEDFBWWXuCPOh1vnmtYOqayMjcsa0xeRAqwpJvPPhAWt85ywdWCirvPsGobGGfSVisZIAYFPZBhJqVKrNbo-RIV-AjVFDMb3YXE=
2 |
--------------------------------------------------------------------------------
/log.py:
--------------------------------------------------------------------------------
1 | import zlib
2 | from datetime import datetime
3 |
4 | BOLD = "\033[1m"
5 | NORMAL = "\033[0;0m"
6 |
7 |
8 | COLOR_TABLE = [
9 | "black",
10 | "red",
11 | "green",
12 | "yellow",
13 | "blue",
14 | "magenta",
15 | "cyan",
16 | "white",
17 | "default",
18 | ]
19 |
20 |
21 | def colorize(s, fg=None, bg=None, bold=False, underline=False, reverse=False):
22 | """Wraps a string with ANSI color escape sequences corresponding to the
23 | style parameters given.
24 |
25 | All of the color and style parameters are optional.
26 |
27 | This function is from Robert Kern's grin:
28 |
29 | https://github.com/cpcloud/grin
30 |
31 | Copyright (c) 2007, Enthought, Inc. under a BSD license.
32 |
33 | Parameters
34 | ----------
35 | s : str
36 | fg : str
37 | Foreground color of the text. One of (black, red, green, yellow, blue,
38 | magenta, cyan, white, default)
39 | bg : str
40 | Background color of the text. Color choices are the same as for fg.
41 | bold : bool
42 | Whether or not to display the text in bold.
43 | underline : bool
44 | Whether or not to underline the text.
45 | reverse : bool
46 | Whether or not to show the text in reverse video.
47 |
48 | Returns
49 | -------
50 | A string with embedded color escape sequences.
51 | """
52 |
53 | style_fragments = []
54 | if fg in COLOR_TABLE:
55 | # Foreground colors go from 30-39
56 | style_fragments.append(COLOR_TABLE.index(fg) + 30)
57 | if bg in COLOR_TABLE:
58 | # Background colors go from 40-49
59 | style_fragments.append(COLOR_TABLE.index(bg) + 40)
60 | if bold:
61 | style_fragments.append(1)
62 | if underline:
63 | style_fragments.append(4)
64 | if reverse:
65 | style_fragments.append(7)
66 | style_start = "\x1b[" + ";".join(map(str, style_fragments)) + "m"
67 | style_end = "\x1b[0m"
68 | return style_start + s + style_end
69 |
70 |
71 | def log(app, message):
72 | color_table = ["red", "green", "yellow", "blue", "magenta", "cyan", "white"]
73 | color = color_table[zlib.crc32(app.encode("ascii")) % len(color_table)]
74 | timestamp = datetime.now().strftime("%H:%M:%S")
75 | formatted_message = f"[{timestamp} {app}] {message}"
76 | print(colorize(formatted_message, fg=color, bold=True))
77 |
78 |
79 | def make_log(app):
80 | def app_log(*args, **kwargs):
81 | log(app, *args, **kwargs)
82 |
83 | return app_log
84 |
--------------------------------------------------------------------------------
/requirements.docs.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | recommonmark
3 | numpydoc
4 | sphinx-rtd-theme
5 | notedown
6 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | supervisor>=4.2.1
2 | psycopg2-binary>=2.8.6
3 | pyyaml>=5.3.1
4 | tornado>=6.0.3
5 | pyzmq>=20.0.0
6 | pyjwt>=2.0.1
7 | distributed>=2023.1.1
8 | simplejson>=3.17.2
9 | requests>=2.25.1
10 | selenium>=4.3.0
11 | selenium-requests>=2.0.0
12 | pytest>=5.4.3
13 | sqlalchemy==2.0.0
14 | sqlalchemy-utils>=0.36.8
15 | social-auth-core==4.2.0
16 | arrow>=0.15.4
17 | jinja2>=2.11.2
18 | python-dateutil>=2.8.1
19 | phonenumbers>=8.12.15
20 | python-slugify>=4.0.1
21 | numpy>=1.21.4
22 | webdriver-manager>=4.0.1
23 |
--------------------------------------------------------------------------------
/services/app/app.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import time
3 |
4 | import requests
5 | import tornado.ioloop
6 | import tornado.log
7 |
8 | from baselayer.app.env import load_env, parser
9 | from baselayer.log import make_log
10 |
11 | parser.description = "Launch app microservice"
12 | parser.add_argument(
13 | "-p",
14 | "--process",
15 | type=int,
16 | help="Process number, when multiple server processes are used."
17 | " This number gets added to the app port.",
18 | )
19 | env, cfg = load_env()
20 |
21 | log = make_log(f"app_{env.process or 0}")
22 |
23 | # We import these later, otherwise them calling load_env interferes
24 | # with argument parsing
25 | from baselayer.app.app_server import handlers as baselayer_handlers # noqa: E402
26 | from baselayer.app.app_server import settings as baselayer_settings # noqa: E402
27 |
28 | app_factory = cfg["app.factory"]
29 | baselayer_settings["cookie_secret"] = cfg["app.secret_key"]
30 | baselayer_settings["autoreload"] = env.debug
31 |
32 |
def migrated_db(migration_manager_port, timeout=5):
    """Ask the migration manager service whether the database is migrated.

    Parameters
    ----------
    migration_manager_port : int
        Port on localhost where the migration manager listens.
    timeout : float, optional
        Seconds to wait for the HTTP request (default 5). Without a
        timeout, `requests.get` can block indefinitely if the service
        is unresponsive.

    Returns
    -------
    bool or None
        The "migrated" flag reported by the service, or None when the
        service cannot be reached or returns an unexpected payload.
    """
    port = migration_manager_port
    try:
        r = requests.get(f"http://localhost:{port}", timeout=timeout)
        # ValueError covers a non-JSON response body.
        status = r.json()
    except (requests.exceptions.RequestException, ValueError):
        log(f"Could not connect to migration manager on port [{port}]")
        return None

    # A missing key is treated the same as "not migrated yet".
    return status.get("migrated")
43 |
44 |
# Before creating the app, ask migration_manager whether the DB is ready;
# poll with exponential backoff (1s, 2s, 4s, ... capped at 30s).
log("Verifying database migration status")
port = cfg["ports.migration_manager"]
timeout = 1
while not migrated_db(port):
    log(f"Database not migrated, or could not verify; trying again in {timeout}s")
    time.sleep(timeout)
    timeout = min(timeout * 2, 30)


# Resolve the dotted "module.attribute" factory path into a callable.
module, app_factory = app_factory.rsplit(".", 1)
app_factory = getattr(importlib.import_module(module), app_factory)

app = app_factory(
    cfg,
    baselayer_handlers,
    baselayer_settings,
    process=env.process if env.process else 0,
    env=env,
)
# Expose the configuration on the application object for handlers to use.
app.cfg = cfg

# Each process listens on its own port: base internal port + process number.
port = cfg["ports.app_internal"] + (env.process or 0)

# Bind to localhost only; the reverse proxy forwards external traffic here.
address = "127.0.0.1"
# xheaders=True: trust X-Real-Ip / X-Forwarded-For headers from the proxy.
app.listen(port, xheaders=True, address=address)

log(f"Listening on {address}:{port}")
tornado.ioloop.IOLoop.current().start()
74 |
--------------------------------------------------------------------------------
/services/app/supervisor.conf.template:
--------------------------------------------------------------------------------
1 | [program:app]
2 | numprocs={{ server.processes }}
3 | command=/usr/bin/env python baselayer/services/app/app.py --process=%(process_num)s %(ENV_FLAGS)s
4 | process_name=%(program_name)s_%(process_num)02d
5 | environment=PYTHONPATH=".",PYTHONUNBUFFERED=1
6 | stdout_logfile=log/app_%(process_num)02d.log
7 | redirect_stderr=true
8 |
--------------------------------------------------------------------------------
/services/cron/cron.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import sys
4 | import time
5 | from datetime import datetime
6 |
7 | import yaml
8 | from dateutil.parser import parse as parse_time
9 |
10 | from baselayer.app.env import load_env
11 | from baselayer.app.models import CronJobRun, DBSession, init_db
12 | from baselayer.log import make_log
13 |
log = make_log("cron")

env, cfg = load_env()
# Job descriptions from the "cron" config key; an absent or empty key
# yields an empty list.
jobs = cfg.get("cron") or []

init_db(**cfg["database"])

# File used to persist last-run timestamps across service restarts.
timestamp_file = ".jobs_timestamps.yaml"
22 |
23 |
class TimeCache:
    """Track when each cron job last ran, persisted to a YAML file.

    Timestamps are written to ``timestamp_file`` so they survive service
    restarts.
    """

    def __init__(self):
        # Start from the persisted timestamps when a cache file exists.
        cached = {}
        if os.path.exists(timestamp_file):
            with open(timestamp_file) as f:
                cached = yaml.full_load(f)["timestamps"]
        self.ts = cached

    def should_run(self, key, interval, limit=None):
        """Return True when the job identified by ``key`` is due.

        Parameters
        ----------
        key : str
            A key for the job, made up of `script_name+interval`.
        interval : int
            Interval, in minutes, at which to execute the job.
        limit : tuple of two time strings, optional
            Restrict execution of the job to this time-of-day bracket.
        """
        if limit is not None:
            window_start, window_end = (parse_time(t) for t in limit)
            if not (window_start < datetime.now() < window_end):
                return False

        # First sighting of this job: record "now", so the job only runs
        # after a full interval has elapsed.
        if key not in self.ts:
            self.reset(key)

        elapsed = time.time() - self.ts[key]
        return elapsed > interval * 60

    def reset(self, key):
        # Mark the job as having just run, and persist immediately.
        self.ts[key] = time.time()
        self.cache_to_file()

    def cache_to_file(self):
        # Persist all timestamps so they survive a restart.
        with open(timestamp_file, "w") as outfile:
            yaml.dump({"timestamps": self.ts}, outfile)
65 |
66 |
log(f"Monitoring {len(jobs)} jobs")

tc = TimeCache()

# Poll once a minute; run each due job synchronously and record the
# outcome (exit status and captured output) as a CronJobRun row.
while True:
    for job in jobs:
        # Jobs without an interval are ignored.
        if job.get("interval") is None:
            continue
        interval = job["interval"]
        script = job["script"]
        limit = job.get("limit")

        # Cache key: one entry per (script, interval) combination.
        key = f"{script}+{interval}"

        if tc.should_run(key, interval, limit=limit):
            log(f"Executing {script}")
            tc.reset(key)
            try:
                # Forward this service's own CLI flags to the job script;
                # stderr is folded into stdout for a single output stream.
                proc = subprocess.Popen(
                    [script, *sys.argv[1:]],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                )
                output, _ = proc.communicate()
            except Exception as e:
                log(f"Error executing {script}: {e}")
                # Record the failure itself as a run with exit status 1.
                DBSession().add(CronJobRun(script=script, exit_status=1, output=str(e)))
            else:
                DBSession().add(
                    CronJobRun(
                        script=script,
                        exit_status=proc.returncode,
                        output=output.decode("utf-8").strip(),
                    )
                )
            finally:
                DBSession().commit()

    time.sleep(60)
106 |
--------------------------------------------------------------------------------
/services/cron/supervisor.conf:
--------------------------------------------------------------------------------
1 | [program:cron]
2 | command=/usr/bin/env python baselayer/services/cron/cron.py %(ENV_FLAGS)s
3 | environment=PYTHONPATH=".",PYTHONUNBUFFERED="1"
4 | startretries=0
5 | startsecs=1
6 | stdout_logfile=log/cron.log
7 | redirect_stderr=true
8 | priority=110
9 |
--------------------------------------------------------------------------------
/services/dask/supervisor.conf.template:
--------------------------------------------------------------------------------
1 | [program:dask_scheduler]
2 | command=dask-scheduler --host 127.0.0.1 --port={{ ports.dask }} --no-dashboard
3 | environment=PYTHONPATH=".",PYTHONUNBUFFERED="1"
4 | stdout_logfile=log/dask_scheduler.log
5 | redirect_stderr=true
6 |
7 | [program:dask_worker]
8 | command=dask-worker --nthreads=1 --nworkers=4 127.0.0.1:{{ ports.dask }}
9 | environment=PYTHONPATH=".",PYTHONUNBUFFERED="1"
10 | stdout_logfile=log/dask_workers.log
11 | redirect_stderr=true
12 |
--------------------------------------------------------------------------------
/services/external_logging/external_logging.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import logging
4 | import socket
5 | from logging.handlers import SysLogHandler
6 |
7 | from baselayer.app.env import load_env
8 | from baselayer.log import make_log
9 | from baselayer.tools.watch_logs import basedir, log_watcher, tail_f
10 |
11 | env, cfg = load_env()
12 | log = make_log("external_logging")
13 |
14 |
def is_int(x):
    """Return True when ``x`` can be converted with ``int()``.

    Strings of digits and numeric values pass; None and non-numeric
    strings do not.
    """
    try:
        int(x)
        return True
    except (ValueError, TypeError):
        return False
22 |
23 |
def check_config(config, service):
    """Validate the configuration of one external logging service.

    Parameters
    ----------
    config : dict
        The configuration block for this service.
    service : str
        Name of the service; currently only "papertrail" has checks.

    Returns
    -------
    bool
        True when the service is enabled and its configuration passes all
        checks; False otherwise. Each failing check logs a warning.
    """
    if not config.get("enabled", True):
        log(f"Logging service {service} disabled")
        return False

    # NOTE(review): for unrecognized services this sentinel is False, so
    # they validate as enabled without any checks and the message below is
    # never logged — confirm that is intentional.
    conditions = [(False, f"Unknown logging service: {service}")]

    if service == "papertrail":
        # Use .get() for the port: the f-string below is evaluated eagerly
        # when the list is built, so indexing config["port"] directly would
        # raise KeyError whenever "port" is missing, instead of logging the
        # "missing port" warning.
        port = config.get("port")
        conditions = [
            ("url" not in config, "Warning: missing URL for papertrail logging."),
            ("port" not in config, "Warning: missing port for papertrail logging."),
            (
                config.get("url", "").find("papertrailapp.com") == -1,
                "Warning: incorrect URL for papertrail logging.",
            ),
            (
                not is_int(port),
                f"Warning: bad port [{port}] for papertrail logging. Should be an integer.",
            ),
        ]

    for cond, msg in conditions:
        if cond:
            log(msg)

    valid = not any(check for (check, msg) in conditions)
    return valid
51 |
52 |
def external_logging_services():
    """Return the names of configured-and-valid external logging services.

    Reads the ``external_logging`` section of the configuration and keeps
    only the services whose configuration passes ``check_config``.

    TODO: This could eventually be done with a JSONschema
    """
    configs = cfg.get("external_logging", [])

    enabled = [name for name in configs if check_config(configs[name], name)]

    for name in enabled:
        log(f"Enabling external logging to {name}.")

    if not enabled:
        log("No external logging services configured")

    return enabled
74 |
75 |
def get_papertrail_stream_logger():
    """Build a root logger that forwards records to papertrail via syslog."""

    class HostnameFilter(logging.Filter):
        # Stamp every record with this machine's hostname so the format
        # string below can reference %(hostname)s.
        hostname = socket.gethostname()

        def filter(self, record):
            record.hostname = HostnameFilter.hostname
            return True

    handler = SysLogHandler(
        address=(
            cfg["external_logging.papertrail.url"],
            cfg["external_logging.papertrail.port"],
        )
    )
    handler.addFilter(HostnameFilter())

    # Prefer the configured app title; fall back to the base directory name.
    title = cfg["app"].get("title", basedir.split("/")[-1])
    fmt = f"%(asctime)s %(hostname)s {title}: %(message)s"
    handler.setFormatter(logging.Formatter(fmt, datefmt="%b %d %H:%M:%S"))

    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)
    return root
99 |
100 |
enabled_services = external_logging_services()
# "Printer" callables; each one streams a log file to an external service.
printers = []

if "papertrail" in enabled_services:
    # logging set up: see
    # `https://documentation.solarwinds.com/en/Success_Center/papertrail/Content/kb/configuration/configuring-centralized-logging-from-python-apps.htm`

    stream_logger = get_papertrail_stream_logger()

    def papertrail_printer(logfile, **kwargs):
        # Stream every line appended to `logfile` to papertrail, unless the
        # file is explicitly excluded in the configuration.
        excluded = cfg["external_logging.papertrail.excluded_log_files"]
        if logfile in excluded:
            return
        else:
            log(f"Streaming {logfile} to papertrail")
            stream_logger.info(f"-> {logfile}")
            for line in tail_f(logfile):
                stream_logger.info(line)

    printers.append(papertrail_printer)


# Watch the log directory and attach every printer to each log file.
log_watcher(printers)
124 |
--------------------------------------------------------------------------------
/services/external_logging/supervisor.conf:
--------------------------------------------------------------------------------
1 | [program:external_logging]
2 | command=/usr/bin/env python baselayer/services/external_logging/external_logging.py %(ENV_FLAGS)s
3 | environment=PYTHONPATH=".",PYTHONUNBUFFERED="1"
4 | stdout_logfile=log/external_logging.log
5 | redirect_stderr=true
6 | startsecs=0
7 | # Start this early on, we want to be ready for those logs
8 | priority=10
9 |
--------------------------------------------------------------------------------
/services/fake_oauth2/fake_oauth2.py:
--------------------------------------------------------------------------------
1 | import uuid
2 |
3 | import tornado.ioloop
4 | import tornado.web
5 | from tornado.httputil import url_concat
6 | from tornado.web import RequestHandler
7 |
8 | from baselayer.app.env import load_env
9 |
10 |
class FakeGoogleOAuth2AuthHandler(RequestHandler):
    """Stand-in for Google's OAuth2 authorization endpoint.

    Immediately "approves" every request: issues a canned authorization
    code via a redirect back to the caller's ``redirect_uri``, echoing
    the opaque ``state`` parameter as the OAuth2 flow requires.
    """

    def get(self):
        redirect_uri = self.get_argument("redirect_uri")
        params = {
            "code": "fake-authorization-code",
            "state": self.get_argument("state"),
        }
        self.redirect(url_concat(redirect_uri, params))
21 |
22 |
class FakeGoogleOAuth2TokenHandler(RequestHandler):
    """Stand-in for Google's OAuth2 token-exchange endpoint.

    Exchanges the canned authorization code for a fresh fake access token.
    """

    def post(self):
        # Bug fix: the original compared the argument to the expected code
        # but discarded the result (a bare `==` expression statement), so
        # any code was silently accepted.  Reject mismatches explicitly.
        if self.get_argument("code") != "fake-authorization-code":
            self.set_status(400)
            self.write({"error": "invalid_grant"})
            return

        fake_token = str(uuid.uuid4())
        self.write({"access_token": fake_token, "expires_in": "never-expires"})
29 |
30 |
env, cfg = load_env()

# Both endpoints live under /fakeoauth2, matching the nginx location
# that proxies them to this service.
handlers = [
    ("/fakeoauth2/auth", FakeGoogleOAuth2AuthHandler),
    ("/fakeoauth2/token", FakeGoogleOAuth2TokenHandler),
]
app = tornado.web.Application(handlers)
app.listen(cfg["ports.fake_oauth"])

# Serve forever.
tornado.ioloop.IOLoop.current().start()
41 |
--------------------------------------------------------------------------------
/services/fake_oauth2/supervisor.conf:
--------------------------------------------------------------------------------
1 | [program:fakeoauth2]
2 | command=/usr/bin/env python baselayer/services/fake_oauth2/fake_oauth2.py %(ENV_FLAGS)s
3 | environment=PYTHONPATH=".",PYTHONUNBUFFERED="1"
4 | stdout_logfile=log/fake_oauth2.log
5 | redirect_stderr=true
6 |
--------------------------------------------------------------------------------
/services/message_proxy/message_proxy.py:
--------------------------------------------------------------------------------
1 | # http://zguide.zeromq.org/page:all#The-Dynamic-Discovery-Problem
2 |
3 | import zmq
4 |
5 | from baselayer.app.env import load_env
6 | from baselayer.log import make_log
7 |
env, cfg = load_env()

log = make_log("message_proxy")

# ZeroMQ addresses: the web app pushes messages into IN; websocket
# servers subscribe to OUT (see websocket_server.py for the topology).
IN = cfg["ports.websocket_path_in"]
OUT = cfg["ports.websocket_path_out"]

context = zmq.Context()

# PULL socket collecting messages from any number of app processes.
feed_in = context.socket(zmq.PULL)
feed_in.bind(IN)

# PUB socket fanning messages out to all subscribers.
feed_out = context.socket(zmq.PUB)
feed_out.bind(OUT)

log(f"Forwarding messages between {IN} and {OUT}")
# Blocks forever, shuttling messages between the two sockets.
zmq.proxy(feed_in, feed_out)
25 |
--------------------------------------------------------------------------------
/services/message_proxy/supervisor.conf:
--------------------------------------------------------------------------------
1 | [program:message_proxy]
2 | command=/usr/bin/env python baselayer/services/message_proxy/message_proxy.py %(ENV_FLAGS)s
3 | environment=PYTHONUNBUFFERED=1
4 | stdout_logfile=log/message_proxy.log
5 | redirect_stderr=true
6 | priority=90
7 |
--------------------------------------------------------------------------------
/services/migration_manager/migration_manager.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import subprocess
4 | import time
5 |
6 | import tornado.ioloop
7 | import tornado.web
8 |
9 | from baselayer.app.env import load_env
10 | from baselayer.log import make_log
11 |
env, cfg = load_env()
log = make_log("migration_manager")


# Config files given on the command line are forwarded to alembic via its
# -x option, so migrations see the same configuration as the app.
conf_files = env.config
conf_flags = ["-x", f'config={":".join(conf_files)}'] if conf_files else []
18 |
19 |
class timeout_cache:
    """Decorator that caches a function's last result for ``timeout`` seconds.

    Only a single cache slot is kept (arguments are not part of the key),
    so this is meant for status checks that are cheap to store but
    expensive to recompute.
    """

    # Sentinel for "nothing cached yet".  The original used None, which
    # forced a refresh on every call whenever the wrapped function
    # legitimately returned None.
    _UNSET = object()

    def __init__(self, timeout):
        self.timeout = timeout  # seconds before the cached value goes stale
        self.lastrun = 0  # time.time() of the last refresh
        self.cache = self._UNSET
        self.func = None

    def __call__(self, f):
        self.func = f
        return self.wrapped

    def wrapped(self, *args, **kwargs):
        now = time.time()
        stale = (now - self.lastrun) > self.timeout
        if stale or self.cache is self._UNSET:
            self.lastrun = now
            self.cache = self.func(*args, **kwargs)

        return self.cache
39 |
40 |
def _alembic(*options):
    """Run ``alembic`` with the configured flags plus *options*.

    Returns the finished Popen object together with its captured
    stdout and stderr bytes.
    """
    env_with_path = dict(os.environ, PYTHONPATH=".")

    proc = subprocess.Popen(
        ["alembic", *conf_flags, *options],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env_with_path,
    )
    out, err = proc.communicate()
    return proc, out, err
54 |
55 |
def migrations_exist():
    """Return True when alembic migrations can be run in this checkout.

    Requires both an ``alembic/versions`` directory and the ``alembic``
    executable on the PATH; otherwise logs the reason and returns False.
    """
    checks = (
        (os.path.isdir("./alembic/versions"), "No migrations present; continuing"),
        (shutil.which("alembic") is not None, "`alembic` executable not found; continuing"),
    )
    for ok, message in checks:
        if not ok:
            log(message)
            return False

    return True
66 |
67 |
def migrate():
    """Run ``alembic upgrade head``, logging its stderr line by line."""
    env_with_path = dict(os.environ, PYTHONPATH=".")

    cmd = ["alembic", *conf_flags, "upgrade", "head"]
    log(f'Attempting migration: {" ".join(cmd)}')
    # Only stderr is captured; alembic writes its progress there.
    proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, env=env_with_path)
    _, err = proc.communicate()

    for line in err.decode("utf-8").split("\n"):
        log(line)

    log("Migration failed" if proc.returncode != 0 else "Migration succeeded")
84 |
85 |
@timeout_cache(timeout=10)
def migration_status():
    """Return True when the database needs no migration work.

    True covers three cases: migrations are not in use, the database was
    never stamped by alembic, or it is already at head.  The result is
    cached for 10 seconds to keep the HTTP endpoint cheap.
    """
    if not migrations_exist():
        # No migrations present, continue as usual
        return True

    proc, out, _ = _alembic("current", "--verbose")

    if proc.returncode != 0:
        log("Alembic returned an error; aborting")
        log(out.decode("utf-8"))
        return False

    rev_lines = [
        line
        for line in out.decode("utf-8").strip().split("\n")
        if line.startswith("Rev: ")
    ]
    if not rev_lines:
        log("Database not stamped: assuming migrations not in use; continuing")
        return True

    if rev_lines[0].endswith("(head)"):
        log("Database is up to date")
        return True

    log("Database is not migrated")
    return False
111 |
112 |
class MainHandler(tornado.web.RequestHandler):
    """HTTP endpoint reporting whether the database schema is up to date."""

    def set_default_headers(self):
        self.set_header("Content-Type", "application/json")

    def get(self):
        # migration_status() is cached by its decorator, so polling this
        # endpoint does not shell out to alembic on every request.
        self.write({"migrated": migration_status()})
119 |
120 |
def make_app():
    """Build the tornado application serving the migration-status endpoint."""
    routes = [
        (r"/", MainHandler),
    ]
    return tornado.web.Application(routes)
127 |
128 |
if __name__ == "__main__":
    try:
        if migrations_exist() and not migration_status():
            # Attempt migration on startup
            migrate()
    except Exception as e:
        # Never let a migration problem keep the status endpoint from
        # coming up; the failure is logged and surfaced via the API.
        log(f"Uncaught exception: {e}")

    migration_manager = make_app()

    port = cfg["ports.migration_manager"]
    migration_manager.listen(port)
    log(f"Listening on port {port}")
    tornado.ioloop.IOLoop.current().start()
143 |
--------------------------------------------------------------------------------
/services/migration_manager/supervisor.conf:
--------------------------------------------------------------------------------
1 | [program:migration]
2 | command=/usr/bin/env python baselayer/services/migration_manager/migration_manager.py %(ENV_FLAGS)s
3 | environment=PYTHONPATH=".",PYTHONUNBUFFERED="1"
4 | stdout_logfile=log/migration.log
5 | redirect_stderr=true
6 | # Prioritize this above the app server
7 | # Default is 999
8 | priority=100
9 |
--------------------------------------------------------------------------------
/services/nginx/nginx.conf.template:
--------------------------------------------------------------------------------
1 | error_log log/error.log error;
2 | pid run/nginx.pid;
3 |
4 | {% if fill_config_feature.nginx_brotli.dynamic and fill_config_feature.nginx_brotli.modules_path %}
5 | load_module {{ fill_config_feature.nginx_brotli.modules_path }}/ngx_http_brotli_filter_module.so; # for compressing responses on-the-fly
6 | load_module {{ fill_config_feature.nginx_brotli.modules_path }}/ngx_http_brotli_static_module.so; # for serving pre-compressed files
7 | {% endif %}
8 |
9 |
10 | # Choose number of NGINX worker processes based on number of CPUs
11 | worker_processes auto;
12 |
13 | http {
14 | sendfile on;
15 | tcp_nopush on;
16 | types_hash_max_size 4096;
17 |
18 | # Enable compression of outgoing data
19 | gzip on;
20 | gzip_min_length 1000;
21 | gzip_proxied any;
22 | gzip_types text/plain
23 | text/css
24 | application/json
25 | application/x-javascript
26 | application/xml
27 | text/javascript
28 | application/javascript;
29 |
30 | {% if fill_config_feature.nginx_brotli.installed %}
31 | # also enable brotli compression
32 | brotli on;
33 | brotli_comp_level 6;
34 | brotli_types text/plain
35 | text/css
36 | application/json
37 | application/x-javascript
38 | application/xml
39 | text/javascript
40 | application/javascript;
41 | {% endif %}
42 |
43 | # Only retry if there was a communication error, not a timeout
44 | # on the Tornado server (to avoid propagating "queries of death"
45 | # to all frontends)
46 | #
47 | # See https://www.tornadoweb.org/en/stable/guide/running.html
48 | proxy_next_upstream error;
49 |
50 | {% for ip in server.loadbalancer_ips -%}
51 | set_real_ip_from {{ ip }};
52 | {% endfor %}
53 | real_ip_header X-Forwarded-For;
54 | real_ip_recursive on;
55 |
56 | geo $limit {
57 | default 1;
58 | 127.0.0.1/32 0;
59 | {% for ip in server.whitelisted_ips -%}
60 | {{ ip }} 0;
61 | {% endfor %}
62 | }
63 |
64 | map $limit $limit_key {
65 | 0 "";
66 | 1 $http_authorization;
67 | }
68 |
69 | # Per-token API rate limiting
70 | limit_req_zone $limit_key zone=custom_rate_limit:1m rate={{ server.rate_limit }}r/s;
71 |
72 | upstream websocket_server {
73 | server localhost:{{ ports.websocket }};
74 | }
75 |
76 | upstream fakeoauth_server {
77 | server localhost:{{ ports.fake_oauth }};
78 | }
79 |
80 | upstream frontend {
81 | least_conn;
82 | {% for p in range(server.processes) -%}
83 | server 127.0.0.1:{{ ports.app_internal + p }} fail_timeout={{ server.fail_timeout }}s;
84 | {% endfor %}
85 | server 127.0.0.1:{{ ports.status }} backup max_fails=0;
86 | }
87 |
88 | # Only a subset of processes are available for token authenticated API
89 | # requests. This ensures that even when the system is being
90 | # hit by API requests, the frontend remains responsive.
91 | upstream api {
92 | least_conn;
93 | {% for p in range(server.dedicated_frontend_processes, [server.processes, server.dedicated_frontend_processes + 1] | max) -%}
94 | server 127.0.0.1:{{ ports.app_internal + p }} fail_timeout={{ server.fail_timeout }}s;
95 | {% endfor %}
96 | server 127.0.0.1:{{ ports.status }} backup max_fails=0;
97 | }
98 |
99 | # See http://nginx.org/en/docs/http/websocket.html
100 | map $http_upgrade $connection_upgrade {
101 | default upgrade;
102 | '' close;
103 | }
104 |
105 | map $http_authorization $pool {
106 | default frontend;
107 | '~.' api;
108 | }
109 |
110 | map $request_method $exclude_head_requests {
111 | HEAD 0;
112 | default 1;
113 | }
114 |
115 | map $status $loggable {
116 | ~^[2] $exclude_head_requests;
117 | ~^[3] 0;
118 | ~^[101] 0;
119 | 404 $exclude_head_requests;
120 | default 1;
121 | }
122 |
123 | map $http_authorization $trunc_authorization {
124 | default "";
    "~*(?P<tr>.{0,14}).*" $tr;
126 | }
127 |
128 | log_format elb_log '$remote_addr - $remote_user [$trunc_authorization] [$time_local] ' '"$request" $status $body_bytes_sent rl=$request_length "$http_referer" ' '"$http_user_agent"';
129 |
130 | server {
131 | {% if env.debug %}
132 | listen 127.0.0.1:{{ ports.app }};
133 | {% else %}
134 | {% if server.ssl_certificate %}
135 | listen {{ ports.app }} ssl;
136 | {% else %}
137 | listen {{ ports.app }};
138 | {% endif %}
139 | listen {{ ports.app_http_proxy }} proxy_protocol; # This is for AWS Elastic Load Balancer
140 | {% endif %}
141 | client_max_body_size {{ server.max_body_size }}M;
142 |
143 | {% if server.ssl_certificate %}
144 | ssl_certificate {{ server.ssl_certificate }};
145 | ssl_certificate_key {{ server.ssl_certificate_key }};
146 | {% endif %}
147 |
148 | location / {
149 | # API rate limiting
150 | limit_req zone=custom_rate_limit burst={{ server.burst }} nodelay;
151 | limit_req_status 429;
152 |
153 | proxy_pass http://$pool;
154 |
155 | proxy_set_header Host $http_host;
156 | proxy_set_header X-Real-IP $remote_addr;
157 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
158 | proxy_set_header X-Forwarded-Proto $scheme;
159 |
160 | # Buffer sizes; see
161 | # https://www.getpagespeed.com/server-setup/nginx/tuning-proxy_buffer_size-in-nginx
162 |
163 | # Handle uploads up to 64k before buffering to disk
164 | client_body_buffer_size 64k;
165 |
166 | # Buffer responses up to 256k
167 | proxy_buffers 32 8k;
168 |
169 | # Serve static files directly
170 | location /static/ {
171 | root .;
172 | include mime.types;
173 | if ($query_string) {
174 | expires max;
175 | }
176 | }
177 | location /favicon.png {
178 | root static;
179 | expires max;
180 | }
181 | }
182 |
183 | location /websocket {
184 | proxy_pass http://websocket_server/websocket;
185 | proxy_http_version 1.1;
186 | proxy_set_header Upgrade $http_upgrade;
187 | proxy_set_header Connection $connection_upgrade;
188 | proxy_read_timeout 60s;
189 | }
190 |
191 | location /fakeoauth2 {
192 | proxy_pass http://fakeoauth_server/fakeoauth2;
193 | }
194 |
195 | error_log log/nginx-error.log warn;
196 | # one of: debug, info, notice, warn, error, crit, alert, emerg
197 |
198 | {% if log.api_calls %}
199 | {% set log_cond = "" %}
200 | {% else %}
201 | {% set log_cond = "if=$loggable" %}
202 | {% endif %}
203 | access_log log/nginx-access.log elb_log {{ log_cond }};
204 | }
205 |
206 | # Set an array of temp and cache file options that will otherwise default to
207 | # restricted locations accessible only to root.
208 | client_body_temp_path tmp/client_body;
209 | fastcgi_temp_path tmp/fastcgi_temp;
210 | proxy_temp_path tmp/proxy_temp;
211 | scgi_temp_path tmp/scgi_temp;
212 | uwsgi_temp_path tmp/uwsgi_temp;
213 |
214 | }
215 |
216 | events {
217 | worker_connections 1024;
218 | }
219 |
--------------------------------------------------------------------------------
/services/nginx/supervisor.conf:
--------------------------------------------------------------------------------
1 | [program:nginx]
2 | command=nginx -c baselayer/services/nginx/nginx.conf -p . -g "daemon off;"
3 | stdout_logfile=log/nginx.log
4 | redirect_stderr=true
# Fire nginx up early
6 | priority=50
7 |
--------------------------------------------------------------------------------
/services/rspack/rspack.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import sys
3 |
4 | from baselayer.app.env import load_env, parser
5 | from baselayer.log import make_log
6 |
parser.description = "Launch rspack microservice"

env, cfg = load_env()

log = make_log("service/rspack")
12 |
13 |
def run(cmd):
    """Run *cmd*, streaming its output to the service log.

    Blocks until the process exits and returns the finished Popen
    object so callers can inspect ``returncode``.
    """
    # Merge stderr into stdout: the original piped stderr separately but
    # never read it, which loses messages and risks a deadlock once the
    # OS pipe buffer fills.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for line in proc.stdout:
        log(f"{line.decode().strip()}")
    # Reap the child so returncode is populated — it stays None otherwise,
    # and the caller's `sys.exit(p.returncode)` would always report success.
    proc.wait()
    return proc
19 |
20 |
if env.debug:
    log("Debug mode detected, launching rspack monitor")
    # --watch keeps rspack running and rebuilding on changes; run() only
    # returns once the watcher itself exits.
    p = run(["npx", "rspack", "--watch"])
    sys.exit(p.returncode)
else:
    log("Production mode; not building JavaScript bundle")
    log("Use `make bundle` to produce it from scratch")
28 |
--------------------------------------------------------------------------------
/services/rspack/supervisor.conf:
--------------------------------------------------------------------------------
1 | ## For development only: do not run in production
2 |
3 | [program:rspack]
4 | command=/usr/bin/env python baselayer/services/rspack/rspack.py %(ENV_FLAGS)s
5 | environment=PYTHONPATH=".",PYTHONUNBUFFERED="1"
6 | startretries=0
7 | startsecs=0
8 | stdout_logfile=log/rspack.log
9 | redirect_stderr=true
10 | stopasgroup=true
11 | # Start this before any other services; it takes a while to build the bundle
12 | priority=0
13 |
--------------------------------------------------------------------------------
/services/status_server/status_server.py:
--------------------------------------------------------------------------------
1 | import tornado.ioloop
2 | import tornado.web
3 |
4 | from baselayer.app.env import load_env
5 |
# Parse command-line flags and load the application configuration.
env, cfg = load_env()
7 |
8 |
class MainHandler(tornado.web.RequestHandler):
    """Catch-all page shown while the real app server is still starting."""

    def get(self):
        # 503 tells load balancers and crawlers the outage is temporary.
        self.set_status(503)
        # NOTE(review): the HTML markup in these strings was stripped in
        # the copy under review (the literals were left unterminated);
        # reconstructed conservatively here — confirm against history.
        self.write(f"{cfg['app.title']} is being provisioned<br/>")
        self.write(
            "Sysadmins can run <code>make monitor</code> "
            "on the server to see how that is progressing."
        )
        self.write("<br/>System logs are in <code>./log/app_*.log</code>")
17 |
18 |
class MainAPIHandler(tornado.web.RequestHandler):
    """JSON 503 response for API requests made while provisioning."""

    def get(self, args):
        self.set_header("Content-Type", "application/json")
        self.set_status(503)
        body = {
            "status": "error",
            "message": "System provisioning",
        }
        self.write(body)
29 |
30 |
def make_app():
    """Build the placeholder app: JSON 503s under /api, an HTML notice elsewhere."""
    routes = [
        (r"/api(/.*)?", MainAPIHandler),
        (r".*", MainHandler),
    ]
    return tornado.web.Application(routes)
38 |
39 |
if __name__ == "__main__":
    app = make_app()
    # Acts as nginx's "backup" upstream until the real app binds its ports.
    app.listen(cfg["ports.status"])
    tornado.ioloop.IOLoop.current().start()
44 |
--------------------------------------------------------------------------------
/services/status_server/supervisor.conf:
--------------------------------------------------------------------------------
1 | [program:status]
2 | command=/usr/bin/env python baselayer/services/status_server/status_server.py %(ENV_FLAGS)s
3 | environment=PYTHONPATH=".",PYTHONUNBUFFERED="1"
4 | stdout_logfile=log/status_server.log
5 | redirect_stderr=true
6 | # Fire this up before the app
7 | priority=50
8 |
--------------------------------------------------------------------------------
/services/websocket_server/supervisor.conf:
--------------------------------------------------------------------------------
1 | [program:websocket]
2 | command=/usr/bin/env python baselayer/services/websocket_server/websocket_server.py %(ENV_FLAGS)s
3 | environment=PYTHONPATH=".",PYTHONUNBUFFERED="1"
4 | stdout_logfile=log/websocket_server.log
5 | redirect_stderr=true
6 | # Fire this up before the app
7 | priority=100
8 |
--------------------------------------------------------------------------------
/services/websocket_server/websocket_server.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import json
3 |
4 | import jwt
5 | import zmq
6 | from tornado import ioloop, web, websocket
7 |
8 | from baselayer.app.env import load_env
9 | from baselayer.log import make_log
10 |
env, cfg = load_env()
# Shared secret used to verify the JWT auth tokens presented by clients.
secret = cfg["app.secret_key"]

if secret is None:
    raise RuntimeError("We need a secret key to communicate with the server!")

# Single ZeroMQ context shared by the whole process.
ctx = zmq.Context()

log = make_log("websocket_server")
20 |
21 |
class WebSocket(websocket.WebSocketHandler):
    """This is a single Tornado websocket server. It can handle multiple
    websocket connections for multiple users.

    A websocket server has one ZeroMQ stream, which listens to the
    message bus (see `message_proxy.py`), onto which messages are
    posted by the web application.

    The ZeroMQ stream subscribes to the channel '*' by default:
    messages intended for all users.

    Additionally, whenever a user makes a new websocket connection to
    this server, the stream subscribes to that user's id, so that
    it will receive their messages from the bus.

    +----------------------+         +------------+      +---------+
    |                      |<--------| user1      |      | user2   |
    |  Tornado             |   API   +------^-----+      +---^-----+
    |    Web Server        |                |                |
    |                      |         +------+-----------+----+----+
    +-----+----------------+         |    WebSocket server        |
          | PUSH                     +----------------------------+
          |                                ^ SUB
          |                                |
          v PULL                           | PUB
    +--------------------------------------+------------------+
    |                   ZMQ Message Proxy                     |
    +---------------------------------------------------------+

    """

    # Mapping of user_id -> set of live WebSocket handlers for that user.
    sockets = collections.defaultdict(set)
    # Class-wide ZMQStream reading from the message bus; must be set via
    # install_stream() before the first connection is accepted.
    _zmq_stream = None

    def __init__(self, *args, **kwargs):
        websocket.WebSocketHandler.__init__(self, *args, **kwargs)

        if WebSocket._zmq_stream is None:
            raise RuntimeError(
                "Please install a stream before instantiating " "any websockets"
            )

        # Connections start unauthenticated; the client must present a
        # valid JWT (see authenticate) before being tied to a user.
        self.authenticated = False
        self.auth_failures = 0
        self.max_auth_fails = 3
        self.user_id = None

    @classmethod
    def install_stream(cls, stream):
        """Attach the shared ZMQ stream and join the broadcast channel ('*')."""
        stream.socket.setsockopt(zmq.SUBSCRIBE, b"*")
        cls._zmq_stream = stream

    @classmethod
    def subscribe(cls, user_id):
        """Start receiving bus messages addressed to *user_id*."""
        cls._zmq_stream.socket.setsockopt(zmq.SUBSCRIBE, user_id.encode("utf-8"))

    @classmethod
    def unsubscribe(cls, user_id):
        """Stop receiving bus messages addressed to *user_id*."""
        cls._zmq_stream.socket.setsockopt(zmq.UNSUBSCRIBE, user_id.encode("utf-8"))

    def check_origin(self, origin):
        # Accept any origin here; access control happens via JWT auth.
        return True

    def open(self):
        # Immediately challenge the new connection for credentials.
        self.request_auth()

    def on_close(self):
        sockets = WebSocket.sockets

        if self.user_id is not None:
            try:
                sockets[self.user_id].remove(self)
            except KeyError:
                pass

            # If we are the last of the user's websockets, since we're leaving
            # we unsubscribe to the message feed
            if len(sockets[self.user_id]) == 0:
                WebSocket.unsubscribe(self.user_id)

    def on_message(self, auth_token):
        # The only client->server messages are authentication tokens.
        self.authenticate(auth_token)
        if not self.authenticated:
            # Re-challenge until the failure budget is exhausted.
            if self.auth_failures <= self.max_auth_fails:
                self.request_auth()
            else:
                log("max auth failure count reached")

    def request_auth(self):
        # Counts as a failed attempt until a valid token arrives
        # (auth_failures is reset to 0 on successful authentication).
        self.auth_failures += 1
        self.send_json(actionType="AUTH REQUEST")

    def send_json(self, **kwargs):
        """Serialize *kwargs* to JSON and send them over this connection."""
        self.write_message(json.dumps(kwargs))

    def authenticate(self, auth_token):
        """Validate a JWT and, on success, register this socket for its user."""
        try:
            # Raises DecodeError / ExpiredSignatureError on a bad token.
            token_payload = jwt.decode(auth_token, secret, algorithms=["HS256"])
            user_id = token_payload.get("user_id", None)
            if not user_id:
                raise jwt.DecodeError("No user_id field found")

            self.user_id = user_id
            self.authenticated = True
            self.auth_failures = 0
            self.send_json(actionType="AUTH OK")

            # If we are the first websocket connecting on behalf of
            # a given user, subscribe to the feed for that user
            if len(WebSocket.sockets[user_id]) == 0:
                WebSocket.subscribe(user_id)

            WebSocket.sockets[user_id].add(self)

        except jwt.DecodeError:
            self.send_json(actionType="AUTH FAILED")
        except jwt.ExpiredSignatureError:
            self.send_json(actionType="AUTH FAILED")

    @classmethod
    def heartbeat(cls):
        """Ping every connection so intermediate proxies keep them open."""
        for user_id in cls.sockets:
            for socket in cls.sockets[user_id]:
                socket.write_message(b"<3")

    # http://mrjoes.github.io/2013/06/21/python-realtime.html
    @classmethod
    def broadcast(cls, data):
        """Fan a (channel, payload) message from the bus out to websockets."""
        user_id, payload = (d.decode("utf-8") for d in data)

        if user_id == "*":
            log("Forwarding message to all users")

            all_sockets = [
                socket for socket_list in cls.sockets.values() for socket in socket_list
            ]

            for socket in all_sockets:
                socket.write_message(payload)

        else:
            for socket in cls.sockets[user_id]:
                log(f"Forwarding message to user {user_id}")

                socket.write_message(payload)
167 |
168 |
if __name__ == "__main__":
    PORT = cfg["ports.websocket"]
    LOCAL_OUTPUT = cfg["ports.websocket_path_out"]

    # NOTE(review): zmq is already imported at module level; this
    # re-import is harmless but redundant.
    import zmq
    from zmq.eventloop import zmqstream

    # https://pyzmq.readthedocs.io/en/latest/eventloop.html

    # Subscribe to the message proxy's PUB socket (see message_proxy.py).
    sub = ctx.socket(zmq.SUB)
    sub.connect(LOCAL_OUTPUT)

    log(f"Broadcasting {LOCAL_OUTPUT} to all websockets")
    stream = zmqstream.ZMQStream(sub)
    WebSocket.install_stream(stream)
    # Every message arriving from the bus is fanned out to connections.
    stream.on_recv(WebSocket.broadcast)

    server = web.Application(
        [
            (r"/websocket", WebSocket),
        ]
    )
    server.listen(PORT)

    io_loop = ioloop.IOLoop.current()

    # We send a heartbeat every 45 seconds to make sure that nginx
    # proxy does not time out and close the connection
    ioloop.PeriodicCallback(WebSocket.heartbeat, 45000).start()

    log(f"Listening for incoming websocket connections on port {PORT}")
    ioloop.IOLoop.instance().start()
201 |
--------------------------------------------------------------------------------
/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cesium-ml/baselayer/8b2fae5229c6af08a6beca992ce2f60dfc64303a/static/favicon.ico
--------------------------------------------------------------------------------
/static/img/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cesium-ml/baselayer/8b2fae5229c6af08a6beca992ce2f60dfc64303a/static/img/logo.png
--------------------------------------------------------------------------------
/static/img/logo.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
93 |
--------------------------------------------------------------------------------
/static/js/API.js:
--------------------------------------------------------------------------------
1 | // Note: These are thunks (https://github.com/gaearon/redux-thunk),
2 | // so calling `API(...)` will not do anything.
3 | //
4 | // Each invocation should happen inside of a `dispatch` call, e.g.,
5 | //
6 | // dispatch(API.GET('/baselayer/profile', FETCH_USER_PROFILE));
7 | //
8 |
9 | import { showNotification } from "./components/Notifications";
10 |
11 | const API_CALL = "baselayer/API_CALL";
12 |
function API(endpoint, actionType, method = "GET", body = {}, otherArgs = {}) {
  // Everything we know about the request, attached to every dispatched
  // action so reducers can tell which call succeeded or failed.
  const parameters = { endpoint, actionType, body, method, otherArgs };

  const fetchInit = {
    credentials: "same-origin",
    headers: {
      "Content-Type": "application/json",
    },
    method,
    ...otherArgs,
  };
  // GET requests must not carry a body.
  if (method !== "GET") {
    fetchInit.body = JSON.stringify(body);
  }

  return async (dispatch) => {
    if (!actionType) {
      return dispatch(
        showNotification(
          "API invocation error: no actionType specified",
          "error",
        ),
      );
    }
    dispatch({ type: API_CALL, parameters });
    try {
      const response = await fetch(endpoint, fetchInit);
      if (response.status !== 200) {
        throw new Error(
          `Could not fetch data from server (${response.status})`,
        );
      }

      const json = await response.json();
      if (json.status !== "success") {
        /* On error, dispatch an action carrying every piece of information
           we have about the request — including the JSON args and the
           server's response — so a reducer can set an error message. */
        dispatch({ type: `${actionType}_FAIL`, parameters, response: json });
        throw json.message;
      }
      dispatch({ type: `${actionType}_OK`, ...json });
      return json.data;
    } catch (error) {
      dispatch({ type: `${actionType}_FAIL`, parameters, error });
      return dispatch(showNotification(`API error: ${error}`, "error"));
    }
  };
}
66 |
67 | function GET(endpoint, actionType) {
68 | return API(endpoint, actionType, "GET");
69 | }
70 |
71 | function POST(endpoint, actionType, payload) {
72 | return API(endpoint, actionType, "POST", payload);
73 | }
74 |
75 | function PUT(endpoint, actionType, payload) {
76 | return API(endpoint, actionType, "PUT", payload);
77 | }
78 |
79 | function DELETE(endpoint, actionType, payload) {
80 | return API(endpoint, actionType, "DELETE", payload);
81 | }
82 |
83 | export { GET, POST, PUT, DELETE, API, API_CALL };
84 |
--------------------------------------------------------------------------------
/static/js/MessageHandler.js:
--------------------------------------------------------------------------------
1 | /* Handle incoming websocket messages */
2 |
3 | import {
4 | SHOW_NOTIFICATION,
5 | showNotification,
6 | } from "./components/Notifications";
7 |
class MessageHandler {
  /* Dispatches incoming websocket messages to all registered handlers.
     You have to run `init` before the messageHandler can be used. */

  constructor() {
    this._handlers = [];
    this._dispatch = null;
    this._getState = null;
  }

  // Wire the handler up to the redux store.
  init(dispatch, getState) {
    this._dispatch = dispatch;
    this._getState = getState;
  }

  // Register an additional handler; every handler sees every message.
  add(handler) {
    this._handlers.push(handler);
  }

  // Execute all registered handlers on the incoming message.
  handle(actionType, payload) {
    for (const handler of this._handlers) {
      handler(actionType, payload, this._dispatch, this._getState);
    }
  }
}
33 |
// Default handler: surface SHOW_NOTIFICATION messages as on-screen notes.
const notificationHandler = (actionType, payload, dispatch) => {
  if (actionType !== SHOW_NOTIFICATION) {
    return;
  }
  const { note, type } = payload;
  const { duration } = payload;
  // If the duration is missing or invalid (negative or too large),
  // fall back to the default of 3 seconds.
  const invalid = !duration || duration <= 0 || duration >= 30000;
  dispatch(showNotification(note, type, invalid ? 3000 : duration));
};
45 |
// Module-level singleton with the notification handler pre-registered;
// applications extend it via `messageHandler.add(...)`.
const messageHandler = new MessageHandler();
messageHandler.add(notificationHandler);

export default messageHandler;
50 |
--------------------------------------------------------------------------------
/static/js/components/Notifications.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import { useDispatch, useSelector } from "react-redux";
3 |
4 | export const SHOW_NOTIFICATION = "baselayer/SHOW_NOTIFICATION";
5 | export const HIDE_NOTIFICATION = "baselayer/HIDE_NOTIFICATION";
6 | export const HIDE_NOTIFICATION_BY_TAG = "baselayer/HIDE_NOTIFICATION_BY_TAG";
7 |
8 | export const MS_PER_YEAR = 31540000000;
9 |
// Action creator: dismiss the notification with the given id.
export function hideNotification(id) {
  const payload = { id };
  return { type: HIDE_NOTIFICATION, payload };
}
16 |
// Thunk: dismiss every notification carrying `tag`; resolves once dispatched.
export function hideNotificationByTag(tag) {
  return (dispatch) => {
    const action = { type: HIDE_NOTIFICATION_BY_TAG, payload: { tag } };
    dispatch(action);
    return Promise.resolve();
  };
}
26 |
27 | export const Notifications = () => {
28 | const style = {
29 | position: "fixed",
30 | zIndex: 20000,
31 | top: "4.5em",
32 | width: "30em",
33 | right: "1em",
34 | overflow: "hidden",
35 |
36 | note: {
37 | color: "white",
38 | fontWeight: 600,
39 | padding: "1.3em",
40 | marginBottom: "0.5em",
41 | width: "100%",
42 | borderRadius: "8px",
43 | WebkitBoxShadow: "0 4px 5px rgba(0, 0, 0, 0.2)",
44 | MozBoxShadow: "0 4px 5px rgba(0, 0, 0, 0.2)",
45 | boxShadow: "0 4px 5px rgba(0, 0, 0, 0.2)",
46 | fontSize: "0.95rem",
47 | display: "inline-block",
48 | },
49 | };
50 |
51 | const noteColor = {
52 | error: "rgba(244,67,54,0.95)",
53 | warning: "rgba(255,152,0,0.95)",
54 | info: "rgba(11,181,119,0.95)",
55 | };
56 |
57 | const dispatch = useDispatch();
58 | const notifications = useSelector((state) => state.notifications.notes);
59 |
60 | return (
61 | notifications.length > 0 && (
62 |
63 | {notifications.map((notification) => (
64 |
dispatch(hideNotification(notification.id))}
69 | >
70 | {notification.note}
71 |
72 | ))}
73 |
74 | )
75 | );
76 | };
77 |
// Monotonic counter so every notification gets a unique id.
let nextNotificationId = 0;

/* Thunk action creator: display a notification, then automatically
   hide it again after `duration` milliseconds. */
export function showNotification(
  note,
  type = "info",
  duration = 3000,
  tag = "default",
) {
  const thisId = nextNotificationId;
  nextNotificationId += 1;

  // Errors are additionally mirrored to the browser console.
  if (type === "error") {
    // eslint-disable-next-line no-console
    console.error(note);
  }

  return (dispatch) => {
    const payload = { id: thisId, note, type, tag };
    dispatch({ type: SHOW_NOTIFICATION, payload });
    setTimeout(() => dispatch(hideNotification(thisId)), duration);
  };
}
106 |
/**
 * Redux reducer for the notifications slice. State shape: { notes: [...] }.
 */
export function reducer(state = { notes: [] }, action) {
  const { notes } = state;

  if (action.type === SHOW_NOTIFICATION) {
    const { id, note, type, tag } = action.payload;
    return { notes: [...notes, { id, note, type, tag }] };
  }
  if (action.type === HIDE_NOTIFICATION) {
    return { notes: notes.filter((n) => n.id !== action.payload.id) };
  }
  if (action.type === HIDE_NOTIFICATION_BY_TAG) {
    return { notes: notes.filter((n) => n.tag !== action.payload.tag) };
  }
  return state;
}
127 |
--------------------------------------------------------------------------------
/static/js/components/WebSocket.jsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import PropTypes from "prop-types";
3 | import { createCookie, readCookie, eraseCookie } from "../cookies";
4 | import ReconnectingWebSocket from "../reconnecting-websocket";
5 | import messageHandler from "../MessageHandler";
6 | import {
7 | showNotification,
8 | hideNotificationByTag,
9 | MS_PER_YEAR,
10 | } from "./Notifications";
11 |
/**
 * Pass a fetch Response through unchanged if its status is 2xx; otherwise
 * throw an Error whose `response` property carries the failing response.
 */
function checkStatus(response) {
  const { status } = response;
  if (status < 200 || status >= 300) {
    const error = new Error(response.statusText);
    error.response = response;
    throw error;
  }
  return response;
}
21 |
/** Decode a fetch Response body as JSON (returns the promise from .json()). */
function parseJSON(response) {
  return response.json();
}
25 |
/**
 * Resolve with an auth token: prefer the cached `auth_token` cookie,
 * otherwise fetch a fresh token from `auth_url` and cache it.
 * Never rejects — on fetch failure it falls back to a fake token after 1s.
 */
function getAuthToken(auth_url) {
  return new Promise((resolve) => {
    // First, try and read the authentication token from a cookie
    const cookie_token = readCookie("auth_token");

    if (cookie_token) {
      resolve(cookie_token);
    } else {
      fetch(auth_url, {
        credentials: "same-origin",
      })
        .then(checkStatus)
        .then(parseJSON)
        .then((json) => {
          const { token } = json.data;
          // Cache for subsequent connection attempts.
          createCookie("auth_token", token);
          resolve(token);
        })
        .catch(() => {
          // If we get a gateway error, it probably means nginx is
          // being restarted. Not much we can do, other than wait a
          // bit and continue with a fake token.
          const no_token = "no_auth_token_user bad_token";
          setTimeout(() => {
            resolve(no_token);
          }, 1000);
        });
    }
  });
}
56 |
/**
 * Replace any existing websocket notification with `msg` (shown essentially
 * forever — 50 "years" — until explicitly cleared).
 *
 * Bug fix: `.then()` must receive a callback. Previously the result of an
 * immediately executed `dispatch(showNotification(...))` was passed to
 * `.then`, so the new notification was dispatched before the hide promise
 * resolved, defeating the intended ordering.
 */
function showWebsocketNotification(dispatch, msg, tag) {
  dispatch(hideNotificationByTag(tag)).then(() =>
    dispatch(showNotification(msg, "warning", 50 * MS_PER_YEAR, tag)),
  );
}
62 |
/** Remove any currently-visible notification tagged with `tag`. */
function clearWebsocketNotification(dispatch, tag) {
  dispatch(hideNotificationByTag(tag));
}
66 |
// Maintains a ReconnectingWebSocket to the backend, performs the token-based
// auth handshake with the server, and renders a colored connection-status
// indicator (red = disconnected, orange = unauthenticated, green = ok).
class WebSocket extends React.Component {
  constructor(props) {
    super(props);
    this.state = {
      connected: false,
      authenticated: false,
    };

    const { url, auth_url, dispatch } = this.props;
    const ws = new ReconnectingWebSocket(url);
    const tag = "websocket";

    ws.onopen = () => {
      this.setState({ connected: true });
      clearWebsocketNotification(dispatch, tag);
    };

    ws.onerror = () => {
      showWebsocketNotification(
        dispatch,
        "No WebSocket connection: limited functionality may be available",
        tag,
      );
    };

    ws.onmessage = (event) => {
      const { data } = event;

      // Ignore heartbeat signals
      if (data === "<3") {
        return;
      }

      const message = JSON.parse(data);
      const { actionType, payload } = message;

      switch (actionType) {
        case "AUTH REQUEST":
          // Server asked us to authenticate; answer with a token.
          getAuthToken(auth_url).then((token) => ws.send(token));
          break;
        case "AUTH FAILED":
          // Drop the stale cookie so the next attempt fetches a fresh token.
          this.setState({ authenticated: false });
          eraseCookie("auth_token");
          showWebsocketNotification(
            dispatch,
            "WebSocket connection authentication failed: limited functionality may be available",
            tag,
          );
          break;
        case "AUTH OK":
          this.setState({ authenticated: true });
          dispatch(hideNotificationByTag(tag));
          break;
        default:
          // Application-level messages are delegated to the registered handler.
          messageHandler.handle(actionType, payload);
      }
    };

    ws.onclose = () => {
      this.setState({
        connected: false,
        authenticated: false,
      });
      showWebsocketNotification(
        dispatch,
        "No WebSocket connection: limited functionality may be available",
        tag,
      );
    };
  }

  render() {
    const { connected, authenticated } = this.state;
    let statusColor;
    if (!connected) {
      statusColor = "red";
    } else {
      statusColor = authenticated ? "lightgreen" : "orange";
    }

    const statusSize = 12;

    const statusStyle = {
      display: "inline-block",
      padding: 0,
      lineHeight: statusSize,
      textAlign: "center",
      whiteSpace: "nowrap",
      verticalAlign: "baseline",
      backgroundColor: statusColor,
      borderRadius: "50%",
      border: "2px solid gray",
      position: "relative",
      height: statusSize,
      width: statusSize,
    };

    const connected_desc = `WebSocket is
    ${connected ? "connected" : "disconnected"} &
    ${authenticated ? "authenticated" : "unauthenticated"}.`;
    return (
      // NOTE(review): the JSX element originally returned here (presumably the
      // status dot using `statusStyle` and `connected_desc`) appears to have
      // been stripped during text extraction — restore from version control.

    );
  }
}
172 |
// Runtime prop validation for the WebSocket component.
WebSocket.propTypes = {
  // WebSocket endpoint to connect to.
  url: PropTypes.string.isRequired,
  // HTTP endpoint that hands out auth tokens.
  auth_url: PropTypes.string.isRequired,
  // NOTE(review): this prop is declared required, but the component body uses
  // the module-level `messageHandler` import rather than `props.messageHandler`
  // — confirm whether the prop is still needed.
  messageHandler: PropTypes.shape({
    handle: PropTypes.func.isRequired,
  }).isRequired,
  // Redux store dispatch function.
  dispatch: PropTypes.func.isRequired,
};
181 |
182 | export default WebSocket;
183 |
--------------------------------------------------------------------------------
/static/js/cookies.js:
--------------------------------------------------------------------------------
1 | // From http://www.quirksmode.org/js/cookies.html
2 |
/**
 * Set cookie `name` to `value`, site-wide with SameSite=Strict.
 * If `minutes` is given (truthy), the cookie expires that many minutes from
 * now; otherwise it is a session cookie.
 */
export function createCookie(name, value, minutes) {
  let expires = "";
  if (minutes) {
    const expiry = new Date(Date.now() + minutes * 60 * 1000);
    expires = `; expires=${expiry.toGMTString()}`;
  }
  document.cookie = `${name}=${value}${expires};path=/;SameSite=Strict`;
}
12 |
/**
 * Return the value of cookie `name`, or null if it is not set.
 */
export function readCookie(name) {
  const prefix = `${name}=`;
  for (let entry of document.cookie.split(";")) {
    // Strip leading spaces left behind by the "; " separator.
    while (entry.startsWith(" ")) entry = entry.slice(1);
    if (entry.startsWith(prefix)) {
      return entry.slice(prefix.length);
    }
  }
  return null;
}
23 |
/** Delete cookie `name` by re-creating it with an already-past expiry. */
export function eraseCookie(name) {
  createCookie(name, "", -1);
}
27 |
--------------------------------------------------------------------------------
/static/js/reconnecting-websocket.js:
--------------------------------------------------------------------------------
1 | // MIT License:
2 | //
3 | // Copyright (c) 2010-2012, Joe Walnes
4 | //
5 | // Permission is hereby granted, free of charge, to any person obtaining a copy
6 | // of this software and associated documentation files (the "Software"), to deal
7 | // in the Software without restriction, including without limitation the rights
8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | // copies of the Software, and to permit persons to whom the Software is
10 | // furnished to do so, subject to the following conditions:
11 | //
12 | // The above copyright notice and this permission notice shall be included in
13 | // all copies or substantial portions of the Software.
14 | //
15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | // THE SOFTWARE.
22 |
23 | /**
24 | * This behaves like a WebSocket in every way, except if it fails to connect,
25 | * or it gets disconnected, it will repeatedly poll until it successfully connects
26 | * again.
27 | *
28 | * It is API compatible, so when you have:
29 | * ws = new WebSocket('ws://....');
30 | * you can replace with:
31 | * ws = new ReconnectingWebSocket('ws://....');
32 | *
33 | * The event stream will typically look like:
34 | * onconnecting
35 | * onopen
36 | * onmessage
37 | * onmessage
38 | * onclose // lost connection
39 | * onconnecting
40 | * onopen // sometime later...
41 | * onmessage
42 | * onmessage
43 | * etc...
44 | *
45 | * It is API compatible with the standard WebSocket API, apart from the following members:
46 | *
47 | * - `bufferedAmount`
48 | * - `extensions`
49 | * - `binaryType`
50 | *
51 | * Latest version: https://github.com/joewalnes/reconnecting-websocket/
52 | * - Joe Walnes
53 | *
54 | * Syntax
55 | * ======
56 | * var socket = new ReconnectingWebSocket(url, protocols, options);
57 | *
58 | * Parameters
59 | * ==========
60 | * url - The url you are connecting to.
61 | * protocols - Optional string or array of protocols.
62 | * options - See below
63 | *
64 | * Options
65 | * =======
66 | * Options can either be passed upon instantiation or set after instantiation:
67 | *
68 | * var socket = new ReconnectingWebSocket(url, null, { debug: true, reconnectInterval: 4000 });
69 | *
70 | * or
71 | *
72 | * var socket = new ReconnectingWebSocket(url);
73 | * socket.debug = true;
74 | * socket.reconnectInterval = 4000;
75 | *
76 | * debug
77 | * - Whether this instance should log debug messages. Accepts true or false. Default: false.
78 | *
79 | * automaticOpen
80 | * - Whether or not the websocket should attempt to connect immediately upon instantiation. The socket can be manually opened or closed at any time using ws.open() and ws.close().
81 | *
82 | * reconnectInterval
83 | * - The number of milliseconds to delay before attempting to reconnect. Accepts integer. Default: 1000.
84 | *
85 | * maxReconnectInterval
86 | * - The maximum number of milliseconds to delay a reconnection attempt. Accepts integer. Default: 30000.
87 | *
88 | * reconnectDecay
89 | * - The rate of increase of the reconnect delay. Allows reconnect attempts to back off when problems persist. Accepts integer or float. Default: 1.5.
90 | *
91 | * timeoutInterval
92 | * - The maximum time in milliseconds to wait for a connection to succeed before closing and retrying. Accepts integer. Default: 2000.
93 | *
94 | */
// UMD wrapper: register as an AMD module, a CommonJS module, or a global,
// depending on the host environment.
(function (global, factory) {
  if (typeof define === "function" && define.amd) {
    define([], factory);
  } else if (typeof module !== "undefined" && module.exports) {
    module.exports = factory();
  } else {
    global.ReconnectingWebSocket = factory();
  }
})(this, function () {
  // Bail out (export undefined) in environments without native WebSocket.
  if (!("WebSocket" in window)) {
    return;
  }

  function ReconnectingWebSocket(url, protocols, options) {
    // Default settings
    var settings = {
      /** Whether this instance should log debug messages. */
      debug: false,

      /** Whether or not the websocket should attempt to connect immediately upon instantiation. */
      automaticOpen: true,

      /** The number of milliseconds to delay before attempting to reconnect. */
      reconnectInterval: 1000,
      /** The maximum number of milliseconds to delay a reconnection attempt. */
      maxReconnectInterval: 30000,
      /** The rate of increase of the reconnect delay. Allows reconnect attempts to back off when problems persist. */
      reconnectDecay: 1.5,

      /** The maximum time in milliseconds to wait for a connection to succeed before closing and retrying. */
      timeoutInterval: 2000,

      /** The maximum number of reconnection attempts to make. Unlimited if null. */
      maxReconnectAttempts: null,

      /** The binary type, possible values 'blob' or 'arraybuffer', default 'blob'. */
      binaryType: "blob",
    };
    if (!options) {
      options = {};
    }

    // Overwrite and define settings with options if they exist.
    for (var key in settings) {
      if (typeof options[key] !== "undefined") {
        this[key] = options[key];
      } else {
        this[key] = settings[key];
      }
    }

    // These should be treated as read-only properties

    /** The URL as resolved by the constructor. This is always an absolute URL. Read only. */
    this.url = url;

    /** The number of attempted reconnects since starting, or the last successful connection. Read only. */
    this.reconnectAttempts = 0;

    /**
     * The current state of the connection.
     * Can be one of: WebSocket.CONNECTING, WebSocket.OPEN, WebSocket.CLOSING, WebSocket.CLOSED
     * Read only.
     */
    this.readyState = WebSocket.CONNECTING;

    /**
     * A string indicating the name of the sub-protocol the server selected; this will be one of
     * the strings specified in the protocols parameter when creating the WebSocket object.
     * Read only.
     */
    this.protocol = null;

    // Private state variables

    var self = this;
    var ws;
    var forcedClose = false;
    var timedOut = false;
    // A detached DOM node is used purely as an EventTarget implementation.
    var eventTarget = document.createElement("div");

    // Wire up "on*" properties as event handlers

    eventTarget.addEventListener("open", function (event) {
      self.onopen(event);
    });
    eventTarget.addEventListener("close", function (event) {
      self.onclose(event);
    });
    eventTarget.addEventListener("connecting", function (event) {
      self.onconnecting(event);
    });
    eventTarget.addEventListener("message", function (event) {
      self.onmessage(event);
    });
    eventTarget.addEventListener("error", function (event) {
      self.onerror(event);
    });

    // Expose the API required by EventTarget

    this.addEventListener = eventTarget.addEventListener.bind(eventTarget);
    this.removeEventListener =
      eventTarget.removeEventListener.bind(eventTarget);
    this.dispatchEvent = eventTarget.dispatchEvent.bind(eventTarget);

    /**
     * This function generates an event that is compatible with standard
     * compliant browsers and IE9 - IE11
     *
     * This will prevent the error:
     * Object doesn't support this action
     *
     * http://stackoverflow.com/questions/19345392/why-arent-my-parameters-getting-passed-through-to-a-dispatched-event/19345563#19345563
     * @param s String The name that the event should use
     * @param args Object an optional object that the event will use
     */
    function generateEvent(s, args) {
      var evt = document.createEvent("CustomEvent");
      evt.initCustomEvent(s, false, false, args);
      return evt;
    }

    // Open (or re-open) the underlying WebSocket. `reconnectAttempt` is true
    // when this call is part of the automatic retry loop.
    this.open = function (reconnectAttempt) {
      ws = new WebSocket(self.url, protocols || []);
      ws.binaryType = this.binaryType;

      if (reconnectAttempt) {
        // Give up once the configured retry budget is exhausted.
        if (
          this.maxReconnectAttempts &&
          this.reconnectAttempts > this.maxReconnectAttempts
        ) {
          return;
        }
      } else {
        eventTarget.dispatchEvent(generateEvent("connecting"));
        this.reconnectAttempts = 0;
      }

      if (self.debug || ReconnectingWebSocket.debugAll) {
        console.debug("ReconnectingWebSocket", "attempt-connect", self.url);
      }

      // Force-close the socket if it does not connect within timeoutInterval.
      var localWs = ws;
      var timeout = setTimeout(function () {
        if (self.debug || ReconnectingWebSocket.debugAll) {
          console.debug(
            "ReconnectingWebSocket",
            "connection-timeout",
            self.url,
          );
        }
        timedOut = true;
        localWs.close();
        timedOut = false;
      }, self.timeoutInterval);

      ws.onopen = function (event) {
        clearTimeout(timeout);
        if (self.debug || ReconnectingWebSocket.debugAll) {
          console.debug("ReconnectingWebSocket", "onopen", self.url);
        }
        self.protocol = ws.protocol;
        self.readyState = WebSocket.OPEN;
        self.reconnectAttempts = 0;
        var e = generateEvent("open");
        e.isReconnect = reconnectAttempt;
        reconnectAttempt = false;
        eventTarget.dispatchEvent(e);
      };

      ws.onclose = function (event) {
        // NOTE(review): `var timeout` is re-declared later in this function;
        // due to `var` hoisting this clearTimeout receives `undefined` rather
        // than the connection-timeout timer above — confirm against upstream.
        clearTimeout(timeout);
        ws = null;
        if (forcedClose) {
          // Deliberate close(): report closed and stop.
          self.readyState = WebSocket.CLOSED;
          eventTarget.dispatchEvent(generateEvent("close"));
        } else {
          // Unexpected close: report, then retry with exponential backoff.
          self.readyState = WebSocket.CONNECTING;
          var e = generateEvent("connecting");
          e.code = event.code;
          e.reason = event.reason;
          e.wasClean = event.wasClean;
          eventTarget.dispatchEvent(e);
          if (!reconnectAttempt && !timedOut) {
            if (self.debug || ReconnectingWebSocket.debugAll) {
              console.debug("ReconnectingWebSocket", "onclose", self.url);
            }
            eventTarget.dispatchEvent(generateEvent("close"));
          }

          // Backoff delay: reconnectInterval * reconnectDecay^attempts,
          // capped at maxReconnectInterval.
          var timeout =
            self.reconnectInterval *
            Math.pow(self.reconnectDecay, self.reconnectAttempts);
          setTimeout(
            function () {
              self.reconnectAttempts++;
              self.open(true);
            },
            timeout > self.maxReconnectInterval
              ? self.maxReconnectInterval
              : timeout,
          );
        }
      };
      ws.onmessage = function (event) {
        if (self.debug || ReconnectingWebSocket.debugAll) {
          console.debug(
            "ReconnectingWebSocket",
            "onmessage",
            self.url,
            event.data,
          );
        }
        var e = generateEvent("message");
        e.data = event.data;
        eventTarget.dispatchEvent(e);
      };
      ws.onerror = function (event) {
        if (self.debug || ReconnectingWebSocket.debugAll) {
          console.debug("ReconnectingWebSocket", "onerror", self.url, event);
        }
        eventTarget.dispatchEvent(generateEvent("error"));
      };
    };

    // Whether or not to create a websocket upon instantiation
    if (this.automaticOpen == true) {
      this.open(false);
    }

    /**
     * Transmits data to the server over the WebSocket connection.
     *
     * @param data a text string, ArrayBuffer or Blob to send to the server.
     */
    this.send = function (data) {
      if (ws) {
        if (self.debug || ReconnectingWebSocket.debugAll) {
          console.debug("ReconnectingWebSocket", "send", self.url, data);
        }
        return ws.send(data);
      } else {
        throw "INVALID_STATE_ERR : Pausing to reconnect websocket";
      }
    };

    /**
     * Closes the WebSocket connection or connection attempt, if any.
     * If the connection is already CLOSED, this method does nothing.
     */
    this.close = function (code, reason) {
      // Default CLOSE_NORMAL code
      if (typeof code == "undefined") {
        code = 1000;
      }
      // Setting forcedClose suppresses the automatic reconnect in onclose.
      forcedClose = true;
      if (ws) {
        ws.close(code, reason);
      }
    };

    /**
     * Additional public API method to refresh the connection if still open (close, re-open).
     * For example, if the app suspects bad data / missed heart beats, it can try to refresh.
     */
    this.refresh = function () {
      // Plain ws.close() without forcedClose → onclose triggers a reconnect.
      if (ws) {
        ws.close();
      }
    };
  }

  /**
   * An event listener to be called when the WebSocket connection's readyState changes to OPEN;
   * this indicates that the connection is ready to send and receive data.
   */
  ReconnectingWebSocket.prototype.onopen = function (event) {};
  /** An event listener to be called when the WebSocket connection's readyState changes to CLOSED. */
  ReconnectingWebSocket.prototype.onclose = function (event) {};
  /** An event listener to be called when a connection begins being attempted. */
  ReconnectingWebSocket.prototype.onconnecting = function (event) {};
  /** An event listener to be called when a message is received from the server. */
  ReconnectingWebSocket.prototype.onmessage = function (event) {};
  /** An event listener to be called when an error occurs. */
  ReconnectingWebSocket.prototype.onerror = function (event) {};

  /**
   * Whether all instances of ReconnectingWebSocket should log debug messages.
   * Setting this to true is the equivalent of setting all instances of ReconnectingWebSocket.debug to true.
   */
  ReconnectingWebSocket.debugAll = false;

  // Mirror the standard WebSocket readyState constants.
  ReconnectingWebSocket.CONNECTING = WebSocket.CONNECTING;
  ReconnectingWebSocket.OPEN = WebSocket.OPEN;
  ReconnectingWebSocket.CLOSING = WebSocket.CLOSING;
  ReconnectingWebSocket.CLOSED = WebSocket.CLOSED;

  return ReconnectingWebSocket;
});
395 |
--------------------------------------------------------------------------------
/tools/check_app_environment.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import subprocess
4 | import sys
5 | import textwrap
6 |
7 | from packaging.version import Version
8 | from status import status
9 |
10 |
def output(cmd):
    """Run *cmd* and capture its combined stdout/stderr.

    Uses ``subprocess.run`` instead of the previous ``Popen``/``communicate``
    pair (whose ``err`` value was always ``None`` and unused, since stderr is
    redirected into stdout).

    Parameters
    ----------
    cmd : list of str
        Command and arguments to execute.

    Returns
    -------
    success : bool
        True if the command exited with status 0.
    out : bytes
        Combined stdout and stderr of the command.
    """
    p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return p.returncode == 0, p.stdout
16 |
17 |
# Dependency table: name -> (command that prints the version,
# parser extracting the bare version string, minimum required version).
deps = {
    "nginx": (
        # Command to get version
        ["nginx", "-v"],
        # Extract *only* the version number
        lambda v: v.split()[2].split("/")[1],
        # It must be >= 1.7
        "1.7",
    ),
    "psql": (
        ["psql", "--version"],
        lambda v: v.split("\n")[-1].split()[2],
        "12.0",
    ),
    "npm": (["npm", "-v"], lambda v: v, "8.3.2"),
    "node": (["node", "-v"], lambda v: v[1:], "16.14.0"),
    "python": (["python", "--version"], lambda v: v.split()[1], "3.8"),
}
36 |
print("Checking system dependencies:")

# Collect (dependency, exception) pairs; reported together at the end.
fail = []

for dep, (cmd, get_version, min_version) in deps.items():
    try:
        query = f"{dep} >= {min_version}"
        with status(query):
            success, out = output(cmd)
            try:
                version = get_version(out.decode("utf-8").strip())
                # Right-align the detected version against the status column.
                print(f"[{version.rjust(8)}]".rjust(40 - len(query)), end="")
            except:  # noqa: E722
                # Any parse failure (IndexError, etc.) becomes a uniform error.
                raise ValueError("Could not parse version")

            if not (Version(version) >= Version(min_version)):
                raise RuntimeError(f"Required {min_version}, found {version}")
    except ValueError:
        # Parsing failed: this is a bug in our table, not a missing
        # dependency — report it and re-raise.
        print(
            f"\n[!] Sorry, but our script could not parse the output of "
            f'`{" ".join(cmd)}`; please file a bug, or see '
            f"`check_app_environment.py`\n"
        )
        raise
    except Exception as e:
        # Missing or too-old dependency: defer reporting until all checked.
        fail.append((dep, e))

if fail:
    print()
    print("[!] Some system dependencies seem to be unsatisfied")
    print()
    print("   The failed checks were:")
    print()
    for pkg, exc in fail:
        cmd, get_version, min_version = deps[pkg]
        print(f'    - {pkg}: `{" ".join(cmd)}`')
        print("     ", exc)
    print()
    print(
        "   Please refer to https://cesium-ml.org/baselayer "
        "for installation instructions."
    )
    print()
    sys.exit(-1)

print()
try:
    # Sanity check: baselayer is meant to be vendored inside an application
    # that provides a config.yaml / config.yaml.defaults at its root.
    with status("Baselayer installed inside of app"):
        if not (
            os.path.exists("config.yaml") or os.path.exists("config.yaml.defaults")
        ):
            raise RuntimeError()
except RuntimeError:
    print(
        textwrap.dedent(
            """
            It does not look as though baselayer is deployed as
            part of an application.

            Please see

              https://github.com/cesium-ml/baselayer_template_app

            for an example application.
            """
        )
    )
    sys.exit(-1)

print("-" * 20)
107 |
--------------------------------------------------------------------------------
/tools/check_js_deps.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Local check-dependencies binary, installed on demand below.
CHECKER="node_modules/.bin/check-dependencies"

# Bootstrap the checker itself if it is not installed/executable yet.
[[ -x ${CHECKER} ]] || npm install check-dependencies --legacy-peer-deps

# First pass: install any unsatisfied dependencies. The checker's own report
# is unreliable when --install is given (it can claim a package is
# unsatisfied even after successfully installing it), so we re-run it below.
${CHECKER} --install

# Second pass, report-only: announce success if everything is now satisfied.
if ${CHECKER}; then
  echo "✓ All Javascript dependencies satisfied."
fi
20 |
--------------------------------------------------------------------------------
/tools/check_js_updates.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# npm-check runs through npx, so no global installation is required.
CHECKER="npx npm-check"

# Install npm-check locally on first use.
if ! ${CHECKER} --version > /dev/null 2>&1; then
  echo "Update checker not found; installing."
  npm install npm-check
fi

# Interactive dependency update, skipping "unused dependency" warnings.
${CHECKER} --skip-unused -u
13 |
--------------------------------------------------------------------------------
/tools/db_init.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import argparse
3 | import subprocess
4 | import sys
5 | import textwrap
6 |
7 | from status import status
8 |
9 | from baselayer.app.env import load_env
10 | from baselayer.log import make_log
11 |
log = make_log("db_init")

# Command-line interface: `--force` drops and recreates existing databases.
parser = argparse.ArgumentParser(description="Create or re-create the database.")
parser.add_argument(
    "-f",
    "--force",
    action="store_true",
    help="recreate the db, even if it already exists",
)
args, unknown = parser.parse_known_args()

env, cfg = load_env()

# The main application database plus its companion test database.
db = cfg["database.database"]
db_test = db + "_test"
all_dbs = (db, db_test)

user = cfg["database.user"] or db
host = cfg["database.host"]
port = cfg["database.port"]
password = cfg["database.password"]

# Assemble the psql invocation from the configured connection parameters.
psql_cmd = "psql"
flags = f"-U {user}"

if password:
    # NOTE(review): the password is interpolated into a shell command string;
    # this assumes trusted configuration values — confirm.
    psql_cmd = f'PGPASSWORD="{password}" {psql_cmd}'
    flags += " --no-password"

if host:
    flags += f" -h {host}"

if port:
    flags += f" -p {port}"

# Administrative statements (CREATE/DROP DATABASE) run as the postgres user.
admin_flags = flags.replace(f"-U {user}", "-U postgres")

# Trivial query used to probe connectivity to a given database.
test_cmd = f"{psql_cmd} {flags} -c 'SELECT 0;' "
50 |
51 |
def run(cmd):
    """Execute *cmd* through the shell, capturing stdout and stderr.

    Returns the ``subprocess.CompletedProcess`` so callers can inspect
    ``returncode``, ``stdout`` and ``stderr``.
    """
    completed = subprocess.run(cmd, shell=True, capture_output=True)
    return completed
54 |
55 |
def test_db(database):
    """Return True if `psql` can connect to *database* and run `SELECT 0;`."""
    return run(test_cmd + database).returncode == 0
59 |
60 |
log("Initializing databases")

with status(f"Creating user [{user}]"):
    # Best-effort: fails harmlessly if the role already exists.
    run(f'{psql_cmd} {admin_flags} -c "CREATE USER {user};"')

if args.force:
    # --force: drop both databases first; abort on the first failure.
    try:
        for current_db in all_dbs:
            with status(f"Removing database [{current_db}]"):
                p = run(
                    f'{psql_cmd} {admin_flags}\
                       -c "DROP DATABASE {current_db};"'
                )
                if p.returncode != 0:
                    raise RuntimeError()
    except RuntimeError:
        print(
            "Could not delete database: \n\n"
            f'{textwrap.indent(p.stderr.decode("utf-8").strip(), prefix="  ")}\n'
        )
        sys.exit(1)

for current_db in all_dbs:
    with status(f"Creating database [{current_db}]"):
        # We allow this to fail, because oftentimes because of complicated db setups
        # users want to create their own databases

        # If database already exists and we can connect to it, there's nothing to do
        if test_db(current_db):
            continue

        p = run(
            f'{psql_cmd} {admin_flags}\
               -c "CREATE DATABASE {current_db} OWNER {user};"'
        )
        if p.returncode == 0:
            # Grant the app user full access to the freshly created database.
            run(
                f'{psql_cmd} {flags}\
                   -c "GRANT ALL PRIVILEGES ON DATABASE {current_db} TO {user};"\
                   {current_db}'
            )
        else:
            # Creation failed: print the server-side ERROR lines and the
            # manual commands needed to finish the setup by hand.
            print()
            print(f"Warning: could not create db {current_db}")
            print()
            print(
                "\n".join(
                    line
                    for line in p.stderr.decode("utf-8").split("\n")
                    if "ERROR" in line
                )
            )
            print()
            print("  You should create it manually by invoking `createdb`.")
            print("  Then, execute:")
            print()
            print(
                f"    {psql_cmd} {flags}"
                f' -c "GRANT ALL PRIVILEGES ON DATABASE {current_db} TO {user};"'
                f" {current_db}"
            )
            print()

# We only test the connection to the main database, since
# the test database may not exist in production
try:
    with status(f"Testing database connection to [{db}]"):
        if not test_db(db):
            raise RuntimeError()

except RuntimeError:
    # Most likely a pg_hba.conf authentication problem; print a diagnostic
    # including the exact command the user can run to reproduce the failure.
    print(
        textwrap.dedent(
            f"""
            !!! Error accessing database:

            The most common cause of database connection errors is a
            misconfigured `pg_hba.conf`.

            We tried to connect to the database with the following parameters:

              database: {db}
              username: {user}
              host:     {host}
              port:     {port}

            The postgres client exited with the following error message:

            {'-' * 78}
            {p.stderr.decode('utf-8').strip()}
            {'-' * 78}

            Please modify your `pg_hba.conf`, and use the following command to
            check your connection:

              {test_cmd + db}
            """
        )
    )
    sys.exit(1)

print()
163 |
--------------------------------------------------------------------------------
/tools/env_summary.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from baselayer.app.env import load_env
4 | from baselayer.log import colorize
5 |
6 |
def config_print(field, value):
    """Print *value* preceded by a bold ``field:`` label."""
    label = colorize(f"{field}:", bold=True)
    print(label, value)
9 |
10 |
env, cfg = load_env()

# Print a short boxed summary of the effective runtime configuration.
print("=" * 50)
config_print("Server at", f"http://localhost:{cfg['ports.app']}")
config_print(
    "Database at",
    f"{cfg['database.host']}:{cfg['database.port']} ({cfg['database.database']})",
)
config_print("Fake OAuth", "enabled" if cfg["server.auth.debug_login"] else "disabled")
config_print("Debug mode", "enabled" if env.debug else "disabled")
print("=" * 50)
22 |
--------------------------------------------------------------------------------
/tools/fill_conf_values.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import subprocess
5 |
6 | import jinja2
7 | from status import status
8 |
9 | from baselayer.app.env import load_env
10 | from baselayer.log import make_log
11 |
12 | log = make_log("baselayer")
13 |
14 |
def md5sum(fn):
    """Return the hexadecimal MD5 digest of the file at path *fn*.

    The file is consumed in 8 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory.
    """
    import hashlib

    digest = hashlib.md5()
    with open(fn, "rb") as stream:
        for chunk in iter(lambda: stream.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()
23 |
24 |
def version(module):
    """Return *module*'s ``__version__`` string, or ``""`` if it has none."""
    import importlib

    mod = importlib.import_module(module)
    return getattr(mod, "__version__", "")
30 |
31 |
def hash_filter(string, htype):
    """Jinja filter: hex digest of *string* using hashlib algorithm *htype*."""
    import hashlib

    return hashlib.new(htype, string.encode("utf-8")).hexdigest()
38 |
39 |
def nginx_brotli_installed():
    """Check if the nginx brotli module is installed.

    Returns
    -------
    installed : bool
        True if the nginx brotli module is installed, False otherwise
    dynamic : bool
        True if the module is dynamically loaded, False otherwise
    modules_path : str or None
        The directory where the nginx modules are located if dynamic loading
        is used; None otherwise.
    """
    installed = False
    dynamic = False
    modules_path = None

    try:
        output = subprocess.check_output(
            ["nginx", "-V"], stderr=subprocess.STDOUT
        ).decode("utf-8")
    except (subprocess.CalledProcessError, OSError):
        # Bug fix: previously only CalledProcessError was caught, so a missing
        # nginx binary raised FileNotFoundError (an OSError) and crashed the
        # whole script instead of reporting "not installed".
        return False, False, None

    # Option 1: installed at compilation: always loaded
    if (
        "--add-module" in output
        and "brotli" in output.split("--add-module")[1].strip()
    ):
        installed = True
    # Option 2: installed dynamically at compilation or later: has to be loaded
    else:
        # a. find the modules path from nginx's configure arguments
        config_path = (
            str(output.split("--conf-path=")[1].split(" ")[0]).strip()
            if "--conf-path" in output
            else None
        )
        modules_path = (
            str(output.split("--modules-path=")[1].split(" ")[0]).strip()
            if "--modules-path" in output
            else None
        )
        # if there's no modules path, try to guess it from the config path
        if config_path and not modules_path:
            modules_path = os.path.dirname(config_path).replace(
                "nginx.conf", "modules"
            )
        if not modules_path or not os.path.isdir(modules_path):
            modules_path = None

        # b. check that both brotli shared objects exist in the modules path
        if modules_path:
            modules_path = modules_path.rstrip("/")
            if all(
                os.path.isfile(os.path.join(modules_path, f))
                for f in [
                    "ngx_http_brotli_filter_module.so",
                    "ngx_http_brotli_static_module.so",
                ]
            ):
                installed = True
                dynamic = True
            else:
                installed = False
                dynamic = False
                modules_path = None

    return installed, dynamic, modules_path
107 |
108 |
# Extra Jinja2 filters exposed to the configuration templates.
custom_filters = {"md5sum": md5sum, "version": version, "hash": hash_filter}
110 |
111 |
def fill_config_file_values(template_paths):
    """Render each Jinja2 template in *template_paths* against the app config.

    The rendered output is written next to the template, with the template's
    final extension stripped (e.g. ``nginx.conf.template`` -> ``nginx.conf``).
    """
    log("Compiling configuration templates")
    env, cfg = load_env()

    # Expose nginx brotli capability to the templates so they can
    # conditionally enable compression directives.
    brotli_installed, brotli_dynamic, brotli_modules_path = nginx_brotli_installed()
    cfg["fill_config_feature"] = {
        "nginx_brotli": {
            "installed": brotli_installed,
            "dynamic": brotli_dynamic,
            "modules_path": brotli_modules_path,
        }
    }

    for template_path in template_paths:
        with status(template_path):
            directory, template_name = os.path.split(template_path)
            jinja_env = jinja2.Environment(
                loader=jinja2.FileSystemLoader(directory),
            )
            jinja_env.filters.update(custom_filters)

            cfg["env"] = env
            rendered = jinja_env.get_template(template_name).render(cfg)

            # Target file is the template path minus its extension.
            target_path = os.path.splitext(template_path)[0]
            with open(target_path, "w") as f:
                f.write(rendered + "\n")
139 |
140 |
if __name__ == "__main__":
    from argparse import ArgumentParser

    parser = ArgumentParser(description="Fill config file templates")
    parser.add_argument("template_paths", nargs="+")
    # parse_known_args: silently ignore any extra flags the caller passes.
    args, _ = parser.parse_known_args()
    fill_config_file_values(args.template_paths)
148 |
--------------------------------------------------------------------------------
/tools/junitxml_report.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 |
4 | import argparse
5 | import sys
6 | from collections import defaultdict
7 | from xml.etree import ElementTree as ET
8 |
9 | from baselayer.log import colorize
10 |
11 |
def etree_to_dict(t):
    """Recursively convert an ElementTree element into nested dicts.

    Attributes are stored under ``@name`` keys and element text under
    ``#text`` (or as the value itself for leaf elements without
    attributes).  Repeated child tags collapse into a list of values.
    """
    node = {t.tag: {} if t.attrib else None}
    children = list(t)

    if children:
        grouped = defaultdict(list)
        for child_dict in map(etree_to_dict, children):
            for key, value in child_dict.items():
                grouped[key].append(value)
        node = {
            t.tag: {
                key: values[0] if len(values) == 1 else values
                for key, values in grouped.items()
            }
        }

    if t.attrib:
        node[t.tag].update(("@" + key, value) for key, value in t.attrib.items())

    if t.text:
        text = t.text.strip()
        if children or t.attrib:
            if text:
                node[t.tag]["#text"] = text
        else:
            node[t.tag] = text

    return node
31 |
32 |
# --- Script entry: parse the CLI and load the JUnit XML report ---
parser = argparse.ArgumentParser(description="Generate a failure report from JUnitXML")
parser.add_argument("filename", help="JUnit XML file to parse (produced by pytest)")
args = parser.parse_args()


try:
    # Use a context manager so the file handle is closed promptly
    # instead of leaking until garbage collection.
    with open(args.filename) as f:
        data = f.read()
except FileNotFoundError:
    print(f"Could not open JUnitXML file [{args.filename}]")
    sys.exit(-1)

xml = ET.XML(data)
# Renamed from `json` to avoid shadowing the stdlib module name.
report = etree_to_dict(xml)

tests = report["testsuites"]["testsuite"]["testcase"]
48 |
# Print a colorized summary for every failed test case.
# NOTE(review): if the suite contains exactly ONE testcase, etree_to_dict
# collapses the list into a single dict and this loop would iterate its
# string keys instead — confirm whether that case can occur in practice.
for test in tests:
    if "failure" in test:
        message = test["failure"]["@message"]
        text = test["failure"]["#text"]

        # pytest encodes the test module as a dotted "classname".
        filename = test["@classname"].replace(".", "/") + ".py"
        test_name = test["@name"]

        # The first error's traceback ends at the "_ _ _ _" separator
        # pytest prints between stack frames.
        first_error = []
        for line in text.split("\n"):
            if line.startswith("_ _ _ _"):
                break
            first_error.append(line)

        # pytest marks the failing source line with a leading ">".
        error_line = next(
            n for (n, line) in enumerate(first_error) if line.startswith(">")
        )
        # Show up to N lines of context around the failing line.
        N = 3
        cmin = max(0, error_line - N)
        cmax = error_line + N
        first_error_context = first_error[cmin:cmax]
        # The last line looks like "path/to/file.py:123: ErrorType";
        # the second-to-last colon field is the line number.
        lineno = first_error[-1].split(":")[-2]

        print("-" * 80)
        print(colorize("FAIL: ", fg="yellow", bold=True), end="")
        print(colorize(f"(unknown):{lineno}", fg="red"), end="")
        print(" in ", end="")
        print(colorize(test_name, fg="red", bold=True))
        print()
        print("\n".join(first_error_context))

        print()
        print(
            colorize("EDIT:", fg="green"),
        )
        print(f" $EDITOR +{lineno} (unknown)")
        print("-" * 80)
86 |
--------------------------------------------------------------------------------
/tools/makefile_to_help.py:
--------------------------------------------------------------------------------
1 | """
2 | Convert any ## style comments after a Makefile target into help text.
3 |
4 | Usage: makefile_to_help.py ...
5 |
6 | The Makefile can also be preceded by a category, e.g.
7 |
8 | makefile_to_help.py Main:Makefile External:submodule/Makefile
9 |
10 | in which case the category names are printed as a heading before the targets.
11 |
12 | """
13 |
14 | import re
15 | import sys
16 |
# BUG FIX: sys.argv always contains at least the script name, so
# `if not sys.argv:` could never be true and the usage message was
# unreachable.  Require an actual Makefile argument instead.
if len(sys.argv) < 2:
    print("Usage: makefile_to_help.py ...")
    sys.exit(0)
20 |
21 |
def describe_targets(lines):
    """Print a target/description table extracted from Makefile *lines*.

    A target is documented by appending ``## description`` to its rule
    line, e.g. ``build: ## Build the project``.  Targets without such a
    comment are ignored.
    """
    matches = [re.match(r"^([\w-]+): +##(.*)", line) for line in lines]
    groups = [m.groups(0) for m in matches if m]
    targets = {target: desc for (target, desc) in groups}

    # Guard: max() on an empty sequence raises ValueError, so a Makefile
    # with no documented targets previously crashed the script.
    if not targets:
        return

    # Pad target names so the descriptions line up in a column.
    N = max(len(target) for target in targets)

    for target, desc in targets.items():
        print(f"{target:{N}} {desc}")
31 |
32 |
for source in sys.argv[1:]:
    # A source may be prefixed with a category, e.g. "Main:Makefile".
    if ":" in source:
        # Split on the FIRST colon only, so file paths that themselves
        # contain colons are not mangled.
        category, fname = source.split(":", 1)
        print(f'\n{category}\n{"-" * len(category)}')
    else:
        fname = source

    # Close the Makefile promptly instead of relying on GC to reclaim
    # the leaked file handle.
    with open(fname) as f:
        describe_targets(f.readlines())
41 |
--------------------------------------------------------------------------------
/tools/pip_install_requirements.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import sys
3 |
4 | import pkg_resources
5 | from pkg_resources import DistributionNotFound, Requirement, VersionConflict
6 | from status import status
7 |
# Require at least one requirements file on the command line.
if len(sys.argv) < 2:
    print(
        "Usage: pip_install_requirements.py requirements.txt [requirements_other.txt]"
    )
    sys.exit(0)

# Collect the requirement lines from every file given on the command line.
requirements = []
all_req_files = sys.argv[1:]
for req_file in all_req_files:
    with open(req_file) as f:
        requirements.extend(f.readlines())
19 |
20 |
def pip(req_files):
    """Run ``pip install`` over *req_files*, streaming its output.

    Lines reporting already-satisfied requirements are suppressed to keep
    the output short.  Exits the process with pip's return code if the
    install fails.
    """
    args = ["pip", "install"]
    for rf in req_files:
        args += ["-r", rf]
    proc = subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    for raw in iter(proc.stdout.readline, b""):
        text = raw.decode("utf-8")
        if not text.startswith("Requirement already satisfied"):
            print(text, end="")

    retcode = proc.wait()
    if retcode != 0:
        sys.exit(retcode)
39 |
40 |
try:
    # Fast path: check whether the current environment already satisfies
    # every requirement, without invoking pip at all.
    # NOTE(review): pkg_resources is deprecated in favor of
    # importlib.metadata / packaging — confirm before upgrading setuptools.
    with status("Verifying Python package dependencies"):
        pkg_resources.working_set.resolve(
            # For VCS-style requirements ("...#egg=name"), resolve against
            # the egg-name portion only.
            [Requirement.parse(r.split("#egg=")[-1]) for r in requirements]
        )

except (DistributionNotFound, VersionConflict) as e:
    # Something is missing or conflicting: report it and let pip fix it.
    print(e.report())
    pip(all_req_files)
50 |
--------------------------------------------------------------------------------
/tools/setup_services.py:
--------------------------------------------------------------------------------
1 | import os
2 | from collections import Counter
3 | from os.path import join as pjoin
4 |
5 | from baselayer.app.env import load_env
6 | from baselayer.log import make_log
7 |
8 | log = make_log("baselayer")
9 |
10 |
def copy_supervisor_configs():
    """Discover services, apply the enabled/disabled configuration, and
    append each enabled service's supervisor block to the master
    ``supervisor.conf``.

    Raises
    ------
    RuntimeError
        If a service name is defined in more than one services path, or
        if a service is listed as both enabled and disabled.
    """
    env, cfg = load_env()

    # Map service name -> directory.  Also track every name encountered:
    # BUG FIX — the previous code counted `services.keys()`, which are
    # unique by construction (dict.update silently overwrites duplicate
    # keys), so duplicate definitions were never detected.
    services = {}
    all_names = []
    for path in cfg["services.paths"]:
        if os.path.exists(path):
            path_services = [
                d for d in os.listdir(path) if os.path.isdir(pjoin(path, d))
            ]
            all_names.extend(path_services)
            services.update({s: pjoin(path, s) for s in path_services})

    duplicates = [name for name, count in Counter(all_names).items() if count > 1]
    if duplicates:
        raise RuntimeError(f"Duplicate service definitions found for {duplicates}")

    log(f"Discovered {len(services)} services")

    disabled = cfg["services.disabled"] or []
    enabled = cfg["services.enabled"] or []

    both = set().union(disabled).intersection(enabled)
    if both:
        raise RuntimeError(
            f"Invalid service specification: {both} in both enabled and disabled"
        )

    # "*" disables everything; for `enabled`, "*" is a no-op because every
    # non-disabled service runs by default.
    if disabled == "*":
        disabled = services.keys()
    if enabled == "*":
        enabled = []

    services_to_run = set(services.keys()).difference(disabled).union(enabled)
    log(f"Enabling {len(services_to_run)} services")

    # Concatenate each enabled service's supervisor.conf, when present.
    supervisor_configs = []
    for service in services_to_run:
        path = services[service]
        supervisor_conf = pjoin(path, "supervisor.conf")

        if os.path.exists(supervisor_conf):
            with open(supervisor_conf) as f:
                supervisor_configs.append(f.read())

    with open("baselayer/conf/supervisor/supervisor.conf", "a") as f:
        f.write("\n\n".join(supervisor_configs))


if __name__ == "__main__":
    copy_supervisor_configs()
60 |
--------------------------------------------------------------------------------
/tools/silent_monitor.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import shlex
4 | import subprocess
5 | import sys
6 |
# Run a command silently, showing only a one-line status; dump its output
# when it fails.
if len(sys.argv) < 2:
    print("Usage: silent_monitor.py ")
    sys.exit()

cmd = " ".join(sys.argv[1:])

tag = f"Silently executing: {cmd}"
print(f"[·] {tag}", end="")
sys.stdout.flush()

p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)

# BUG FIX: the previous code assigned p.stderr to `stdout` and p.stdout
# to `stderr`, so the labelled sections below were swapped.  Using
# communicate() also reads both pipes concurrently, avoiding the deadlock
# that wait()-then-read can cause when a pipe buffer fills up.
stdout, stderr = p.communicate()
stdout, stderr = stdout.strip(), stderr.strip()
err = p.returncode

if err == 0:
    print(f"\r[✓] {tag}")
else:
    print(f"\r[✗] {tag}")
    print(f"\n! Failure (exit code {err}).")

if stdout:
    print("--- stdout ---")
    print(stdout.decode("utf-8"))

if stderr:
    print("--- stderr ---")
    print(stderr.decode("utf-8"))

if stdout or stderr:
    print("--- end ---")

sys.exit(err)
40 |
--------------------------------------------------------------------------------
/tools/status.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from contextlib import contextmanager
3 |
4 |
@contextmanager
def status(message):
    """Context manager that prints a one-line progress indicator.

    Shows ``[·] message`` while the body runs, then rewrites the line to
    ``[✓] message`` on success or ``[✗] message`` if the body raised
    (the exception is re-raised).
    """
    print(f"[·] {message}", end="")
    sys.stdout.flush()
    failed = False
    try:
        yield
    except BaseException:
        failed = True
        raise
    finally:
        marker = "✗" if failed else "✓"
        print(f"\r[{marker}] {message}")
16 |
--------------------------------------------------------------------------------
/tools/supervisor_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import subprocess
5 | from os.path import join as pjoin
6 |
# Repository root: two levels above this file (tools/ lives inside the
# baselayer checkout, which sits inside the application repository).
base_dir = os.path.abspath(pjoin(os.path.dirname(__file__), "../.."))
8 |
9 |
def supervisor_status():
    """Check status of all services.

    Returns
    -------
    list
        The output lines from ``supervisorctl``.
    int
        Return code of ``supervisorctl``. This will be 0 for all
        services running, or 3 if one of them exited (note: this is
        expected when, e.g., rspack exits normally).
    """
    # The command is fixed, so pass it as an argument list instead of a
    # shell string: no shell process is spawned and no shell parsing of
    # the arguments takes place.
    result = subprocess.run(
        [
            "python",
            "-m",
            "supervisor.supervisorctl",
            "-c",
            "baselayer/conf/supervisor/supervisor.conf",
            "status",
        ],
        cwd=base_dir,
        stdout=subprocess.PIPE,
    )
    # Drop the trailing empty element produced by the final newline.
    return result.stdout.decode().split("\n")[:-1], result.returncode


if __name__ == "__main__":
    supervisor_output, _ = supervisor_status()
    print("\n".join(supervisor_output))
34 |
--------------------------------------------------------------------------------
/tools/test_frontend.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import pathlib
5 | import signal
6 | import subprocess
7 | import sys
8 | import time
9 | from os.path import join as pjoin
10 |
11 | import requests
12 |
13 | sys.path.insert(0, pjoin(os.path.dirname(__file__), "../..")) # noqa
14 |
15 | from baselayer.app.model_util import clear_tables # noqa: E402
16 | from baselayer.log import make_log # noqa: E402
17 | from baselayer.tools.supervisor_status import supervisor_status # noqa: E402
18 |
19 | log = make_log("test_frontend")
20 |
21 |
# Use a fixed seed when pytest-randomly is available so the test order is
# reproducible across runs; otherwise pass no extra pytest arguments.
try:
    import pytest_randomly  # noqa

    RAND_ARGS = "--randomly-seed=1"
except ImportError:
    RAND_ARGS = ""

# Name of the configuration file used for the test run.
TEST_CONFIG = "test_config.yaml"
30 |
31 |
def all_services_running():
    """Check that all webservices were started successfully.

    All webservices controlled by `supervisor` must be currently running
    (RUNNING) or have finished successfully (EXITED). Returns `False` if any
    other statuses (STARTING, STOPPED, etc.) are present.
    """
    valid_states = ("RUNNING", "EXITED")
    status_lines, return_code = supervisor_status()

    # Return code 3 is associated with a service exiting normally.
    if return_code not in (0, 3):
        return False

    return all(
        any(state in status_line for state in valid_states)
        for status_line in status_lines
    )
47 |
48 |
def verify_server_availability(url, timeout=180):
    """Raise exception if webservices fail to launch or connection to `url` is not
    available.

    Polls roughly once per second, up to `timeout` seconds, until the
    supervisor config exists, all services are up, and both the main page
    and the JS bundle respond with HTTP 200.
    """
    for i in range(timeout):
        # The supervisor config is generated during startup; wait for it
        # before querying service status.
        if not os.path.exists("baselayer/conf/supervisor/supervisor.conf"):
            time.sleep(1)
            continue
        try:
            statuses, errcode = supervisor_status()
            # assert is used deliberately here: any failure is caught
            # below and retried until the timeout expires.
            assert (
                all_services_running()
            ), "Webservice(s) failed to launch:\n" + "\n".join(statuses)
            response = requests.get(url)
            assert response.status_code == 200, (
                "Expected status 200, got" f" {response.status_code}" f" for URL {url}."
            )
            # The built JS bundle is the last artifact to appear.
            response = requests.get(url + "/static/build/main.bundle.js")
            assert response.status_code == 200, (
                "Javascript bundle not found," " did rspack fail?"
            )
            return  # all checks passed
        except Exception as e:
            if i == timeout - 1:  # last iteration
                raise ConnectionError(str(e)) from None
            time.sleep(1)
75 |
76 |
if __name__ == "__main__":
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument(
        "test_spec",
        nargs="?",
        default=None,
        help="""Test spec. Example:
test_frontend.py skyportal/tests/api
""",
    )
    parser.add_argument(
        "--xml",
        action="store_true",
        help="Save JUnit xml output to `test-results/junit.xml`",
    )
    parser.add_argument(
        "--headless", action="store_true", help="Run browser headlessly"
    )
    args = parser.parse_args()

    # Initialize the test database connection
    log("Connecting to test database")
    from baselayer.app.config import load_config
    from baselayer.app.models import init_db

    basedir = pathlib.Path(os.path.dirname(__file__)) / ".." / ".."
    cfg = load_config([basedir / TEST_CONFIG])
    # The application name is the first segment of the app factory path.
    app_name = cfg["app.factory"].split(".")[0]
    engine = init_db(**cfg["database"])
    engine.connect()

    if args.test_spec is not None:
        test_spec = args.test_spec
    else:
        # Default: run the application's own test suite.
        test_spec = basedir / app_name / "tests"

    if args.xml:
        test_outdir = basedir / "test-results"
        if not test_outdir.exists():
            test_outdir.mkdir()
        xml = f"--junitxml={test_outdir}/junit.xml"
    else:
        xml = ""

    if args.headless:
        os.environ["BASELAYER_TEST_HEADLESS"] = "1"

    log("Clearing test database...")
    clear_tables()

    # Launch the full web stack in its own process group so the whole
    # process tree can be terminated together in the finally block below.
    web_client = subprocess.Popen(
        ["make", "run_testing"], cwd=basedir, preexec_fn=os.setsid
    )

    server_url = f"http://localhost:{cfg['ports.app']}"
    print()
    log(f"Waiting for server to appear at {server_url}...")

    exit_status = (0, "OK")
    try:
        verify_server_availability(server_url)

        log(f"Launching pytest on {test_spec}...\n")
        p = subprocess.run(
            f"python -m pytest -s -v {xml} {test_spec} " f"{RAND_ARGS}",
            shell=True,
        )
        if p.returncode != 0:
            exit_status = (-1, "Test run failed")

        # Generate the human-readable failure report regardless of outcome.
        p = subprocess.run(
            ["make", "-f", "baselayer/Makefile", "test_report"], cwd=basedir
        )

    except Exception as e:
        log("Could not launch server processes; terminating")
        print(e)
        exit_status = (-1, "Failed to launch pytest")
    finally:
        # Always tear down supervisord and its children, even on failure.
        log("Terminating supervisord...")
        os.killpg(os.getpgid(web_client.pid), signal.SIGTERM)

    code, msg = exit_status
    log(msg)
    sys.exit(code)
164 |
--------------------------------------------------------------------------------
/tools/update_eslint.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Upgrade eslint-config-airbnb together with the exact peer dependencies
# that its latest release declares.
export PKG=eslint-config-airbnb
# Query the peer dependencies as JSON, strip braces/commas and rewrite
# `"name": "version"` pairs into `name@version` specs, then install them
# all as dev dependencies alongside the package itself.
npm info "${PKG}@latest" peerDependencies --json | \
command sed 's/[\{\},]//g ; s/: /@/g' | \
xargs npm install --save-dev "${PKG}@latest"
6 |
--------------------------------------------------------------------------------
/tools/watch_logs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import glob
4 | import os
5 | import threading
6 | import time
7 | from os.path import join as pjoin
8 |
9 | from baselayer.log import colorize
10 |
# Parent of this tools/ directory (the baselayer checkout).
basedir = pjoin(os.path.dirname(__file__), "..")
# NOTE(review): logdir appears unused in this file — confirm before removing.
logdir = "../log"
13 |
14 |
def tail_f(filename, interval=1.0):
    """Yield lines appended to *filename*, like ``tail -f``.

    Waits (retrying every second) until the file can be opened, skips the
    file's existing contents, then yields each new line (without its
    trailing newline) as it is appended, polling every *interval* seconds.
    This generator never terminates on its own.
    """
    f = None
    # Retry until the file exists and can be opened.
    while f is None:
        try:
            f = open(filename)
        except OSError:
            time.sleep(1)

    # Skip existing content: start tailing from the current end of file.
    # (Uses the named st_size attribute rather than the magic tuple
    # index 6 of the raw stat result.)
    f.seek(os.stat(filename).st_size)

    while True:
        where = f.tell()
        line = f.readline()
        if not line:
            # No new data yet: wait, then rewind past any partial read.
            time.sleep(interval)
            f.seek(where)
        else:
            yield line.rstrip("\n")
38 |
39 |
def print_log(filename, color="default", stream=None):
    """
    Print log to stdout; stream is ignored.
    """

    def emit(text):
        print(colorize(text, fg=color))

    # NOTE(review): the literal "(unknown)" below looks like a lost
    # placeholder (perhaps the log filename) — confirm against history.
    emit(f"-> (unknown)")

    for entry in tail_f(filename):
        emit(entry)
52 |
53 |
def log_watcher(printers=None):
    """Watch for new logs, and start following them.

    Parameters
    ----------
    printers : list of callables
        Functions of form `f(logfile, color=None)` used to print the
        tailed log file. By default, logs are sent to stdout. Note
        that the printer is also responsible for following (tailing)
        the log file

    See Also
    --------
    print_log : the default stdout printer

    """
    # Start with a short discovery interval, then back off
    # until that interval is 60s
    interval = 1

    if printers is None:
        printers = [print_log]

    colors = ["default", "green", "yellow", "blue", "magenta", "cyan", "red"]
    watched = set()

    color = 0
    while True:
        all_logs = set(glob.glob("log/*.log"))
        new_logs = all_logs - watched

        # Spawn one tailing thread per printer for each new log file,
        # cycling through the color palette.
        for logfile in sorted(new_logs):
            color = (color + 1) % len(colors)
            for printer in printers:
                thread = threading.Thread(
                    target=printer, args=(logfile,), kwargs={"color": colors[color]}
                )
                thread.start()

        watched = all_logs

        time.sleep(interval)
        # BUG FIX: exponential back-off capped at 60s.  The previous
        # max(interval * 2, 60) jumped straight to 60s on the very first
        # iteration, defeating the short initial discovery interval.
        interval = min(interval * 2, 60)


if __name__ == "__main__":
    log_watcher()
101 |
--------------------------------------------------------------------------------