├── .dev └── volumes │ └── .keep ├── .editorconfig ├── .env.example ├── .github ├── ISSUE_TEMPLATE │ └── Bug-Report.yml ├── dependabot.yml ├── pull_request_template.md └── workflows │ ├── add-depr-ticket-to-depr-board.yml │ ├── add-remove-label-on-comment.yml │ ├── cli-tests.yml │ ├── commitlint.yml │ ├── follow-up-devstack-bugs.yml │ ├── provisioning-tests.yml │ ├── quality.yml │ ├── self-assign-issue.yml │ └── upgrade-python-requirements.yml ├── .gitignore ├── .readthedocs.yaml ├── LICENSE ├── Makefile ├── README-windows.rst ├── README.rst ├── appveyor.yml ├── check.sh ├── compatibility.mk ├── configuration_files ├── analytics_api.yml ├── discovery.yml ├── ecommerce.yml ├── insights.yml ├── registrar.yml └── xqueue.yml ├── course-generator ├── build-course-json.sh ├── create-courses.sh ├── starter-course.json ├── starter-courses.json └── test-course.json ├── credentials ├── assets │ ├── demo-asset-banner-image.png │ ├── demo-asset-certificate-logo.png │ └── demo-asset-logo.png └── generate_program_certificate.sh ├── destroy.sh ├── docker-compose-host.yml ├── docker-compose-themes.yml ├── docker-compose-watchers.yml ├── docker-compose.yml ├── docs ├── Makefile ├── advanced_configuration.rst ├── building-images.rst ├── conf.py ├── database-dumps.rst ├── decisions │ ├── 0001-avoid-default-service-set.rst │ ├── 0002-expect-cli-testing.rst │ ├── 0003-usage-metrics.rst │ ├── 0004-backends-depend-on-frontends.rst │ └── 0005-frontend-package-mounts.rst ├── developing_on_named_release_branches.rst ├── devstack_faq.rst ├── devstack_interface.rst ├── getting_started.rst ├── index.rst ├── logging_in.rst ├── make.bat ├── manual_upgrades.rst ├── pycharm_integration.rst ├── service_list.rst ├── testing_and_debugging.rst ├── troubleshoot_general_tips.rst └── workflow.rst ├── ecommerce.sql ├── edxapp.sql ├── edxapp_csmh.sql ├── enterprise ├── provision.sh └── worker_permissions.py ├── in ├── load-db.sh ├── microfrontend.yml ├── mongo-provision.js ├── openedx.yaml ├── options.mk ├── programs ├── README.md ├── discovery.py ├── lms.py └── provision.sh ├── provision-analyticsapi.sh ├── provision-coursegraph.sh ├── provision-credentials.sh ├── provision-discovery.sh ├── provision-e2e.sh ├── provision-ecommerce.sh ├── provision-forum.sh ├── provision-ida-user.sh ├── provision-ida.sh ├── provision-insights.sh ├── provision-lms.sh ├── provision-mysql80.sql ├── provision-notes.sh ├── provision-registrar.sh ├── provision-retirement-user.sh ├── provision-xqueue.sh ├── provision.sh ├── provision.sql ├── repo.sh ├── requirements ├── base.in ├── base.txt ├── constraints.txt ├── dev.in ├── dev.txt ├── doc.in ├── doc.txt ├── pip-tools.in ├── pip-tools.txt ├── pip.in ├── pip.txt ├── test.in └── test.txt ├── scripts ├── Jenkinsfiles │ ├── devstack_snapshot.sh │ └── snapshot ├── README.txt ├── colors.sh ├── extract_snapshot_linux.sh ├── extract_snapshot_mac.sh ├── make_warn_default_large.sh ├── restore.py ├── send_metrics.py └── snapshot.py ├── tests ├── README.rst ├── metrics.py └── warn_default.py ├── tox.ini ├── update-dbs-init-sql-scripts.sh ├── upgrade_mongo_4_0.sh ├── upgrade_mongo_4_2.sh ├── upgrade_mongo_4_4.sh ├── upgrade_mongo_5_0.sh └── wait-ready.sh /.dev/volumes/.keep: -------------------------------------------------------------------------------- 1 | This directory contains the data for devstack. The contents of this directory are ignored by Git. 
2 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # *************************** 2 | # ** DO NOT EDIT THIS FILE ** 3 | # *************************** 4 | # 5 | # This file was generated by edx-lint: https://github.com/openedx/edx-lint 6 | # 7 | # If you want to change this file, you have two choices, depending on whether 8 | # you want to make a local change that applies only to this repo, or whether 9 | # you want to make a central change that applies to all repos using edx-lint. 10 | # 11 | # Note: If your .editorconfig file is simply out-of-date relative to the latest 12 | # .editorconfig in edx-lint, ensure you have the latest edx-lint installed 13 | # and then follow the steps for a "LOCAL CHANGE". 14 | # 15 | # LOCAL CHANGE: 16 | # 17 | # 1. Edit the local .editorconfig_tweaks file to add changes just to this 18 | # repo's file. 19 | # 20 | # 2. Run: 21 | # 22 | # $ edx_lint write .editorconfig 23 | # 24 | # 3. This will modify the local file. Submit a pull request to get it 25 | # checked in so that others will benefit. 26 | # 27 | # 28 | # CENTRAL CHANGE: 29 | # 30 | # 1. Edit the .editorconfig file in the edx-lint repo at 31 | # https://github.com/openedx/edx-lint/blob/master/edx_lint/files/.editorconfig 32 | # 33 | # 2. install the updated version of edx-lint (in edx-lint): 34 | # 35 | # $ pip install . 36 | # 37 | # 3. Run (in edx-lint): 38 | # 39 | # $ edx_lint write .editorconfig 40 | # 41 | # 4. Make a new version of edx_lint, submit and review a pull request with the 42 | # .editorconfig update, and after merging, update the edx-lint version and 43 | # publish the new version. 44 | # 45 | # 5. In your local repo, install the newer version of edx-lint. 46 | # 47 | # 6. Run: 48 | # 49 | # $ edx_lint write .editorconfig 50 | # 51 | # 7. This will modify the local file. Submit a pull request to get it 52 | # checked in so that others will benefit. 53 | # 54 | # 55 | # 56 | # 57 | # 58 | # STAY AWAY FROM THIS FILE! 59 | # 60 | # 61 | # 62 | # 63 | # 64 | # SERIOUSLY. 65 | # 66 | # ------------------------------ 67 | # Generated by edx-lint version: 5.2.5 68 | # ------------------------------ 69 | [*] 70 | end_of_line = lf 71 | insert_final_newline = true 72 | charset = utf-8 73 | indent_style = space 74 | indent_size = 4 75 | max_line_length = 120 76 | trim_trailing_whitespace = true 77 | 78 | [{Makefile, *.mk}] 79 | indent_style = tab 80 | indent_size = 8 81 | 82 | [*.{yml,yaml,json}] 83 | indent_size = 2 84 | 85 | [*.js] 86 | indent_size = 2 87 | 88 | [*.diff] 89 | trim_trailing_whitespace = false 90 | 91 | [.git/*] 92 | trim_trailing_whitespace = false 93 | 94 | [COMMIT_EDITMSG] 95 | max_line_length = 72 96 | 97 | [*.rst] 98 | max_line_length = 79 99 | 100 | # f2f02689fced7a2e0c62c2f9803184114dc2ae4b 101 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Change the value to the IP address of your machine/browser to enable debugging. 2 | XDEBUG_IP_ADDRESS=127.0.0.1 3 | # Provides local environment overrides if used. 
See docker.settings.private.php.example for details 4 | DRUPAL_EXTRA_SETTINGS=/var/www/html/sites/default/docker.settings.private.php 5 | 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/Bug-Report.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | description: File a bug report 3 | title: "[Bug]: " 4 | labels: ["bug"] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Please check the [devstack troubleshooting guide](https://edx.readthedocs.io/projects/open-edx-devstack/en/latest/troubleshoot_general_tips.html) and the [existing list of blocking bugs](https://github.com/openedx/devstack/labels/blocker) before filing a new issue. 10 | - type: textarea 11 | id: bug-report 12 | attributes: 13 | label: Describe the bug that you are seeing. 14 | validations: 15 | required: true 16 | - type: input 17 | id: container 18 | attributes: 19 | label: Did this happen on the host (your machine or the remote instance) or in the container? 20 | description: e.g. Did this happen outside of running `make dev.shell.` or inside running `make dev.shell.`? 21 | validations: 22 | required: true 23 | - type: textarea 24 | id: reproduction-steps 25 | attributes: 26 | label: Steps to reproduce. 27 | description: Do you have a way to replicate what you're seeing? 28 | validations: 29 | required: false 30 | - type: dropdown 31 | id: mac-type 32 | attributes: 33 | label: What system was this issue seen on? 34 | description: What type of OS/hardware was devstack running on when you observed it? 35 | options: 36 | - Apple Silicon 37 | - Apple Intel 38 | - Hosted Devstack 39 | - Linux 40 | - Other 41 | validations: 42 | required: true 43 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Adding new check for github-actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | reviewers: 9 | - "openedx/arbi-bom" 10 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ---- 2 | 3 | I've completed each of the following or determined they are not applicable: 4 | 5 | - [ ] Made a plan to communicate any major developer interface changes (or N/A) 6 | -------------------------------------------------------------------------------- /.github/workflows/add-depr-ticket-to-depr-board.yml: -------------------------------------------------------------------------------- 1 | # Run the workflow that adds new tickets that are either: 2 | # - labelled "DEPR" 3 | # - title starts with "[DEPR]" 4 | # - body starts with "Proposal Date" (this is the first template field) 5 | # to the org-wide DEPR project board 6 | 7 | name: Add newly created DEPR issues to the DEPR project board 8 | 9 | on: 10 | issues: 11 | types: [opened] 12 | 13 | jobs: 14 | routeissue: 15 | uses: openedx/.github/.github/workflows/add-depr-ticket-to-depr-board.yml@master 16 | secrets: 17 | GITHUB_APP_ID: ${{ secrets.GRAPHQL_AUTH_APP_ID }} 18 | GITHUB_APP_PRIVATE_KEY: ${{ secrets.GRAPHQL_AUTH_APP_PEM }} 19 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_ISSUE_BOT_TOKEN }} 20 | -------------------------------------------------------------------------------- 
/.github/workflows/add-remove-label-on-comment.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs when a comment is made on the ticket 2 | # If the comment starts with "label: " it tries to apply 3 | # the label indicated in rest of comment. 4 | # If the comment starts with "remove label: ", it tries 5 | # to remove the indicated label. 6 | # Note: Labels are allowed to have spaces and this script does 7 | # not parse spaces (as often a space is legitimate), so the command 8 | # "label: really long lots of words label" will apply the 9 | # label "really long lots of words label" 10 | 11 | name: Allows for the adding and removing of labels via comment 12 | 13 | on: 14 | issue_comment: 15 | types: [created] 16 | 17 | jobs: 18 | add_remove_labels: 19 | uses: openedx/.github/.github/workflows/add-remove-label-on-comment.yml@master 20 | 21 | -------------------------------------------------------------------------------- /.github/workflows/cli-tests.yml: -------------------------------------------------------------------------------- 1 | # CLI tests: Check that various Makefile targets behave as expected 2 | # (without going deeper into provisioning and such) 3 | 4 | name: CLI tests 5 | 6 | on: 7 | push: 8 | branches: [master] 9 | pull_request: 10 | branches: 11 | - '**' 12 | 13 | jobs: 14 | 15 | run_ci: 16 | runs-on: ${{ matrix.os.image }} 17 | env: 18 | DEVSTACK_WORKSPACE: /tmp 19 | SHALLOW_CLONE: 1 20 | # Don't report metrics as real usage 21 | DEVSTACK_METRICS_TESTING: ci 22 | strategy: 23 | matrix: 24 | os: 25 | - name: linux 26 | image: ubuntu-20.04 # Focal Fossa 27 | - name: mac 28 | image: macos-12 29 | python-version: 30 | - '3.8' 31 | fail-fast: false 32 | 33 | steps: 34 | - uses: actions/checkout@v4 35 | - name: setup python 36 | uses: actions/setup-python@v5 37 | with: 38 | python-version: ${{ matrix.python-version }} 39 | 40 | - name: Docker installation - Linux 41 | if: ${{ matrix.os.name == 'linux' }} 42 | run: | 43 | docker version 44 | sudo apt-get update 45 | sudo apt install apt-transport-https ca-certificates curl software-properties-common 46 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 47 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal test" 48 | sudo apt update 49 | sudo apt install docker-ce containerd.io 50 | docker version 51 | docker compose --version 52 | 53 | # Note: we cannot use Docker Desktop because it has not been licensed for use in GithubActions 54 | - name: Docker installation - Mac 55 | if: ${{ matrix.os.name == 'mac' }} 56 | run: | 57 | brew install lima docker docker-compose 58 | limactl start --name=default template://docker 59 | echo "DOCKER_HOST=unix:///Users/runner/.lima/default/sock/docker.sock" >> $GITHUB_ENV 60 | mkdir -p ~/.docker/cli-plugins 61 | ln -sfn /usr/local/opt/docker-compose/bin/docker-compose ~/.docker/cli-plugins/docker-compose 62 | 63 | - name: Install Python dependencies 64 | run: make requirements 65 | 66 | # proactively download and extract the image to avoid test timeouts in tests/metrics.py 67 | # this should be moved into a test setup 68 | - name: Pull redis docker image 69 | run: make dev.pull.redis 70 | 71 | - name: CLI tests 72 | run: pytest -s ./tests/*.py 73 | -------------------------------------------------------------------------------- /.github/workflows/commitlint.yml: -------------------------------------------------------------------------------- 1 | # Run commitlint on the commit 
messages in a pull request. 2 | 3 | name: Lint Commit Messages 4 | 5 | on: 6 | - pull_request 7 | 8 | jobs: 9 | commitlint: 10 | uses: openedx/.github/.github/workflows/commitlint.yml@master 11 | -------------------------------------------------------------------------------- /.github/workflows/follow-up-devstack-bugs.yml: -------------------------------------------------------------------------------- 1 | name: Add comment 2 | on: 3 | issues: 4 | types: 5 | - labeled 6 | jobs: 7 | add-comment: 8 | if: github.event.label.name == 'bug' 9 | runs-on: ubuntu-latest 10 | permissions: 11 | issues: write 12 | steps: 13 | - name: Add comment 14 | uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 15 | with: 16 | issue-number: ${{ github.event.issue.number }} 17 | body: | 18 | Follow-up checklist (for Arch-BOM usage) 19 | - [ ] Is the issue flaky or consistent? 20 | - [ ] Does it affect multiple people or multiple types of systems? 21 | - [ ] Update the devstack troubleshooting documentation page if necessary 22 | - [ ] Do we need a new troubleshooting section? 23 | - [ ] Did a troubleshooting section already exist, but it wasn't easy to find given the symptoms? 24 | - [ ] If a recurring issue, should we ticket an automated resolution in place of the doc? 25 | -------------------------------------------------------------------------------- /.github/workflows/provisioning-tests.yml: -------------------------------------------------------------------------------- 1 | # Core tests: Provision and bring up various services on devstack 2 | 3 | name: Provisioning tests 4 | 5 | on: 6 | push: 7 | branches: [master] 8 | paths-ignore: 9 | - '**.rst' 10 | pull_request: 11 | paths-ignore: 12 | - '**.rst' 13 | schedule: 14 | # run at 7:30 am M-F 15 | - cron: '30 11 * * 1-5' 16 | 17 | jobs: 18 | 19 | run_ci: 20 | runs-on: ${{ matrix.os }} 21 | env: 22 | DEVSTACK_WORKSPACE: /tmp 23 | SHALLOW_CLONE: 1 24 | # Don't report metrics as real usage 25 | DEVSTACK_METRICS_TESTING: ci 26 | strategy: 27 | matrix: 28 | os: 29 | - ubuntu-20.04 # Ubuntu 20.04 "Focal Fossa" 30 | python-version: [ '3.8' ] 31 | services: [ discovery+lms+forum ,registrar+lms, ecommerce+lms, edx_notes_api+lms, credentials+lms, xqueue, analyticsapi+insights+lms] 32 | fail-fast: false # some services can be flaky; let others run to completion even if one fails 33 | 34 | steps: 35 | - uses: actions/checkout@v4 36 | - name: setup python 37 | uses: actions/setup-python@v5 38 | with: 39 | python-version: ${{ matrix.python-version }} 40 | 41 | - name: installations and version upgrades 42 | run: | 43 | docker version 44 | sudo apt-get update 45 | sudo apt install apt-transport-https ca-certificates curl software-properties-common 46 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 47 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal test" 48 | sudo apt update 49 | sudo apt install docker-ce containerd.io 50 | docker version 51 | docker compose --version 52 | 53 | - name: free up disk space 54 | # 2023-09-28: google-cloud-sdk removed from this list because it was intermittently 55 | # unavailable as an apt package to remove, and might be migrating to snap. If more 56 | # disk space is needed, see if the snap is installed, and remove that. 
57 | run: sudo apt remove --purge -y ghc-* azure-cli hhvm llvm-* dotnet-* powershell mono-* php* ruby* 58 | 59 | - name: set up requirements 60 | run: make requirements 61 | 62 | - name: clone repositories 63 | run: make dev.clone.https 64 | 65 | - name: pull images and print 66 | run: | 67 | make dev.pull.${{matrix.services}} 68 | docker images --digests | grep latest | sort 69 | 70 | - name: provision 71 | run: make dev.provision.${{matrix.services}} 72 | 73 | - name: "Bring up services" 74 | run: make dev.up.${{matrix.services}} 75 | 76 | - name: "Wait for services to become ready" 77 | run: | 78 | # Wait a reasonable amount of time for services to come up. If they 79 | # don't, then call the checks one more time to ensure that diagnostic 80 | # information is printed out. (It's suppressed by wait-for.) 81 | timeout 5m make dev.wait-for.${{matrix.services}} || timeout 1m make dev.check.${{matrix.services}} 82 | 83 | - name: notify on failure 84 | if: ${{ failure() && github.ref == 'refs/heads/master' }} 85 | uses: dawidd6/action-send-mail@v3 86 | with: 87 | server_address: email-smtp.us-east-1.amazonaws.com 88 | server_port: 465 89 | username: ${{secrets.EDX_SMTP_USERNAME}} 90 | password: ${{secrets.EDX_SMTP_PASSWORD}} 91 | subject: 'Failure: Devstack provisioning tests for ${{matrix.services}} #${{github.run_id}}' 92 | to: devstack-provisioning-tests@2u-internal.opsgenie.net 93 | from: github-actions 94 | body: | 95 | Devstack provisioning tests in ${{github.repository}} for ${{matrix.services}} failed! 96 | For details, see https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} 97 | Runbook url: https://2u-internal.atlassian.net/wiki/spaces/AT/pages/16384920/Failure+Devstack+provisioning+tests+-+Runbook 98 | 99 | - name: close alerts on success 100 | if: ${{ !failure() && github.ref == 'refs/heads/master' }} 101 | uses: dawidd6/action-send-mail@v3 102 | with: 103 | server_address: email-smtp.us-east-1.amazonaws.com 104 | server_port: 465 105 | username: ${{secrets.EDX_SMTP_USERNAME}} 106 | password: ${{secrets.EDX_SMTP_PASSWORD}} 107 | subject: 'Back to normal: Devstack provisioning tests for ${{matrix.services}} #${{github.run_id}}' 108 | to: devstack-provisioning-tests@2u-internal.opsgenie.net 109 | from: github-actions 110 | body: Devstack provisioning tests in ${{github.repository}} are back to normal for ${{matrix.services}} 111 | 112 | - name: docs 113 | run: make docs 114 | -------------------------------------------------------------------------------- /.github/workflows/quality.yml: -------------------------------------------------------------------------------- 1 | # Assorted quality checks for PRs. 
2 | 3 | name: Quality checks 4 | 5 | on: 6 | push: 7 | branches: [master] 8 | pull_request: 9 | branches: 10 | - '**' 11 | 12 | jobs: 13 | 14 | run_ci: 15 | runs-on: ubuntu-20.04 16 | env: 17 | DEVSTACK_WORKSPACE: /tmp 18 | SHALLOW_CLONE: 1 19 | strategy: 20 | matrix: 21 | python-version: 22 | - '3.8' 23 | fail-fast: false 24 | 25 | steps: 26 | - uses: actions/checkout@v4 27 | - name: setup python 28 | uses: actions/setup-python@v5 29 | with: 30 | python-version: ${{ matrix.python-version }} 31 | 32 | - name: Test Makefile 33 | run: make selfcheck 34 | 35 | - name: Install Python dependencies 36 | run: make requirements 37 | 38 | - name: Test that docs build without errors 39 | run: make docs 40 | -------------------------------------------------------------------------------- /.github/workflows/self-assign-issue.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs when a comment is made on the ticket 2 | # If the comment starts with "assign me" it assigns the author to the 3 | # ticket (case insensitive) 4 | 5 | name: Assign comment author to ticket if they say "assign me" 6 | on: 7 | issue_comment: 8 | types: [created] 9 | 10 | jobs: 11 | self_assign_by_comment: 12 | uses: openedx/.github/.github/workflows/self-assign-issue.yml@master 13 | -------------------------------------------------------------------------------- /.github/workflows/upgrade-python-requirements.yml: -------------------------------------------------------------------------------- 1 | name: Upgrade Requirements 2 | 3 | on: 4 | schedule: 5 | - cron: "0 2 * * 3" 6 | workflow_dispatch: 7 | inputs: 8 | branch: 9 | description: 'Target branch to create requirements PR against' 10 | required: true 11 | default: 'master' 12 | jobs: 13 | call-upgrade-python-requirements-workflow: 14 | with: 15 | branch: ${{ github.event.inputs.branch }} 16 | team_reviewers: "arbi-bom" 17 | email_address: arbi-bom@edx.org 18 | send_success_notification: false 19 | secrets: 20 | requirements_bot_github_token: ${{ secrets.REQUIREMENTS_BOT_GITHUB_TOKEN }} 21 | requirements_bot_github_email: ${{ secrets.REQUIREMENTS_BOT_GITHUB_EMAIL }} 22 | edx_smtp_username: ${{ secrets.EDX_SMTP_USERNAME }} 23 | edx_smtp_password: ${{ secrets.EDX_SMTP_PASSWORD }} 24 | uses: openedx/.github/.github/workflows/upgrade-python-requirements.yml@master 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # virtualenv 79 | venv/ 80 | ENV/ 81 | 82 | # Spyder project settings 83 | .spyderproject 84 | 85 | # Rope project settings 86 | .ropeproject 87 | 88 | # OS X 89 | .DS_Store 90 | 91 | # VSCode 92 | .vscode/ 93 | 94 | # PyCharm 95 | .idea/ 96 | 97 | .dev/ 98 | 99 | # Docker 100 | .docker-sync/ 101 | .env 102 | 103 | # Personal makefile extensions 104 | local.mk 105 | 106 | # Local option overrides 107 | options.local.mk 108 | 109 | # emacs 110 | *~ -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | build: 9 | os: "ubuntu-22.04" 10 | tools: 11 | python: "3.8" 12 | 13 | # Build documentation in the docs/ directory with Sphinx 14 | sphinx: 15 | configuration: docs/conf.py 16 | 17 | python: 18 | install: 19 | - requirements: requirements/doc.txt 20 | -------------------------------------------------------------------------------- /README-windows.rst: -------------------------------------------------------------------------------- 1 | Open edX Devstack on Windows (Alpha) 2 | ==================================== 3 | 4 | System Requirements 5 | ------------------- 6 | 7 | * Windows 10 1803 (Spring 2018) 8 | 9 | * This has been tested on the Spring 2018 release of Windows 10 only 10 | 11 | * NTFS file system (symlinks and MSYS2 won't work on FAT* partitions) 12 | 13 | * Developer Mode enabled https://docs.microsoft.com/en-us/windows/uwp/get-started/enable-your-device-for-development 14 | 15 | * Needed to allow git to create symlinks 16 | 17 | * Docker for Windows 18 | 19 | * Git and Git bash from https://git-scm.com/ 20 | 21 | * Make from ezwinports installed 22 | 23 | * Download make without guile from https://sourceforge.net/projects/ezwinports/files/ 24 | 25 | * Copy the contents of the make zip file into C:\\Program Files\\Git\\mingw64 26 | 27 | Provisioning Devstack 28 | --------------------- 29 | 30 | Follow the instructions in the main README. Run the make commands in git bash. Skip the "make requirements" step. 31 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Devstack 2 | ################# 3 | 4 | |ci-provisioning-badge| |ci-cli-badge| |doc-badge| |license-badge| 5 | |status-badge| 6 | 7 | 8 | DEPRECATION NOTICE 9 | ****************** 10 | 11 | Going forward, devstack will be primarily used for development by 2U. To do development 12 | on Open edX, it is recommended that `Tutor`_ be used instead. 
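As a rough sketch of what the replacement workflow looks like (the exact commands vary by Tutor version, so treat this as illustrative and follow the Tutor documentation linked from this notice for the authoritative steps)::

    pip install "tutor[full]"
    tutor local launch   # older Tutor releases use "tutor local quickstart"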
13 | 14 | For more information on this deprecation, please visit the `associated deprecation ticket`_. 15 | 16 | .. _Tutor: https://docs.tutor.edly.io/ 17 | .. _associated deprecation ticket: https://github.com/openedx/devstack/issues/907 18 | 19 | 20 | Getting Started 21 | *************** 22 | 23 | The `Getting Started guide`_ lives with the rest of the documentation in Read the Docs. 24 | 25 | .. _Getting Started guide: https://edx.readthedocs.io/projects/open-edx-devstack/en/latest/getting_started.html 26 | 27 | Getting Help 28 | ************ 29 | 30 | Documentation 31 | ============= 32 | 33 | Start by going through `the documentation`_ on Read the Docs. If you need more help see below. 34 | 35 | .. _the documentation: https://edx.readthedocs.io/projects/open-edx-devstack/en/latest 36 | 37 | More Help 38 | ========= 39 | 40 | If you're having trouble, we have discussion forums at 41 | https://discuss.openedx.org where you can connect with others in the 42 | community. 43 | 44 | Our real-time conversations are on Slack. You can request a `Slack 45 | invitation`_, then join our `community Slack workspace`_. 46 | 47 | For anything non-trivial, the best path is to open an issue in this 48 | repository with as many details about the issue you are facing as you 49 | can provide. 50 | 51 | https://github.com/openedx/devstack/issues 52 | 53 | For more information about these options, see the `Getting Help`_ page. 54 | 55 | .. _Slack invitation: https://openedx.org/slack 56 | .. _community Slack workspace: https://openedx.slack.com/ 57 | .. _Getting Help: https://openedx.org/getting-help 58 | 59 | License 60 | ******* 61 | 62 | The code in this repository is licensed under the AGPL 3.0 unless 63 | otherwise noted. 64 | 65 | Please see `LICENSE `_ for details. 66 | 67 | Contributing 68 | ************ 69 | 70 | Contributions are very welcome. 71 | Please read `How To Contribute `_ for details. 72 | 73 | This project is currently accepting all types of contributions, bug fixes, 74 | security fixes, maintenance work, or new features. However, please make sure 75 | to have a discussion about your new feature idea with the maintainers prior to 76 | beginning development to maximize the chances of your change being accepted. 77 | You can start a conversation by creating a new issue on this repo summarizing 78 | your idea. 79 | 80 | The Open edX Code of Conduct 81 | **************************** 82 | 83 | All community members are expected to follow the `Open edX Code of Conduct`_. 84 | 85 | .. _Open edX Code of Conduct: https://openedx.org/code-of-conduct/ 86 | 87 | People 88 | ****** 89 | 90 | **TODO:** Create ``catalog-info.yaml`` for Backstage, and update this section. 91 | 92 | Reporting Security Issues 93 | ************************* 94 | 95 | Please do not report security issues in public. Please email security@openedx.org. 96 | 97 | .. |ci-provisioning-badge| image:: https://github.com/openedx/devstack/actions/workflows/provisioning-tests.yml/badge.svg?branch=master 98 | :target: https://github.com/openedx/devstack/actions/workflows/provisioning-tests.yml 99 | :alt: CI Provisioning 100 | 101 | .. |ci-cli-badge| image:: https://github.com/openedx/devstack/actions/workflows/cli-tests.yml/badge.svg?branch=master 102 | :target: https://github.com/openedx/devstack/actions/workflows/cli-tests.yml 103 | :alt: CI CLI 104 | 105 | .. 
|doc-badge| image:: https://readthedocs.org/projects/open-edx-devstack/badge/?version=latest 106 | :target: https://open-edx-devstack.readthedocs.io/en/latest/ 107 | :alt: Documentation 108 | 109 | .. |license-badge| image:: https://img.shields.io/github/license/openedx/devstack.svg 110 | :target: https://github.com/openedx/devstack/blob/master/LICENSE 111 | :alt: License 112 | 113 | .. |status-badge| image:: https://img.shields.io/badge/Status-Maintained-brightgreen 114 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | image: Visual Studio 2017 2 | 3 | branches: 4 | only: 5 | - master 6 | 7 | environment: 8 | DEVSTACK_WORKSPACE: x:/devstack 9 | SHALLOW_CLONE: 1 10 | 11 | install: 12 | - curl -fsSL -o make-4.2.1-without-guile-w32-bin.zip https://sourceforge.net/projects/ezwinports/files/make-4.2.1-without-guile-w32-bin.zip/download 13 | - 7z x make-4.2.1-without-guile-w32-bin.zip -oC:\"Program Files"\Git\mingw64 14 | 15 | build_script: 16 | # Increase the Linux VM memory from the default 2 GB to 4 GB 17 | - ps: Get-VM 'MobyLinuxVM' | Set-VMMemory -DynamicMemoryEnabled $true -MaximumBytes (4*1024*1024*1024) 18 | # See https://ci.appveyor.com/project/appveyor-tests/docker-ce for context on 19 | # using Linux Docker containers in AppVeyor Windows VMs 20 | # 21 | # Switching Docker to Linux for the first time takes around a minute. This is 22 | # the time required to start the "MobyLinuxVM" VM: 23 | # This won't work until we switch back to a paid AppVeyor plan after resolving 24 | # https://openedx.atlassian.net/browse/TE-2761 25 | #- docker-switch-linux 26 | - md X:\devstack 27 | - "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make dev.clone.https\"" 28 | # Stop here until we get provisioning to finish reliably 29 | #- "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make dev.pull\"" 30 | 31 | test_script: 32 | - "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make help\"" 33 | - "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make check-memory\"" 34 | - "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make validate\"" 35 | - "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make dev.status\"" 36 | - "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make dev.checkout\"" 37 | # Stop here until we get provisioning to finish reliably 38 | #- "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make dev.provision\"" 39 | #- "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make dev.up\"" 40 | # LMS needs like 60 seconds to come up 41 | #- ps: Start-Sleep -s 60 42 | #- "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make healthchecks\"" 43 | #- "\"%ProgramFiles%/Git/bin/bash.exe\" -c \"make validate-lms-volume\"" 44 | -------------------------------------------------------------------------------- /check.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Run checks for the provided service(s). 3 | # To specify multiple services, separate them with spaces or plus signs (+). 4 | # To specify all services, just pass in "all". 5 | # 6 | # Examples: 7 | # ./check.sh lms 8 | # ./check.sh lms+forum 9 | # ./check.sh lms+forum discovery 10 | # ./check.sh all 11 | # 12 | # Exits 0 if successful; non-zero otherwise. 13 | # 14 | # Fails if no services specified. 15 | # 16 | # Note that passing in a non-existent service will not fail if there are 17 | # other successful checks. 
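#
# For example, "./check.sh lms+forum" is normalized below into the padded
# string " lms forum ", so the whole-word test in should_check
# ([[ " lms forum " == *" lms "* ]]) matches "lms" and "forum" exactly,
# while a longer name such as "lms_watcher" cannot match by prefix.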
18 | 19 | set -eu -o pipefail 20 | 21 | # Grab all arguments into one string, replacing plus signs with spaces. 22 | # Pad on either side with spaces so that the regex in `should_check` works correctly. 23 | services=" ${*//+/ } " 24 | 25 | # Which checks succeeded and failed. 26 | succeeded="" 27 | failed="" 28 | 29 | # Returns whether service in first arg should be checked. 30 | should_check() { 31 | local service="$1" 32 | if [[ "$services" == *" all "* ]] || [[ "$services" == *" $service "* ]]; then 33 | return 0 # Note that '0' means 'success' (i.e., true) in bash. 34 | else 35 | return 1 36 | fi 37 | } 38 | 39 | # Runs a check named $1 on service $2 using the command $3. 40 | run_check() { 41 | local check_name="$1" 42 | local service="$2" 43 | local cmd="$3" 44 | echo "> $cmd" 45 | set +e # Disable exit-on-error 46 | if bash -c "$cmd"; then # Run the command itself and check if it succeeded. 47 | succeeded="$succeeded $check_name" 48 | else 49 | docker compose logs --tail 500 "$service" # Just show recent logs, not all history 50 | failed="$failed $check_name" 51 | fi 52 | set -e # Re-enable exit-on-error 53 | echo # Newline 54 | } 55 | 56 | mysql_run_check() { 57 | container_name="$1" 58 | mysql_probe="SELECT EXISTS(SELECT 1 FROM mysql.user WHERE user = 'root')" 59 | # The use of `--protocol tcp` forces MySQL to connect over TCP rather than 60 | # via a UNIX socket. This is needed because when MySQL starts for the first 61 | # time in a new container, it starts a "temporary server" that runs for a 62 | # few seconds and then shuts down before the "real" server starts up. The 63 | # temporary server does not listen on the TCP port, but if the mysql 64 | # command is not told which server to use, it will first try the UNIX 65 | # socket and only after that will it try the default TCP port. 66 | # 67 | # By specifying that mysql should use TCP, we won't get an early false 68 | # positive "ready" response while the temporary server is running. 
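#
# The probe query prints "1" once the real server is accepting TCP
# connections and can authenticate the root user; run_check records the
# outcome under the "<container>_query" check name.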
69 | run_check "${container_name}_query" "$container_name" \ 70 | "docker compose exec -T $(printf %q "$container_name") mysql --protocol tcp -uroot -se $(printf %q "$mysql_probe")" 71 | } 72 | 73 | if should_check mysql57; then 74 | echo "Checking MySQL 5.7 query endpoint:" 75 | mysql_run_check mysql57 76 | fi 77 | 78 | if should_check mysql80; then 79 | echo "Checking MySQL 8.0 query endpoint:" 80 | mysql_run_check mysql80 81 | fi 82 | 83 | if should_check mongo; then 84 | echo "Checking MongoDB status:" 85 | run_check mongo_status mongo \ 86 | "docker compose exec -T mongo mongo --eval \"db.serverStatus()\"" 87 | fi 88 | 89 | if should_check registrar; then 90 | echo "Checking Registrar heartbeat:" 91 | run_check registrar_heartbeat registrar \ 92 | "curl --fail -L http://localhost:18734/health" 93 | fi 94 | 95 | if should_check lms; then 96 | echo "Checking LMS heartbeat:" 97 | run_check lms_heartbeat lms \ 98 | "curl --fail -L http://localhost:18000/heartbeat" 99 | 100 | echo "Validating LMS volume:" 101 | run_check lms_volume lms \ 102 | "make validate-lms-volume" 103 | fi 104 | 105 | if should_check cms; then 106 | echo "Checking CMS heartbeat:" 107 | run_check cms_heartbeat cms \ 108 | "curl --fail -L http://localhost:18010/heartbeat" 109 | fi 110 | 111 | if should_check ecommerce; then 112 | echo "Checking ecommerce health:" 113 | run_check ecommerce_heartbeat ecommerce \ 114 | "curl --fail -L http://localhost:18130/health/" 115 | fi 116 | 117 | if should_check discovery; then 118 | echo "Checking discovery health:" 119 | run_check discovery_heartbeat discovery \ 120 | "curl --fail -L http://localhost:18381/health/" 121 | fi 122 | 123 | if should_check forum; then 124 | echo "Checking forum heartbeat:" 125 | run_check forum_heartbeat forum \ 126 | "curl --fail -L http://localhost:44567/heartbeat" 127 | fi 128 | 129 | if should_check edx_notes_api; then 130 | echo "Checking edx_notes_api heartbeat:" 131 | run_check edx_notes_api_heartbeat edx_notes_api \ 132 | "curl --fail -L http://localhost:18120/heartbeat" 133 | fi 134 | 135 | if should_check credentials; then 136 | echo "Checking credentials heartbeat:" 137 | run_check credentials_heartbeat credentials \ 138 | "curl --fail -L http://localhost:18150/health" 139 | fi 140 | 141 | if should_check xqueue; then 142 | echo "Checking xqueue status:" 143 | run_check xqueue_heartbeat xqueue \ 144 | "curl --fail -L http://localhost:18040/xqueue/status" 145 | fi 146 | 147 | if should_check insights; then 148 | echo "Running Analytics Dashboard Devstack tests: " 149 | run_check insights_heartbeat insights \ 150 | "curl --fail -L http://localhost:18110/health/" 151 | fi 152 | 153 | if should_check analyticsapi; then 154 | echo "Running Analytics Data API Devstack tests: " 155 | run_check analyticsapi_heartbeat analyticsapi \ 156 | "curl --fail -L http://localhost:19001/health/" 157 | fi 158 | 159 | echo "Successful checks:${succeeded:- NONE}" 160 | echo "Failed checks:${failed:- NONE}" 161 | if [[ -z "$succeeded" ]] && [[ -z "$failed" ]]; then 162 | echo "No checks ran. Exiting as failure." 
163 | exit 1 164 | elif [[ -z "$failed" ]]; then 165 | echo "Check result: SUCCESS" 166 | exit 0 167 | else 168 | echo "Check result: FAILURE" 169 | exit 2 170 | fi 171 | -------------------------------------------------------------------------------- /compatibility.mk: -------------------------------------------------------------------------------- 1 | # This Makefile exists entirely to support old targets that were once 2 | # part of the documented Devstack interface but no longer are. 3 | # This file allows us to remove old targets from the main Makefile 4 | # (thus making it easier to read and making the `make help` message cleaner) 5 | # while avoiding breaking backwards-compatibility with developers' existing workflows. 6 | 7 | # Housekeeping Rules: 8 | # * Organize targets into Parameterized and Simple. Alphabetize within those sections. 9 | # * Keep target definitions simple. Ideally, targets in this file are just aliases to 10 | # equivalent commands in the main Makefile. 11 | 12 | # All devstack targets are "PHONY" in that they do not name actual files. 13 | # Thus, all non-parameterized targets should be added to this declaration. 14 | .PHONY: backup check-memory destroy \ 15 | dev.provision.services dev.repo.reset \ 16 | dev.up.all dev.up.watchers down \ 17 | healthchecks lms-restart \ 18 | lms-watcher-shell logs provision pull \ 19 | pull.xqueue restore static stats stop stop.all \ 20 | stop.watchers stop.xqueue cms-restart \ 21 | cms-watcher-shell validate \ 22 | xqueue_consumer-restart xqueue-restart 23 | 24 | ##################################################################### 25 | # Parameterized targets. 26 | ##################################################################### 27 | 28 | dev.provision.services.%: 29 | make dev.provision.$* 30 | 31 | healthchecks.%: 32 | make dev.check.$* 33 | 34 | mysql-shell-%: 35 | make dev.dbshell.$* 36 | 37 | %-update-db: 38 | make dev.migrate.$* 39 | 40 | ##################################################################### 41 | # Simple targets. 
42 | ##################################################################### 43 | 44 | backup: dev.backup 45 | 46 | check-memory: dev.check-memory 47 | 48 | destroy: dev.destroy 49 | 50 | dev.provision.services: dev.provision 51 | 52 | dev.repo.reset: dev.reset-repos 53 | 54 | dev.up.all: dev.up.with-watchers 55 | 56 | dev.up.watchers: dev.up.lms_watcher+cms_watcher 57 | 58 | down: dev.down 59 | 60 | healthchecks: dev.check 61 | 62 | lms-restart: dev.restart-devserver.lms 63 | 64 | lms-watcher-shell: dev.shell.lms_watcher 65 | 66 | logs: dev.logs 67 | 68 | provision: dev.provision 69 | 70 | pull: dev.pull 71 | 72 | pull.xqueue: dev.pull.without-deps.xqueue+xqueue_consumer 73 | 74 | restore: dev.restore 75 | 76 | static: dev.static 77 | 78 | stats: dev.stats 79 | 80 | stop.all: dev.stop 81 | 82 | stop: dev.stop 83 | 84 | stop.watchers: dev.stop.lms_watcher+cms_watcher 85 | 86 | stop.xqueue: dev.stop.xqueue+xqueue_consumer 87 | 88 | cms-restart: dev.restart-devserver.cms 89 | 90 | cms-watcher-shell: dev.shell.cms_watcher 91 | 92 | validate: dev.validate 93 | 94 | xqueue_consumer-restart: dev.restart-devserver.xqueue_consumer 95 | 96 | xqueue-restart: dev.restart-devserver.xqueue 97 | -------------------------------------------------------------------------------- /configuration_files/analytics_api.yml: -------------------------------------------------------------------------------- 1 | AGGREGATE_PAGE_SIZE: 10 2 | ANALYTICS_DATABASE: reports 3 | API_AUTH_TOKEN: put-your-api-token-here 4 | API_ROOT: null 5 | BACKEND_SERVICE_EDX_OAUTH2_KEY: analytics_api-backend-service-key 6 | BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://127.0.0.1:8000/oauth2 7 | BACKEND_SERVICE_EDX_OAUTH2_SECRET: analytics_api-backend-service-secret 8 | CACHES: 9 | default: 10 | BACKEND: django.core.cache.backends.memcached.PyMemcacheCache 11 | KEY_PREFIX: analytics_api 12 | LOCATION: 13 | - memcache 14 | OPTIONS: 15 | no_delay: true 16 | ignore_exc: true 17 | use_pooling: true 18 | CSRF_COOKIE_SECURE: false 19 | DATABASES: 20 | default: 21 | ENGINE: django.db.backends.mysql 22 | HOST: db.edx 23 | NAME: analytics-api 24 | PASSWORD: password 25 | PORT: '3306' 26 | USER: api001 27 | reports: 28 | ENGINE: django.db.backends.mysql 29 | HOST: db.edx 30 | NAME: reports 31 | PASSWORD: password 32 | PORT: '3306' 33 | USER: reports001 34 | DATETIME_FORMAT: '%Y-%m-%dT%H%M%S' 35 | DATE_FORMAT: '%Y-%m-%d' 36 | DEFAULT_PAGE_SIZE: 25 37 | EDX_DRF_EXTENSIONS: 38 | OAUTH2_USER_INFO_URL: http://127.0.0.1:8000/user_info 39 | ELASTICSEARCH_AWS_ACCESS_KEY_ID: null 40 | ELASTICSEARCH_AWS_SECRET_ACCESS_KEY: null 41 | ELASTICSEARCH_CONNECTION_CLASS: null 42 | ELASTICSEARCH_CONNECTION_DEFAULT_REGION: us-east-1 43 | ELASTICSEARCH_LEARNERS_HOST: localhost 44 | ELASTICSEARCH_LEARNERS_INDEX: roster_1_2 45 | ELASTICSEARCH_LEARNERS_UPDATE_INDEX: index_updates 46 | EXTRA_APPS: [] 47 | JWT_AUTH: 48 | JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload 49 | JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature 50 | JWT_ISSUERS: 51 | - AUDIENCE: SET-ME-PLEASE 52 | ISSUER: http://127.0.0.1:8000/oauth2 53 | SECRET_KEY: SET-ME-PLEASE 54 | JWT_PUBLIC_SIGNING_JWK_SET: '' 55 | LANGUAGE_CODE: en-us 56 | LMS_BASE_URL: http://127.0.0.1:8000/ 57 | MAX_PAGE_SIZE: 100 58 | MEDIA_STORAGE_BACKEND: 59 | DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage 60 | MEDIA_ROOT: /edx/var/analytics_api/media 61 | MEDIA_URL: /media/ 62 | REPORT_DOWNLOAD_BACKEND: 63 | COURSE_REPORT_FILE_LOCATION_TEMPLATE: '{course_id}_{report_name}.csv' 64 | 
DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage 65 | MEDIA_ROOT: /edx/var/analytics_api/static/reports 66 | MEDIA_URL: http://localhost:8100/static/reports/ 67 | SECRET_KEY: Your secret key here 68 | SESSION_EXPIRE_AT_BROWSER_CLOSE: false 69 | SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 70 | SOCIAL_AUTH_EDX_OAUTH2_KEY: analytics_api-sso-key 71 | SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://127.0.0.1:8000/logout 72 | SOCIAL_AUTH_EDX_OAUTH2_SECRET: analytics_api-sso-secret 73 | SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 74 | SOCIAL_AUTH_REDIRECT_IS_HTTPS: false 75 | STATICFILES_DIRS: 76 | - static 77 | STATICFILES_STORAGE: django.contrib.staticfiles.storage.StaticFilesStorage 78 | STATIC_ROOT: /edx/var/analytics_api/staticfiles 79 | TIME_ZONE: UTC 80 | -------------------------------------------------------------------------------- /configuration_files/discovery.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | API_ROOT: null 5 | AWS_SES_REGION_ENDPOINT: email.us-east-1.amazonaws.com 6 | AWS_SES_REGION_NAME: us-east-1 7 | BACKEND_SERVICE_EDX_OAUTH2_KEY: discovery-backend-service-key 8 | BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 9 | BACKEND_SERVICE_EDX_OAUTH2_SECRET: discovery-backend-service-secret 10 | CACHES: 11 | default: 12 | BACKEND: django.core.cache.backends.memcached.PyMemcacheCache 13 | KEY_PREFIX: discovery 14 | LOCATION: 15 | - edx.devstack.memcached:11211 16 | OPTIONS: 17 | no_delay: true 18 | ignore_exc: true 19 | use_pooling: true 20 | CELERY_BROKER_URL: redis://:password@edx.devstack.redis:6379/ 21 | CORS_ORIGIN_WHITELIST: [] 22 | CSRF_COOKIE_SECURE: false 23 | DATABASES: 24 | default: 25 | ATOMIC_REQUESTS: 'false' 26 | CONN_MAX_AGE: 60 27 | ENGINE: django.db.backends.mysql 28 | HOST: edx.devstack.mysql 29 | NAME: discovery 30 | OPTIONS: 31 | connect_timeout: 10 32 | init_command: SET sql_mode='STRICT_TRANS_TABLES' 33 | PASSWORD: password 34 | PORT: 3306 35 | USER: discov001 36 | read_replica: 37 | ATOMIC_REQUESTS: 'false' 38 | CONN_MAX_AGE: 60 39 | ENGINE: django.db.backends.mysql 40 | HOST: edx.devstack.mysql 41 | NAME: discovery 42 | OPTIONS: 43 | connect_timeout: 10 44 | init_command: SET sql_mode='STRICT_TRANS_TABLES' 45 | PASSWORD: password 46 | PORT: 3306 47 | USER: discov001 48 | DEFAULT_PARTNER_ID: 1 49 | EDX_DRF_EXTENSIONS: 50 | OAUTH2_USER_INFO_URL: http://127.0.0.1:8000/user_info 51 | ELASTICSEARCH_CLUSTER_URL: http://127.0.0.1:9200/ 52 | ELASTICSEARCH_INDEX_NAME: catalog 53 | EMAIL_BACKEND: django_ses.SESBackend 54 | EMAIL_HOST: localhost 55 | EMAIL_HOST_PASSWORD: '' 56 | EMAIL_HOST_USER: '' 57 | EMAIL_PORT: 25 58 | EMAIL_USE_TLS: false 59 | ENABLE_PUBLISHER: false 60 | EXTRA_APPS: 61 | - course_discovery.apps.edx_catalog_extensions 62 | JWT_AUTH: 63 | JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload 64 | JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature 65 | JWT_ISSUERS: 66 | - AUDIENCE: lms-key 67 | ISSUER: http://edx.devstack.lms:18000/oauth2 68 | SECRET_KEY: lms-secret 69 | JWT_PUBLIC_SIGNING_JWK_SET: '' 70 | LANGUAGE_CODE: en 71 | MEDIA_STORAGE_BACKEND: 72 | DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage 73 | MEDIA_ROOT: /edx/var/discovery/media 74 | MEDIA_URL: /media/ 75 | OPENEXCHANGERATES_API_KEY: '' 76 | PARLER_DEFAULT_LANGUAGE_CODE: en 77 | PARLER_LANGUAGES: 78 | 1: 79 | - code: en 80 | default: 81 | fallbacks: 82 | - en 83 | hide_untranslated: 'False' 84 | PLATFORM_NAME: Your Platform Name Here 85 | 
PUBLISHER_FROM_EMAIL: null 86 | SECRET_KEY: Your secret key here 87 | SESSION_EXPIRE_AT_BROWSER_CLOSE: false 88 | SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 89 | SOCIAL_AUTH_EDX_OAUTH2_KEY: discovery-sso-key 90 | SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout 91 | SOCIAL_AUTH_EDX_OAUTH2_SECRET: discovery-sso-secret 92 | SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 93 | SOCIAL_AUTH_REDIRECT_IS_HTTPS: false 94 | STATICFILES_STORAGE: django.contrib.staticfiles.storage.StaticFilesStorage 95 | STATIC_ROOT: /edx/var/discovery/staticfiles 96 | TIME_ZONE: UTC 97 | USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME 98 | -------------------------------------------------------------------------------- /configuration_files/ecommerce.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | AFFILIATE_COOKIE_KEY: dev_affiliate_id 4 | API_ROOT: null 5 | BACKEND_SERVICE_EDX_OAUTH2_KEY: ecommerce-backend-service-key 6 | BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 7 | BACKEND_SERVICE_EDX_OAUTH2_SECRET: ecommerce-backend-service-secret 8 | ECOMMERCE_WORKER_BROKER_HOST: 172.17.0.2 9 | BROKER_URL: amqp://celery:celery@172.17.0.2:5672 10 | CACHES: 11 | default: 12 | BACKEND: django.core.cache.backends.memcached.MemcachedCache 13 | KEY_PREFIX: ecommerce 14 | LOCATION: 15 | - edx.devstack.memcached:11211 16 | COMPREHENSIVE_THEME_DIRS: 17 | - /edx/var/edx-themes/edx-themes/ecommerce 18 | - /edx/app/ecommerce/ecommerce/ecommerce/themes 19 | CORS_ALLOW_CREDENTIALS: false 20 | CORS_ORIGIN_WHITELIST: [] 21 | CORS_URLS_REGEX: '' 22 | CSRF_COOKIE_SECURE: false 23 | DATABASES: 24 | default: 25 | ATOMIC_REQUESTS: true 26 | CONN_MAX_AGE: 60 27 | ENGINE: django.db.backends.mysql 28 | HOST: edx.devstack.mysql80 29 | NAME: ecommerce 30 | OPTIONS: 31 | connect_timeout: 10 32 | init_command: SET sql_mode='STRICT_TRANS_TABLES' 33 | PASSWORD: password 34 | PORT: '3306' 35 | USER: ecomm001 36 | DEFAULT_SITE_THEME: null 37 | ECOMMERCE_URL_ROOT: http://localhost:18130 38 | EDX_API_KEY: PUT_YOUR_API_KEY_HERE 39 | EDX_DRF_EXTENSIONS: 40 | JWT_PAYLOAD_MERGEABLE_USER_ATTRIBUTES: 41 | - tracking_context 42 | JWT_PAYLOAD_USER_ATTRIBUTE_MAPPING: 43 | administrator: is_staff 44 | email: email 45 | full_name: full_name 46 | tracking_context: tracking_context 47 | user_id: lms_user_id 48 | OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info 49 | ENABLE_COMPREHENSIVE_THEMING: false 50 | ENROLLMENT_FULFILLMENT_TIMEOUT: 7 51 | ENTERPRISE_SERVICE_URL: http://edx.devstack.lms:18000/enterprise/ 52 | ENTERPRISE_LEARNER_PORTAL_HOSTNAME: localhost:8734 53 | EXTRA_APPS: [] 54 | JWT_AUTH: 55 | JWT_ALGORITHM: HS256 56 | JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload 57 | JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature 58 | JWT_DECODE_HANDLER: ecommerce.extensions.api.handlers.jwt_decode_handler 59 | JWT_ISSUERS: 60 | - AUDIENCE: lms-key 61 | ISSUER: http://localhost:18000/oauth2 62 | SECRET_KEY: lms-secret 63 | - AUDIENCE: lms-key 64 | ISSUER: ecommerce_worker 65 | SECRET_KEY: lms-secret 66 | JWT_LEEWAY: 1 67 | JWT_PUBLIC_SIGNING_JWK_SET: '' 68 | JWT_SECRET_KEY: lms-secret 69 | JWT_VERIFY_EXPIRATION: true 70 | LANGUAGE_CODE: en 71 | LANGUAGE_COOKIE_NAME: openedx-language-preference 72 | LOGGING_ROOT_OVERRIDES: {} 73 | LOGGING_SUBSECTION_OVERRIDES: {} 74 | MEDIA_STORAGE_BACKEND: 75 | DEFAULT_FILE_STORAGE: django.core.files.storage.FileSystemStorage 76 | MEDIA_ROOT: /edx/var/ecommerce/media 77 | 
MEDIA_URL: /media/ 78 | OSCAR_FROM_EMAIL: oscar@example.com 79 | PAYMENT_MICROFRONTEND_URL: null 80 | PAYMENT_PROCESSOR_CONFIG: 81 | edx: 82 | cybersource: 83 | access_key: SET-ME-PLEASE 84 | apple_pay_country_code: US 85 | apple_pay_merchant_id_certificate_path: /edx/etc/ssl/apple_pay_merchant.pem 86 | apple_pay_merchant_id_domain_association: 'This value should also be in 87 | private configuration. It, too, 88 | 89 | will span multiple lines. 90 | 91 | ' 92 | apple_pay_merchant_identifier: merchant.com.example 93 | cancel_page_url: /checkout/cancel-checkout/ 94 | merchant_id: SET-ME-PLEASE 95 | payment_page_url: https://testsecureacceptance.cybersource.com/pay 96 | profile_id: SET-ME-PLEASE 97 | receipt_page_url: /checkout/receipt/ 98 | secret_key: SET-ME-PLEASE 99 | send_level_2_3_details: true 100 | soap_api_url: https://ics2wstest.ic3.com/commerce/1.x/transactionProcessor/CyberSourceTransaction_1.140.wsdl 101 | sop_access_key: SET-ME-PLEASE 102 | sop_payment_page_url: https://testsecureacceptance.cybersource.com/silent/pay 103 | sop_profile_id: SET-ME-PLEASE 104 | sop_secret_key: SET-ME-PLEASE 105 | transaction_key: SET-ME-PLEASE 106 | paypal: 107 | cancel_url: /checkout/cancel-checkout/ 108 | client_id: SET-ME-PLEASE 109 | client_secret: SET-ME-PLEASE 110 | error_url: /checkout/error/ 111 | mode: sandbox 112 | receipt_url: /checkout/receipt/ 113 | PLATFORM_NAME: Your Platform Name Here 114 | SAILTHRU_KEY: sailthru key here 115 | SAILTHRU_SECRET: sailthru secret here 116 | SECRET_KEY: Your secret key here 117 | SESSION_COOKIE_SECURE: true 118 | SESSION_EXPIRE_AT_BROWSER_CLOSE: false 119 | SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 120 | SOCIAL_AUTH_EDX_OAUTH2_KEY: ecommerce-sso-key 121 | SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout 122 | SOCIAL_AUTH_EDX_OAUTH2_SECRET: ecommerce-sso-secret 123 | SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 124 | SOCIAL_AUTH_REDIRECT_IS_HTTPS: false 125 | STATICFILES_STORAGE: ecommerce.theming.storage.ThemeStorage 126 | STATIC_ROOT: /edx/var/ecommerce/staticfiles 127 | THEME_SCSS: sass/themes/default.scss 128 | TIME_ZONE: UTC 129 | USERNAME_REPLACEMENT_WORKER: OVERRIDE THIS WITH A VALID USERNAME 130 | SDN_CHECK_API_URL: https://data.trade.gov/consolidated_screening_list/v1/search 131 | SDN_CHECK_API_KEY: sdn search key here 132 | -------------------------------------------------------------------------------- /configuration_files/insights.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | APPLICATION_NAME: Insights 4 | BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://edx.devstack.lms:18000/oauth2 5 | CACHES: 6 | default: 7 | BACKEND: django.core.cache.backends.memcached.PyMemcacheCache 8 | KEY_PREFIX: default_env-default_deployment-insights 9 | LOCATION: 10 | - edx.devstack.memcached:11211 11 | OPTIONS: 12 | no_delay: true 13 | ignore_exc: true 14 | use_pooling: true 15 | CDN_DOMAIN: null 16 | CMS_COURSE_SHORTCUT_BASE_URL: http://edx.devstack.lms:18000/course 17 | COURSE_API_URL: http://edx.devstack.lms:18000/api/courses/v1/ 18 | CSRF_COOKIE_NAME: insights_csrftoken 19 | CSRF_COOKIE_SECURE: false 20 | DATABASES: 21 | default: 22 | ENGINE: django.db.backends.mysql 23 | HOST: edx.devstack.mysql 24 | NAME: dashboard 25 | PASSWORD: secret 26 | PORT: '3306' 27 | USER: rosencrantz 28 | DATA_API_AUTH_TOKEN: edx 29 | DATA_API_URL: http://edx.devstack.analyticsapi:18100/api/v0 30 | DOCUMENTATION_LOAD_ERROR_URL: http://127.0.0.1/en/latest/Reference.html#error-conditions 31 
| EMAIL_HOST: smtp.example.com 32 | EMAIL_HOST_PASSWORD: mail_password 33 | EMAIL_HOST_USER: mail_user 34 | EMAIL_PORT: 587 35 | ENABLE_AUTO_AUTH: true 36 | GRADING_POLICY_API_URL: http://edx.devstack.lms:18000/api/grades/v1/ 37 | HELP_URL: http://127.0.0.1/en/latest 38 | LANGUAGE_CODE: en-us 39 | LANGUAGE_COOKIE_NAME: insights_language 40 | LEARNER_API_LIST_DOWNLOAD_FIELDS: null 41 | LMS_COURSE_SHORTCUT_BASE_URL: URL_FOR_LMS_COURSE_LIST_PAGE 42 | MODULE_PREVIEW_URL: http://edx.devstack.lms:18000/xblock 43 | OPEN_SOURCE_URL: http://set-me-please 44 | PLATFORM_NAME: edX 45 | PRIVACY_POLICY_URL: http://example.com/privacy-policy 46 | RESEARCH_URL: https://www.edx.org/research-pedagogy 47 | SECRET_KEY: YOUR_SECRET_KEY_HERE 48 | SEGMENT_IGNORE_EMAIL_REGEX: null 49 | SEGMENT_IO_KEY: YOUR_KEY 50 | SESSION_COOKIE_NAME: insights_sessionid 51 | SESSION_EXPIRE_AT_BROWSER_CLOSE: false 52 | SOCIAL_AUTH_REDIRECT_IS_HTTPS: false 53 | SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://localhost:18000 54 | SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://edx.devstack.lms:18000 55 | SOCIAL_AUTH_EDX_OAUTH2_PUBLIC_URL_ROOT: http://localhost:18000 56 | SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout 57 | STATICFILES_DIRS: 58 | - /edx/app/insights/edx_analytics_dashboard/analytics_dashboard/static 59 | STATIC_ROOT: /edx/var/insights/staticfiles 60 | SUPPORT_EMAIL: '' 61 | TERMS_OF_SERVICE_URL: http://example.com/terms-service 62 | TIME_ZONE: UTC 63 | -------------------------------------------------------------------------------- /configuration_files/registrar.yml: -------------------------------------------------------------------------------- 1 | API_ROOT: http://localhost:18734/api 2 | BACKEND_SERVICE_EDX_OAUTH2_KEY: registrar-backend-service-key 3 | BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL: http://localhost:18000/oauth2 4 | BACKEND_SERVICE_EDX_OAUTH2_SECRET: registrar-backend-service-secret 5 | CACHES: 6 | default: 7 | BACKEND: django.core.cache.backends.memcached.PyMemcacheCache 8 | KEY_PREFIX: registrar 9 | LOCATION: 10 | - edx.devstack.memcached:11211 11 | OPTIONS: 12 | no_delay: true 13 | ignore_exc: true 14 | use_pooling: true 15 | CELERY_ALWAYS_EAGER: false 16 | CELERY_BROKER_HOSTNAME: '' 17 | CELERY_BROKER_PASSWORD: '' 18 | CELERY_BROKER_TRANSPORT: '' 19 | CELERY_BROKER_USER: '' 20 | CELERY_BROKER_VHOST: '' 21 | CELERY_DEFAULT_EXCHANGE: registrar 22 | CELERY_DEFAULT_QUEUE: registrar.default 23 | CELERY_DEFAULT_ROUTING_KEY: registrar 24 | CERTIFICATE_LANGUAGES: 25 | en: English 26 | es_419: Spanish 27 | CORS_ORIGIN_WHITELIST: [] 28 | CSRF_COOKIE_SECURE: false 29 | CSRF_TRUSTED_ORIGINS: [] 30 | DATABASES: 31 | default: 32 | ATOMIC_REQUESTS: false 33 | CONN_MAX_AGE: 60 34 | ENGINE: django.db.backends.mysql 35 | HOST: edx.devstack.mysql 36 | NAME: registrar 37 | OPTIONS: 38 | connect_timeout: 10 39 | init_command: SET sql_mode='STRICT_TRANS_TABLES' 40 | PASSWORD: password 41 | PORT: '3306' 42 | USER: registrar001 43 | DISCOVERY_BASE_URL: null 44 | EDX_DRF_EXTENSIONS: 45 | OAUTH2_USER_INFO_URL: http://edx.devstack.lms:18000/oauth2/user_info 46 | EXTRA_APPS: [] 47 | JWT_AUTH: 48 | JWT_AUTH_COOKIE_HEADER_PAYLOAD: edx-jwt-cookie-header-payload 49 | JWT_AUTH_COOKIE_SIGNATURE: edx-jwt-cookie-signature 50 | JWT_ISSUERS: 51 | - AUDIENCE: lms-key 52 | ISSUER: http://localhost:18000/oauth2 53 | SECRET_KEY: lms-secret 54 | JWT_PUBLIC_SIGNING_JWK_SET: '' 55 | LANGUAGE_CODE: en 56 | LANGUAGE_COOKIE_NAME: openedx-language-preference 57 | LMS_BASE_URL: null 58 | MEDIA_STORAGE_BACKEND: 59 | DEFAULT_FILE_STORAGE: 
django.core.files.storage.FileSystemStorage 60 | MEDIA_ROOT: /edx/var/registrar/media 61 | MEDIA_URL: /api/media/ 62 | REGISTRAR_SERVICE_USER: registrar_service_user 63 | SECRET_KEY: hBiEM5pDr8GsZv1lh6GKmD0c9SF5Z00TFEoRY1zSmCxijFrR 64 | SEGMENT_KEY: null 65 | SESSION_EXPIRE_AT_BROWSER_CLOSE: false 66 | SOCIAL_AUTH_EDX_OAUTH2_ISSUER: http://127.0.0.1:8000 67 | SOCIAL_AUTH_EDX_OAUTH2_KEY: registrar-sso-key 68 | SOCIAL_AUTH_EDX_OAUTH2_LOGOUT_URL: http://localhost:18000/logout 69 | SOCIAL_AUTH_EDX_OAUTH2_SECRET: registrar-sso-secret 70 | SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT: http://127.0.0.1:8000 71 | SOCIAL_AUTH_REDIRECT_IS_HTTPS: false 72 | STATICFILES_STORAGE: django.contrib.staticfiles.storage.StaticFilesStorage 73 | STATIC_ROOT: /edx/var/registrar/staticfiles 74 | TIME_ZONE: UTC 75 | -------------------------------------------------------------------------------- /configuration_files/xqueue.yml: -------------------------------------------------------------------------------- 1 | CONSUMER_DELAY: 10 2 | CSRF_COOKIE_SECURE: false 3 | DATABASES: 4 | default: 5 | ATOMIC_REQUESTS: true 6 | CONN_MAX_AGE: 0 7 | ENGINE: django.db.backends.mysql 8 | HOST: edx.devstack.mysql80 9 | NAME: xqueue 10 | OPTIONS: {} 11 | PASSWORD: password 12 | PORT: '3306' 13 | USER: xqueue001 14 | LOCAL_LOGLEVEL: INFO 15 | LOGGING_ENV: sandbox 16 | LOG_DIR: /edx/var/logs/xqueue 17 | NEWRELIC_APPNAME: default_env-default_deployment-xqueue 18 | NEWRELIC_LICENSE_KEY: '' 19 | REQUESTS_BASIC_AUTH: 20 | - edx 21 | - edx 22 | SESSION_COOKIE_SECURE: false 23 | SUBMISSION_PROCESSING_DELAY: 1 24 | SYSLOG_SERVER: localhost 25 | UPLOAD_BUCKET: sandbox-bucket 26 | UPLOAD_PATH_PREFIX: sandbox-xqueue 27 | USERS: 28 | lms: password 29 | XQUEUES: 30 | certificates: null 31 | edX-Open_DemoX: http://localhost:18050 32 | open-ended: null 33 | open-ended-message: null 34 | test-pull: null 35 | -------------------------------------------------------------------------------- /course-generator/build-course-json.sh: -------------------------------------------------------------------------------- 1 | # Script to build course configurations in proper json format that can be passed into create-courses.sh 2 | # See test-course.json for a master list of course configurations 3 | # TODO: Link documentation for course-generator-tool 4 | # USAGE: ./build-course-json.sh [course-config-file] 5 | 6 | if [ "$#" -ne 1 ]; then 7 | echo "Illegal number of parameters" 8 | exit 9 | fi 10 | config_file="$1" 11 | write_to_config() { 12 | # Write (json) content to the config file 13 | echo "$@" > $config_file 14 | } 15 | 16 | wrap_json() { 17 | # Wrap the content in curly braces '{}' 18 | echo "{" 19 | echo "$@" 20 | echo "}" 21 | } 22 | 23 | build_course_base() { 24 | # Print the (key-value pairs) base requirements of the course json. 
25 | # $1 = organization 26 | # $2 = number (setting this to null acts as a proxy for random during course provisioning) 27 | # $3 = run 28 | # $4 = user 29 | # $5 = partner 30 | # 31 | # Output: '"organization":..., "number":...,' 32 | 33 | base_values=("$@") 34 | base_keys=( 35 | "organization" 36 | "number" 37 | "run" 38 | "user" 39 | "partner" 40 | ) 41 | 42 | last=`expr ${#base_keys[@]} - 1` 43 | for ((i=0;i<${#base_keys[@]};++i)); do 44 | echo "\"${base_keys[i]}\": ${base_values[i]}" 45 | if [ $i -ne $last ]; then 46 | echo "," 47 | fi 48 | done 49 | } 50 | 51 | build_course_fields() { 52 | # Print the (key-value pairs) fields (studio settings) of the course json 53 | # $1 = "display_name" 54 | # 55 | # Output: '"display_name":...,' 56 | 57 | fields_values=("$@") 58 | fields_keys=( 59 | "display_name" 60 | "mobile_available" 61 | ) 62 | 63 | last=`expr ${#fields_keys[@]} - 1` 64 | for ((i=0;i<${#fields_keys[@]};++i)); do 65 | echo "\"${fields_keys[i]}\": ${fields_values[i]}" 66 | if [ $i -ne $last ]; then 67 | echo "," 68 | fi 69 | done 70 | } 71 | 72 | build_course_enrollment() { 73 | # Print the (key-value pairs) enrollment data of the course json 74 | # $1 = audit 75 | # $2 = honor 76 | # $3 = verified 77 | # $4 = professional_education 78 | # $5 = no_id_verification 79 | # $6 = credit 80 | # $7 = credit_provider 81 | # 82 | # Output:'"audit":..., "honor":...,' 83 | 84 | enrollment_values=("$@") 85 | enrollment_keys=( 86 | "audit" 87 | "honor" 88 | "verified" 89 | "professional_education" 90 | "no_id_verification" 91 | "credit" 92 | "credit_provider" 93 | ) 94 | 95 | last=`expr ${#enrollment_keys[@]} - 1` 96 | for ((i=0;i<${#enrollment_keys[@]};++i)); do 97 | echo "\"${enrollment_keys[i]}\": ${enrollment_values[i]}" 98 | if [ $i -ne $last ]; then 99 | echo "," 100 | fi 101 | done 102 | } 103 | 104 | ################################################# 105 | ### DEFINE COURSE BUILDER FUNCTIONS HERE ### 106 | ################################################# 107 | # Output should be a course json object 108 | build_course_mode() { 109 | # Builds properly formatted, wrapped json for single course with configurable enrollment settings 110 | # $1 = display_name 111 | # $2 = audit 112 | # $3 = honor 113 | # $4 = verified 114 | # $5 = professional_education 115 | # $6 = no_id_verification 116 | # $7 = credit 117 | # $8 = credit_provider 118 | # 119 | # Output: '{...course-settings...}' 120 | 121 | wrap_json $( 122 | # 1. Base settings 123 | build_course_base "\"test-course-generator\"" null "\"1\"" "\"edx@example.com\"" "\"edx\"" 124 | echo "," 125 | 126 | # 2. Fields settings ("fields" key is at the same level as base settings) 127 | echo "\"fields\":" 128 | wrap_json $(build_course_fields "$1" true) 129 | echo "," 130 | 131 | # 3. 
Enrollment Settings ("enrollment" key is at the same level as base settings) 132 | echo "\"enrollment\":" 133 | wrap_json $(build_course_enrollment "$2" "$3" "$4" "$5" "$6" "$7" "$8") 134 | ) 135 | } 136 | ################################################# 137 | 138 | ################################ 139 | ### DEFINE COURSES HERE ### 140 | ################################ 141 | # array of courses, where each element is course json object 142 | courses=( 143 | # Build audit course 144 | "$(build_course_mode "\"audit course\"" true false false false false false null)" 145 | # Build honor course 146 | "$(build_course_mode "\"honor course\"" false true false false false false null)" 147 | # Build verified course with audit seat 148 | "$(build_course_mode "\"verified course with audit seat\"" true false true false false false null)" 149 | # Build verified course with honor seat 150 | "$(build_course_mode "\"verified course with honor seat\"" false true true false false false null)" 151 | # Build professional course with required id verification 152 | "$(build_course_mode "\"professional course id required\"" false false false true false false null)" 153 | # Build professional course without required id verification 154 | "$(build_course_mode "\"professional course id not required\"" false false false true true false null)" 155 | # Build credit course with audit seat 156 | "$(build_course_mode "\"credit course with audit seat\"" true false true false false true "\"test-credit-provider\"")" 157 | # Build credit course with honor seat 158 | "$(build_course_mode "\"credit course with honor seat\"" false true true false false true "\"test-credit-provider\"")" 159 | ) 160 | ################################ 161 | 162 | # Print courses in proper format 163 | # '{"courses": [...list of course json objects...]}' 164 | courses_json=$(wrap_json $( 165 | echo "\"courses\":" 166 | echo "[" 167 | last=`expr ${#courses[@]} - 1` 168 | for ((i=0;i<${#courses[@]};++i)); do 169 | echo "${courses[i]}" 170 | if [ $i -ne $last ]; then 171 | echo "," 172 | fi 173 | done 174 | echo "]" 175 | )) 176 | write_to_config "$courses_json" 177 | 178 | # Pretty print json 179 | cat $config_file | python -m json.tool > "tmp.json" 180 | cat "tmp.json" > $config_file 181 | rm "tmp.json" 182 | -------------------------------------------------------------------------------- /course-generator/create-courses.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Script that provisions cms, and ecommerce with courses 3 | # USAGE: ./create-courses [--cms] [--ecommerce] course-config.json 4 | cms=false 5 | ecommerce=false 6 | echo "Parsing options" 7 | container_error=false 8 | for arg in "$@"; do 9 | if [ $arg == "--cms" ]; then 10 | if [ ! "$(docker compose exec lms bash -c 'echo "Course will be created for cms"; exit $?')" ]; then 11 | echo "Issue with cms container" 12 | container_error=true 13 | else 14 | cms=true 15 | fi 16 | elif [ $arg == "--ecommerce" ]; then 17 | if [ ! "$(docker compose exec ecommerce bash -c 'echo "Course will be created for ecommerce"; exit $?')" ]; then 18 | echo "Issue with ecommerce container" 19 | container_error=true 20 | else 21 | ecommerce=true 22 | fi 23 | fi 24 | done 25 | 26 | if $container_error; then 27 | echo "Aborting course creation. 
Check your containers" 28 | exit 29 | fi 30 | 31 | # Users can specify null course numbers in the course_config json as a proxy for random numbers 32 | ## This will allow users to rerun the command multiple times and avoid duplicate course ids 33 | course_config_file="${@: -1}" 34 | if [[ ! -f $course_config_file ]] ; then 35 | echo "$course_config_file does not exist. Must provide a valid course config file." 36 | exit 37 | fi 38 | course_json="" 39 | while IFS='' read -r line || [[ -n "$line" ]]; do 40 | course_json=$course_json${line/"\"number\": null"/"\"number\": \""$RANDOM"\""} 41 | done < "${@: -1}" 42 | 43 | if $cms ; then 44 | echo "Creating courses on cms." 45 | docker compose exec lms bash -c "source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py cms --settings=devstack_docker generate_courses '$course_json'" 46 | fi 47 | 48 | if $ecommerce ; then 49 | echo "Creating courses on ecommerce." 50 | docker compose exec ecommerce bash -c "source /edx/app/ecommerce/ecommerce_env && python /edx/app/ecommerce/ecommerce/manage.py generate_courses '$course_json'" 51 | fi 52 | -------------------------------------------------------------------------------- /course-generator/starter-course.json: -------------------------------------------------------------------------------- 1 | { 2 | "courses": [ 3 | { 4 | "enrollment": { 5 | "audit": false, 6 | "honor": true, 7 | "verified": true, 8 | "professional_education": false, 9 | "no_id_verification": false, 10 | "credit": true, 11 | "credit_provider": "test-credit-provider" 12 | }, 13 | "fields": { 14 | "display_name": "Test_Course", 15 | "enable_proctored_exams": true, 16 | "mobile_available": true, 17 | "course_survey_required": true, 18 | "discussion_sort_alpha": true, 19 | "invitation_only": true, 20 | "allow_anonymous": true, 21 | "bypass_home": true, 22 | "entrance_exam_enabled": true, 23 | "show_timezone": true, 24 | "allow_unsupported_xblocks": true, 25 | "allow_public_wiki_access": true, 26 | "allow_proctoring_opt_out": true, 27 | "is_new": true, 28 | "issue_badges": true, 29 | "certificates_show_before_end": true, 30 | "disable_progress_graph": true, 31 | "no_grade": true, 32 | "enable_ccx": true, 33 | "allow_anonymous_to_peers": true, 34 | "create_zendesk_tickets": true, 35 | "hide_progress_tab": true, 36 | "show_calculator": true, 37 | "cert_html_view_enabled": true, 38 | "enable_subsection_gating": true, 39 | "enable_timed_exams": true, 40 | "self_paced": true 41 | }, 42 | "organization": "test-course-generator", 43 | "number": null, 44 | "run": "1", 45 | "user": "edx@example.com", 46 | "partner": "edx" 47 | } 48 | ] 49 | } 50 | -------------------------------------------------------------------------------- /course-generator/starter-courses.json: -------------------------------------------------------------------------------- 1 | { 2 | "courses": [ 3 | { 4 | "enrollment": { 5 | "audit": true, 6 | "credit": false, 7 | "credit_provider": null, 8 | "honor": false, 9 | "no_id_verification": false, 10 | "professional_education": false, 11 | "verified": false 12 | }, 13 | "fields": { 14 | "display_name": "audit course" 15 | }, 16 | "number": null, 17 | "organization": "test-course-generator", 18 | "partner": "edx", 19 | "run": "1", 20 | "user": "edx@example.com" 21 | }, 22 | { 23 | "enrollment": { 24 | "audit": false, 25 | "credit": false, 26 | "credit_provider": null, 27 | "honor": true, 28 | "no_id_verification": false, 29 | "professional_education": false, 30 | "verified": false 31 | }, 32 | "fields": { 33 | 
"display_name": "honor course" 34 | }, 35 | "number": null, 36 | "organization": "test-course-generator", 37 | "partner": "edx", 38 | "run": "1", 39 | "user": "edx@example.com" 40 | }, 41 | { 42 | "enrollment": { 43 | "audit": true, 44 | "credit": false, 45 | "credit_provider": null, 46 | "honor": false, 47 | "no_id_verification": false, 48 | "professional_education": false, 49 | "verified": true 50 | }, 51 | "fields": { 52 | "display_name": "verified course with audit seat" 53 | }, 54 | "number": null, 55 | "organization": "test-course-generator", 56 | "partner": "edx", 57 | "run": "1", 58 | "user": "edx@example.com" 59 | }, 60 | { 61 | "enrollment": { 62 | "audit": false, 63 | "credit": false, 64 | "credit_provider": null, 65 | "honor": true, 66 | "no_id_verification": false, 67 | "professional_education": false, 68 | "verified": true 69 | }, 70 | "fields": { 71 | "display_name": "verified course with honor seat" 72 | }, 73 | "number": null, 74 | "organization": "test-course-generator", 75 | "partner": "edx", 76 | "run": "1", 77 | "user": "edx@example.com" 78 | }, 79 | { 80 | "enrollment": { 81 | "audit": false, 82 | "credit": false, 83 | "credit_provider": null, 84 | "honor": false, 85 | "no_id_verification": false, 86 | "professional_education": true, 87 | "verified": false 88 | }, 89 | "fields": { 90 | "display_name": "professional course id required" 91 | }, 92 | "number": null, 93 | "organization": "test-course-generator", 94 | "partner": "edx", 95 | "run": "1", 96 | "user": "edx@example.com" 97 | }, 98 | { 99 | "enrollment": { 100 | "audit": false, 101 | "credit": false, 102 | "credit_provider": null, 103 | "honor": false, 104 | "no_id_verification": true, 105 | "professional_education": true, 106 | "verified": false 107 | }, 108 | "fields": { 109 | "display_name": "professional course id not required" 110 | }, 111 | "number": null, 112 | "organization": "test-course-generator", 113 | "partner": "edx", 114 | "run": "1", 115 | "user": "edx@example.com" 116 | }, 117 | { 118 | "enrollment": { 119 | "audit": true, 120 | "credit": true, 121 | "credit_provider": "test-credit-provider", 122 | "honor": false, 123 | "no_id_verification": false, 124 | "professional_education": false, 125 | "verified": true 126 | }, 127 | "fields": { 128 | "display_name": "credit course with audit seat" 129 | }, 130 | "number": null, 131 | "organization": "test-course-generator", 132 | "partner": "edx", 133 | "run": "1", 134 | "user": "edx@example.com" 135 | }, 136 | { 137 | "enrollment": { 138 | "audit": false, 139 | "credit": true, 140 | "credit_provider": "test-credit-provider", 141 | "honor": true, 142 | "no_id_verification": false, 143 | "professional_education": false, 144 | "verified": true 145 | }, 146 | "fields": { 147 | "display_name": "credit course with honor seat" 148 | }, 149 | "number": null, 150 | "organization": "test-course-generator", 151 | "partner": "edx", 152 | "run": "1", 153 | "user": "edx@example.com" 154 | } 155 | ] 156 | } 157 | -------------------------------------------------------------------------------- /course-generator/test-course.json: -------------------------------------------------------------------------------- 1 | { 2 | "courses": [ 3 | { 4 | "organization": "test-course-generator", 5 | "number": null, 6 | "run": "1", 7 | "user": "edx@example.com", 8 | "partner": "edx", 9 | "fields": { 10 | "display_name": "Test_Course", 11 | "enable_proctored_exams": null, 12 | "mobile_available": null, 13 | "discussion_link": null, 14 | "course_survey_required": null, 15 | 
"discussion_sort_alpha": null, 16 | "social_sharing_url": null, 17 | "enrollment_domain": null, 18 | "entrance_exam_minimum_score_pct": null, 19 | "max_student_enrollments_allowed": null, 20 | "announcement": null, 21 | "invitation_only": null, 22 | "certificates_display_behavior": null, 23 | "allow_anonymous": null, 24 | "enrollment_start": null, 25 | "start": null, 26 | "catalog_visibility": null, 27 | "instructor_info": null, 28 | "display_organization": null, 29 | "cert_name_short": null, 30 | "course_survey_name": null, 31 | "cert_html_view_overrides": null, 32 | "bypass_home": null, 33 | "entrance_exam_enabled": null, 34 | "show_timezone": null, 35 | "minimum_grade_credit": null, 36 | "cert_name_long": null, 37 | "cosmetic_display_price": null, 38 | "ccx_connector": null, 39 | "advertised_start": null, 40 | "certificate_available_date": null, 41 | "allow_unsupported_xblocks": null, 42 | "learning_info": null, 43 | "discussion_blackouts": null, 44 | "lti_passports": null, 45 | "allow_public_wiki_access": null, 46 | "allow_proctoring_opt_out": null, 47 | "wiki_slug": null, 48 | "tabs": null, 49 | "remote_gradebook": null, 50 | "is_new": null, 51 | "discussion_topics": null, 52 | "video_upload_pipeline": null, 53 | "course_edit_method": null, 54 | "issue_badges": null, 55 | "certificates_show_before_end": null, 56 | "teams_configuration": null, 57 | "html_textbooks": null, 58 | "disable_progress_graph": null, 59 | "end": null, 60 | "entrance_exam_id": null, 61 | "no_grade": null, 62 | "pdf_textbooks": null, 63 | "certificates": null, 64 | "pre_requisite_courses": null, 65 | "enable_ccx": null, 66 | "advanced_modules": null, 67 | "end_of_course_survey_url": null, 68 | "grading_policy": null, 69 | "due_date_display_format": null, 70 | "banner_image": null, 71 | "display_coursenumber": null, 72 | "allow_anonymous_to_peers": null, 73 | "enrollment_end": null, 74 | "create_zendesk_tickets": null, 75 | "hide_progress_tab": null, 76 | "show_calculator": null, 77 | "language": null, 78 | "css_class": null, 79 | "cert_html_view_enabled": null, 80 | "course_image": null, 81 | "video_thumbnail_image": null, 82 | "cohort_config": null, 83 | "enable_subsection_gating": null, 84 | "info_sidebar_name": null, 85 | "enable_timed_exams": null, 86 | "self_paced": null 87 | }, 88 | "enrollment": { 89 | "audit": true, 90 | "honor": false, 91 | "verified": true, 92 | "professional_education": false, 93 | "no_id_verification": false, 94 | "credit": false, 95 | "credit_provider": "test-credit-provider" 96 | } 97 | } 98 | ] 99 | } 100 | -------------------------------------------------------------------------------- /credentials/assets/demo-asset-banner-image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openedx-unsupported/devstack/28f6d7ea1fa30fd7e0bdc10f269999f15f7f8876/credentials/assets/demo-asset-banner-image.png -------------------------------------------------------------------------------- /credentials/assets/demo-asset-certificate-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openedx-unsupported/devstack/28f6d7ea1fa30fd7e0bdc10f269999f15f7f8876/credentials/assets/demo-asset-certificate-logo.png -------------------------------------------------------------------------------- /credentials/assets/demo-asset-logo.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/openedx-unsupported/devstack/28f6d7ea1fa30fd7e0bdc10f269999f15f7f8876/credentials/assets/demo-asset-logo.png -------------------------------------------------------------------------------- /credentials/generate_program_certificate.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo 'Attempting to award a program certificate to the edX user' 3 | echo 'Updating Discovery...' 4 | echo 'Adding assets to the edX demo organization' 5 | docker compose exec -T discovery bash -c 'mkdir /edx/app/discovery/discovery/provision-temp' 6 | docker cp ./assets edx.devstack.discovery:/edx/app/discovery/discovery/provision-temp/assets 7 | docker compose exec -T discovery bash -c 'source /edx/app/discovery/discovery_env && python /edx/app/discovery/discovery/manage.py add_logos_to_organization --partner=edX --logo=/edx/app/discovery/discovery/provision-temp/assets/demo-asset-logo.png --certificate_logo=/edx/app/discovery/discovery/provision-temp/assets/demo-asset-certificate-logo.png --banner_image=/edx/app/discovery/discovery/provision-temp/assets/demo-asset-banner-image.png' 8 | docker compose exec -T discovery bash -c 'rm -rf /edx/app/discovery/discovery/provision-temp' 9 | 10 | echo 'Updating credentials...' 11 | echo 'setting catalog and lms base urls' 12 | docker compose exec -T credentials bash -c 'source /edx/app/credentials/credentials_env && python /edx/app/credentials/credentials/manage.py create_or_update_site --site-domain example.com --site-name example.com --platform-name edX --tos-url https://www.edx.org/edx-terms-service --privacy-policy-url https://www.edx.org/edx-privacy-policy --homepage-url https://www.edx.org --company-name "edX Inc." --certificate-help-url https://edx.readthedocs.org/projects/edx-guide-for-students/en/latest/SFD_certificates.html#web-certificates --lms-url-root http://edx.devstack.lms:18000/ --catalog-api-url http://edx.devstack.discovery:18381/api/v1/ --theme-name edx.org' 13 | echo 'copying discovery catalog' 14 | docker compose exec -T credentials bash -c 'source /edx/app/credentials/credentials_env && python /edx/app/credentials/credentials/manage.py copy_catalog' 15 | echo 'creating a program certificate configuration' 16 | docker compose exec -T credentials bash -c 'source /edx/app/credentials/credentials_env && python /edx/app/credentials/credentials/manage.py create_program_certificate_configuration' 17 | 18 | echo 'Updating LMS...' 
19 | echo 'creating a credentials API connection' 20 | docker compose exec -T lms bash -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms create_credentials_api_configuration' 21 | echo 'changing edX user enrollment in demo course from audit to verified' 22 | docker compose exec -T lms bash -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms change_enrollment -u edx -c course-v1:edX+DemoX+Demo_Course --from audit --to verified' 23 | echo 'manually ID verifying edX user' 24 | docker compose exec -T lms bash -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms manual_verifications --email edx@example.com' 25 | echo 'generating course certificate' 26 | docker compose exec -T lms bash -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms cert_generation -u 3 -c course-v1:edX+DemoX+Demo_Course' 27 | echo 'notifying credentials' 28 | docker compose exec -T lms bash -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker notify_credentials --courses course-v1:edX+DemoX+Demo_Course --notify_programs' 29 | -------------------------------------------------------------------------------- /destroy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | read -p "This will delete all data in your devstack. Would you like to proceed? [y/n] " -r 5 | if [[ $REPLY =~ ^[Yy]$ ]] 6 | then 7 | docker compose down -v 8 | fi 9 | -------------------------------------------------------------------------------- /docker-compose-host.yml: -------------------------------------------------------------------------------- 1 | 2 | version: "2.1" 3 | 4 | services: 5 | credentials: 6 | volumes: 7 | - ${DEVSTACK_WORKSPACE}/credentials:/edx/app/credentials/credentials 8 | - credentials_node_modules:/edx/app/credentials/credentials/node_modules 9 | - credentials_tox:/edx/app/credentials/credentials/.tox 10 | - ${DEVSTACK_WORKSPACE}/src:/edx/src 11 | discovery: 12 | volumes: 13 | - ${DEVSTACK_WORKSPACE}/course-discovery:/edx/app/discovery/discovery 14 | - discovery_node_modules:/edx/app/discovery/discovery/node_modules 15 | - discovery_tox:/edx/app/discovery/discovery/.tox 16 | - ${DEVSTACK_WORKSPACE}/src:/edx/src 17 | ecommerce: 18 | volumes: 19 | - ${DEVSTACK_WORKSPACE}/ecommerce:/edx/app/ecommerce/ecommerce 20 | - ecommerce_node_modules:/edx/app/ecommerce/ecommerce/node_modules 21 | - ecommerce_tox:/edx/app/ecommerce/ecommerce/.tox 22 | - ${DEVSTACK_WORKSPACE}/src:/edx/src 23 | forum: 24 | volumes: 25 | - ${DEVSTACK_WORKSPACE}/cs_comments_service:/edx/app/forum/cs_comments_service 26 | lms: 27 | volumes: 28 | - ${DEVSTACK_WORKSPACE}/edx-platform:/edx/app/edxapp/edx-platform 29 | - edxapp_media:/edx/var/edxapp/media 30 | - edxapp_node_modules:/edx/app/edxapp/edx-platform/node_modules 31 | - edxapp_tox:/edx/app/edxapp/edx-platform/.tox 32 | - edxapp_uploads:/edx/var/edxapp/uploads 33 | - ${DEVSTACK_WORKSPACE}/src:/edx/src 34 | edx_notes_api: 35 | volumes: 36 | - ${DEVSTACK_WORKSPACE}/edx-notes-api:/edx/app/notes/ 37 | - ${DEVSTACK_WORKSPACE}/src:/edx/src 38 | registrar: 39 | volumes: 40 | - ${DEVSTACK_WORKSPACE}/registrar:/edx/app/registrar 41 | registrar-worker: 42 | volumes: 43 | - ${DEVSTACK_WORKSPACE}/registrar:/edx/app/registrar 44 | cms: 45 | volumes: 46 | - ${DEVSTACK_WORKSPACE}/edx-platform:/edx/app/edxapp/edx-platform 47 | 
- edxapp_media:/edx/var/edxapp/media 48 | - edxapp_node_modules:/edx/app/edxapp/edx-platform/node_modules 49 | - edxapp_tox:/edx/app/edxapp/edx-platform/.tox 50 | - edxapp_uploads:/edx/var/edxapp/uploads 51 | - ${DEVSTACK_WORKSPACE}/src:/edx/src 52 | insights: 53 | volumes: 54 | - ${DEVSTACK_WORKSPACE}/edx-analytics-dashboard:/edx/app/insights/insights 55 | - insights_node_modules:/edx/app/insights/insights/node_modules 56 | analyticsapi: 57 | volumes: 58 | - ${DEVSTACK_WORKSPACE}/edx-analytics-data-api:/edx/app/analytics_api/analytics_api 59 | - ${DEVSTACK_WORKSPACE}/src:/edx/src 60 | 61 | # Note that frontends mount `src` to /edx/app/src instead of /edx/src. 62 | # See ADR #5 for rationale. 63 | frontend-app-account: 64 | volumes: 65 | - ${DEVSTACK_WORKSPACE}/frontend-app-account:/edx/app/frontend-app-account 66 | - frontend_app_account_node_modules:/edx/app/frontend-app-account/node_modules 67 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 68 | 69 | frontend-app-profile: 70 | volumes: 71 | - ${DEVSTACK_WORKSPACE}/frontend-app-profile:/edx/app/frontend-app-profile 72 | - frontend_app_profile_node_modules:/edx/app/frontend-app-profile/node_modules 73 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 74 | 75 | frontend-app-authn: 76 | volumes: 77 | - ${DEVSTACK_WORKSPACE}/frontend-app-authn:/edx/app/frontend-app-authn 78 | - frontend_app_authn_node_modules:/edx/app/frontend-app-authn/node_modules 79 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 80 | 81 | frontend-app-course-authoring: 82 | volumes: 83 | - ${DEVSTACK_WORKSPACE}/frontend-app-course-authoring:/edx/app/frontend-app-course-authoring 84 | - frontend_app_course_authoring_node_modules:/edx/app/frontend-app-course-authoring/node_modules 85 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 86 | frontend-app-gradebook: 87 | volumes: 88 | - ${DEVSTACK_WORKSPACE}/frontend-app-gradebook:/edx/app/frontend-app-gradebook 89 | - frontend_app_gradebook_node_modules:/edx/app/frontend-app-gradebook/node_modules 90 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 91 | frontend-app-ora-grading: 92 | volumes: 93 | - ${DEVSTACK_WORKSPACE}/frontend-app-ora-grading:/edx/app/frontend-app-ora-grading 94 | - frontend_app_ora_grading_node_modules:/edx/app/frontend-app-ora-grading/node_modules 95 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 96 | frontend-app-learner-dashboard: 97 | volumes: 98 | - ${DEVSTACK_WORKSPACE}/frontend-app-learner-dashboard:/edx/app/frontend-app-learner-dashboard 99 | - frontend_app_learner_dashboard_node_modules:/edx/app/frontend-app-learner-dashboard/node_modules 100 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 101 | frontend-app-learner-record: 102 | volumes: 103 | - ${DEVSTACK_WORKSPACE}/frontend-app-learner-record:/edx/app/frontend-app-learner-record 104 | - frontend_app_learner_record_node_modules:/edx/app/frontend-app-learner-record/node_modules 105 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 106 | frontend-app-learning: 107 | volumes: 108 | - ${DEVSTACK_WORKSPACE}/frontend-app-learning:/edx/app/frontend-app-learning 109 | - frontend_app_learning_node_modules:/edx/app/frontend-app-learning/node_modules 110 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 111 | frontend-app-library-authoring: 112 | volumes: 113 | - ${DEVSTACK_WORKSPACE}/frontend-app-library-authoring:/edx/app/frontend-app-library-authoring 114 | - frontend_app_library_authoring_node_modules:/edx/app/frontend-app-library-authoring/node_modules 115 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 116 | frontend-app-payment: 117 | volumes: 118 | - 
${DEVSTACK_WORKSPACE}/frontend-app-payment:/edx/app/frontend-app-payment 119 | - frontend_app_payment_node_modules:/edx/app/frontend-app-payment/node_modules 120 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 121 | frontend-app-program-console: 122 | volumes: 123 | - ${DEVSTACK_WORKSPACE}/frontend-app-program-console:/edx/app/frontend-app-program-console 124 | - frontend_app_program_console_node_modules:/edx/app/frontend-app-program-console/node_modules 125 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 126 | frontend-app-publisher: 127 | volumes: 128 | - ${DEVSTACK_WORKSPACE}/frontend-app-publisher:/edx/app/frontend-app-publisher 129 | - frontend_app_publisher_node_modules:/edx/app/frontend-app-publisher/node_modules 130 | - ${DEVSTACK_WORKSPACE}/src:/edx/app/src 131 | 132 | volumes: 133 | credentials_node_modules: 134 | discovery_node_modules: 135 | ecommerce_node_modules: 136 | insights_node_modules: 137 | edxapp_media: 138 | edxapp_node_modules: 139 | edxapp_uploads: 140 | frontend_app_account_node_modules: 141 | frontend_app_profile_node_modules: 142 | frontend_app_authn_node_modules: 143 | frontend_app_course_authoring_node_modules: 144 | frontend_app_gradebook_node_modules: 145 | frontend_app_ora_grading_node_modules: 146 | frontend_app_learner_dashboard_node_modules: 147 | frontend_app_learner_record_node_modules: 148 | frontend_app_learning_node_modules: 149 | frontend_app_library_authoring_node_modules: 150 | frontend_app_payment_node_modules: 151 | frontend_app_program_console_node_modules: 152 | frontend_app_publisher_node_modules: 153 | credentials_tox: 154 | discovery_tox: 155 | ecommerce_tox: 156 | edxapp_tox: 157 | -------------------------------------------------------------------------------- /docker-compose-themes.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | discovery: 5 | volumes: 6 | - ${DEVSTACK_WORKSPACE}/edx-themes:/edx/app/edx-themes 7 | ecommerce: 8 | volumes: 9 | - ${DEVSTACK_WORKSPACE}/edx-themes:/edx/app/edx-themes 10 | lms: 11 | volumes: 12 | - ${DEVSTACK_WORKSPACE}/edx-themes:/edx/app/edx-themes 13 | cms: 14 | volumes: 15 | - ${DEVSTACK_WORKSPACE}/edx-themes:/edx/app/edx-themes 16 | -------------------------------------------------------------------------------- /docker-compose-watchers.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | lms_watcher: 5 | command: bash -c 'cd /edx/app/edxapp/edx-platform && source ../edxapp_env && while true; do paver watch_assets --w=$$ASSET_WATCHER_TIMEOUT; sleep 2; done' 6 | container_name: "edx.${COMPOSE_PROJECT_NAME:-devstack}.lms_watcher" 7 | environment: 8 | FRONTEND_TEST_SERVER_HOSTNAME: edx.devstack.lms_watcher 9 | ASSET_WATCHER_TIMEOUT: 12 10 | image: openedx/lms-dev:${OPENEDX_RELEASE:-latest} 11 | volumes: 12 | - ${DEVSTACK_WORKSPACE}/edx-platform:/edx/app/edxapp/edx-platform 13 | - edxapp_lms_assets:/edx/var/edxapp/staticfiles/ 14 | - edxapp_node_modules:/edx/app/edxapp/edx-platform/node_modules 15 | - ${DEVSTACK_WORKSPACE}/src:/edx/src 16 | - ${DEVSTACK_WORKSPACE}/edx-themes:/edx/app/edx-themes 17 | networks: 18 | default: 19 | aliases: 20 | - edx.devstack.lms_watcher 21 | 22 | cms_watcher: 23 | command: bash -c 'cd /edx/app/edxapp/edx-platform && source ../edxapp_env && while true; do paver watch_assets --w=$$ASSET_WATCHER_TIMEOUT; sleep 2; done' 24 | container_name: "edx.${COMPOSE_PROJECT_NAME:-devstack}.cms_watcher" 25 | environment: 26 | 
FRONTEND_TEST_SERVER_HOSTNAME: edx.devstack.cms_watcher 27 | ASSET_WATCHER_TIMEOUT: 12 28 | image: openedx/lms-dev:${OPENEDX_RELEASE:-latest} 29 | volumes: 30 | - edxapp_cms_assets:/edx/var/edxapp/staticfiles/ 31 | - ${DEVSTACK_WORKSPACE}/edx-platform:/edx/app/edxapp/edx-platform 32 | - edxapp_node_modules:/edx/app/edxapp/edx-platform/node_modules 33 | - ${DEVSTACK_WORKSPACE}/src:/edx/src 34 | - ${DEVSTACK_WORKSPACE}/edx-themes:/edx/app/edx-themes 35 | networks: 36 | default: 37 | aliases: 38 | - edx.devstack.cms_watcher 39 | 40 | volumes: 41 | edxapp_lms_assets: 42 | edxapp_cms_assets: 43 | edxapp_node_modules: 44 | -------------------------------------------------------------------------------- /docs/advanced_configuration.rst: -------------------------------------------------------------------------------- 1 | Advanced Configuration Options 2 | ------------------------------ 3 | 4 | The file ``options.mk`` sets several configuration options to default values. 5 | For example ``DEVSTACK_WORKSPACE`` (the folder where your Git repos are expected to be) 6 | is set to this directory's parent directory by default, 7 | and ``DEFAULT_SERVICES`` (the list of services that are provisioned and run by default) 8 | is set to a fairly long list of services out of the box. 9 | For more detail, refer to the comments in the file itself. 10 | 11 | If you're feeling brave, you can create a git-ignored overrides file called 12 | ``options.local.mk`` in the same directory and set your own values. In general, 13 | it's good to bring down containers before changing any settings. 14 | 15 | Changing the Docker Compose Project Name 16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 17 | 18 | The ``COMPOSE_PROJECT_NAME`` variable is used to namespace Docker volumes 19 | and networks, so changing it will give you a separate set of databases. 20 | This is handled for you automatically by setting the ``OPENEDX_RELEASE`` environment variable in ``options.mk`` 21 | (e.g. ``COMPOSE_PROJECT_NAME=devstack-juniper.master``). Should you want to manually override this, edit ``options.local.mk`` in the root of this repo (creating the file if it does not exist). Change the devstack project name by adding the following line:: 22 | 23 | # Example: COMPOSE_PROJECT_NAME=secondarydevstack 24 | COMPOSE_PROJECT_NAME= 25 | 26 | As a specific example, if ``OPENEDX_RELEASE`` is set in your environment as ``juniper.master``, then ``COMPOSE_PROJECT_NAME`` will default to ``devstack-juniper.master`` instead of ``devstack``. 27 | 28 | -------------------------------------------------------------------------------- /docs/building-images.rst: -------------------------------------------------------------------------------- 1 | Building Images for Devstack 2 | ============================ 3 | 4 | There are `Docker CI Jenkins jobs`_ on tools-edx-jenkins that build and push new 5 | Docker images to DockerHub on code changes to either the configuration repository or the IDA's codebase. These images 6 | are tagged according to the branch from which they were built (see NOTES below). 7 | If you want to build the images on your own, the Dockerfiles are available in the ``edx/configuration`` repo. 8 | 9 | NOTES: 10 | 11 | 1. edxapp and IDAs use the ``latest`` tag for configuration changes which have been merged to the master branch of 12 | their repository and ``edx/configuration``. 13 | 2.
Images for a named Open edX release are built from the corresponding branch 14 | of each repository and tagged appropriately, for example ``hawthorn.master`` 15 | or ``hawthorn.rc1``. 16 | 3. The elasticsearch used in devstack is built using elasticsearch-devstack/Dockerfile and the ``devstack`` tag. 17 | 18 | BUILD COMMANDS: 19 | 20 | .. code:: sh 21 | 22 | git checkout master 23 | git pull 24 | docker build -f docker/build/edxapp/Dockerfile . -t edxops/edxapp:latest 25 | 26 | .. code:: sh 27 | 28 | git checkout master 29 | git pull 30 | docker build -f docker/build/ecommerce/Dockerfile . -t edxops/ecommerce:devstack 31 | 32 | The build commands above will use your local configuration, but will pull 33 | application code from the master branch of the application's repository. If you 34 | would like to use code from another branch/tag/hash, modify the ``*_VERSION`` 35 | variable that lives in the ``ansible_overrides.yml`` file beside the 36 | ``Dockerfile``. Note that edx-platform is an exception; the variable to modify is ``edx_platform_version`` 37 | and not ``EDXAPP_VERSION``. 38 | 39 | For example, if you wanted to build tag ``release-2017-03-03`` for the 40 | E-Commerce Service, you would modify ``ECOMMERCE_VERSION`` in 41 | ``docker/build/ecommerce/ansible_overrides.yml``. 42 | 43 | .. _Docker CI Jenkins Jobs: https://tools-edx-jenkins.edx.org/job/DockerCI 44 | -------------------------------------------------------------------------------- /docs/database-dumps.rst: -------------------------------------------------------------------------------- 1 | Updating Relational Database Dumps 2 | ================================== 3 | 4 | We use relational database dumps to spend less time running relational database 5 | migrations and to speed up the provisioning of a devstack. These dumps are saved 6 | as .sql scripts in the root directory of this git repository and they should be 7 | updated occasionally - when relational database migrations take a prolonged amount 8 | of time *or* we want to incorporate database schema changes which were done manually. 9 | 10 | To update the relational database dumps: 11 | 12 | 1. Backup the data of your existing devstack if needed. 13 | 14 | 2. If you are unsure whether the django_migrations tables (which keeps which migrations 15 | were already applied) in each database are consistent with the existing database dumps, 16 | disable the loading of these database dumps during provisioning by commenting out 17 | the calls to ``load-db.sh`` in the ``provision-*.sh`` scripts. This ensures a start with a 18 | completely fresh database and incorporates any changes that may have required some form 19 | of manual intervention for existing installations (e.g. drop/move tables). 20 | 21 | 3. Run the shell script which destroys any existing devstack, creates a new one 22 | and updates the relational database dumps: 23 | 24 | .. code:: sh 25 | 26 | ./update-dbs-init-sql-scripts.sh 27 | -------------------------------------------------------------------------------- /docs/decisions/0001-avoid-default-service-set.rst: -------------------------------------------------------------------------------- 1 | 1. Avoid default service set 2 | ============================ 3 | 4 | Status 5 | ------ 6 | 7 | Approved 8 | 9 | Context 10 | ------- 11 | 12 | Commands like ``make dev.pull`` and ``make dev.up`` operate by default on a large subset of the services that devstack supports (via overridable variable ``DEFAULT_SERVICES``). 
There are also variants such as ``make dev.up.cms+credentials`` which will operate on a more constrained subset. However, many developers are not aware of these variants or are not in the habit of using them. By not constraining the command to selected services, developers pull down Docker images that they do not need for their current workflow, or find that devstack is using more memory and CPU than needed due to running unnecessary services. These issues have been repeatedly observed in supporting fellow edX devs in internal communications, and are likely an issue in the community as well. We also see people run into bugs in unrelated services, distracting them from their main task. 13 | 14 | Several people and teams have made efforts to improve the documentation and offer these better-scoped commands, but we still see complaints about memory, CPU, and network usage that can be solved by avoiding the default set. 15 | 16 | The term "default" is also too prescriptive, since it usually connotes a desirable path and we actually don't want people to use this default. The contents of ``DEFAULT_SERVICES`` is also incoherent, as it does not reflect any one workflow, but rather is simply a "large set" that covers something like 80% of cases (but too much for any one of them). 17 | 18 | Decision 19 | -------- 20 | 21 | We introduce an explicit alias for the default service set, ``large-and-slow``, and introduce targets like ``dev.pull.large-and-slow``. Creating a name for the large set with a built-in warning may help warn people away from it. 22 | 23 | Next, any direct usage of the bare target ``dev.pull`` triggers a warning in the terminal and an opportunity to cancel and use a more tightly scoped command. Using ``dev.pull.large-and-slow`` directly bypasses this warning; this set may still be needed for some use-cases, such as initial provisioning. This allows us to educate a broader range of developers (not just the ones who come and ask for help) and tighten the feedback loop to seconds rather than hours (warning in terminal vs. discussion in chat.) 24 | 25 | Finally, documentation is to be updated to better explain this distinction, and any mention of ``dev.pull`` updated to either ``dev.pull.large-and-slow`` or ``dev.pull.`` so that readers will be steered in the correct direction from the outset. 26 | 27 | The first pass only changes the ``pull`` and ``up`` families of make targets, since we believe they are the most commonly used and the most common to cause developer pain. ``provision``, ``check``, ``migrate``, and ``reset`` are good candidates for after this is proved out. 28 | 29 | Use of ``DEFAULT_SERVICES`` and the make targets which rely on it is not deprecated, but should always be an intentional act. 30 | 31 | Consequences 32 | ------------ 33 | 34 | People will be steered away from bare targets like ``dev.pull`` and ``DEFAULT_SERVICES`` may be reduced in importance. 35 | 36 | Developers first setting up devstack will still use the large set, since some parts of provisioning (specifically, the loading of test data) have non-trivial dependencies between services. 37 | 38 | Rejected Alternatives 39 | --------------------- 40 | 41 | - Shrinking ``DEFAULT_SERVICES``: Likely to break any number of workflows, or at least confuse people who rely on it. 42 | - Just document it better: We don't think people read the docs enough to discover docs on this issue. 
People probably mostly go looking through the docs when they have a specific error or a task they want to learn how to accomplish, but they may not even identify overly large service sets as a problem to solve. 43 | -------------------------------------------------------------------------------- /docs/decisions/0002-expect-cli-testing.rst: -------------------------------------------------------------------------------- 1 | 2. Use ``expect`` for CLI testing 2 | ================================= 3 | 4 | Status 5 | ------ 6 | 7 | Approved 8 | 9 | Context 10 | ------- 11 | 12 | Devstack has a CLI that a large number of developers depend upon, and when it breaks it can cause disruption across multiple teams. However, there is limited automated testing that would prevent such breakage. The CI script is currently set up to run through some common commands for a static set of services, from cloning repositories all the way through provisioning. These can catch some basic problems but only exercise a few core Makefile targets. 13 | 14 | Recently the CLI was changed to warn the developer when "bare" commands such as ``make dev.pull`` are run. The new ``make_warn_default_large.sh`` prints a warning and then waits for acknowledgement before proceeding. It was not obvious how to add automated tests for this. Using pytest and Python's ``subprocess`` module turned out to be overly difficult—this type of explicit process management requires a lot of low-level work such as designating the spawned process as a process group leader, killing the group at the end or on error, reading into buffers before the command is finished, matching stderr and stdout against regexes, managing timeouts, etc. The Expect utility handles this using a domain-specific language, and while it is not installed by default on Mac or Linux, it is designed for exactly this sort of task. 15 | 16 | It is possible that there's an expect-like wrapper of subprocess that would work from pytest, but we couldn't find one in the time we'd allotted for the task. 17 | 18 | Decision 19 | -------- 20 | 21 | A ``tests`` directory is added with a single Expect script which tests the warn-on-large-set path for one make command. More scripts can be added as other CLI changes are made. 22 | 23 | The Github CI configuration installs ``expect`` and runs the Expect script by name. 24 | 25 | Consequences 26 | ------------ 27 | 28 | Developers wishing to run the automated tests locally will have to have Expect installed. This should be available on both Mac and Linux. 29 | 30 | There is no provision made here for setting up different environments in which to run the tests (e.g. with/without an ``options.local.mk`` overrides file). If this is needed, it can be arranged from a wrapper script. 31 | 32 | Rejected Alternatives 33 | --------------------- 34 | 35 | Manual invocation of ``subprocess``, as described above. 36 | -------------------------------------------------------------------------------- /docs/decisions/0003-usage-metrics.rst: -------------------------------------------------------------------------------- 1 | 3. Collect usage metrics 2 | ======================== 3 | 4 | Status 5 | ------ 6 | 7 | Approved 8 | 9 | Context 10 | ------- 11 | 12 | Developer velocity can be significantly impacted by the development environment, but without a way to measure impact, it is difficult to tell if education initiatives, documentation, new capabilities, and other changes are having a positive effect on the developer experience. 
The Arch-BOM team has already used surveys to get qualitative information about developer experience, but this self-reported information is "expensive" to get—people will only answer so many surveys before survey fatigue sets in. Quantitative information about tool usage patterns, failure rates, and other higher-frequency, mechanically collectible information would complement the more infrequent, qualitative reports. 13 | 14 | We believe that a framework for measuring the usage and time of essential devstack actions will give Arch-BOM more ongoing, useful information about developer needs and trends over time in order to prioritize further devstack efforts. 15 | 16 | 17 | Decision 18 | -------- 19 | 20 | A number of high-use Makefile targets have been instrumented with metrics collection using an indirection technique. As an example, the target ``dev.provision.%`` now consists only of the command ``@scripts/send_metrics.py wrap "dev.provision.$*"``; the metrics script then calls ``make impl-dev.provision.%``, which is the "real" target. About 6 families of commands have been instrumented in this way. 21 | 22 | This ``send_metrics.py`` wrapper script calls Make as a child process, and if consent has been provided, the wrapper additionally collects command duration, exit code, make target, and some environmental information about git. 23 | 24 | If a consent decision has not been made, the wrapper script asks the developer (at the end of a command run) to opt in or out. The resulting explicit consent or decline is recorded via a config file, and an anonymous user ID is stored in this config file on first collection if consent is provided. 25 | 26 | Metrics are reported synchronously to Segment, then forwarded on to New Relic for analysis. The data has a retention period of one year. 27 | 28 | 29 | Consequences 30 | ------------ 31 | 32 | The current implementation only instruments Makefile targets. The most basic interactions with devstack occur via the Make interface, with additional interaction occurring inside of various Docker shell environments. Most of devstack's command documentation covers Make commands, so this seems like a good first step. It's difficult to capture arbitrary commands, and there are privacy issues there as well, so for now we're just capturing Make targets. 33 | 34 | Since devstack is used by both employees and contractors and also the wider Open edX community, it's not sufficient to simply start collecting and reporting metrics; there needs to be informed consent. This limits the amount of participation, even of edX employees. (We don't have a way of telling whether a developer is an employee.) 35 | 36 | Commands run while not connected to the internet, or by people who have Segment blocked at the DNS level, will not have metrics captured. 37 | 38 | 39 | Rejected Alternatives 40 | --------------------- 41 | 42 | The explicit indirection technique is bulky and makes it harder to maintain the Makefile. However, all of the other techniques we considered have serious downsides: 43 | 44 | - Adding a command call at the beginning of each target's block would add almost as much "chaff", and would not capture timings or exit codes, nor target dependency timings and failures. 45 | - Overriding the ``SHELL`` variable with a wrapper script would allow capturing some timings and exit codes, but only per-line, not per-target.
Using ``.ONESHELL`` would solve this, except then the make target itself can't be captured without parsing the process tree—and this has only been tested on Linux, not Mac. 46 | - Asking people to call ``./make.sh`` instead of ``make`` would allow full capturing, but lose tab-completion, and it is likely very few people would change their workflows to accommodate this request. 47 | - Installing a package into the devstack virtualenv which declares a ``console_scripts`` override named ``make`` would allow intercepting make (and other) commands, but would only work for people who interact with devstack from a virtualenv, which turns out to be a minority of edX developers according to a poll. 48 | 49 | We'll continue looking for a low-profile way to instrument all targets, or change which targets we instrument over time. 50 | -------------------------------------------------------------------------------- /docs/decisions/0004-backends-depend-on-frontends.rst: -------------------------------------------------------------------------------- 1 | 4. Backend services now depend on frontend apps 2 | ----------------------------------------------- 3 | 4 | Status 5 | ====== 6 | 7 | **Reverted** due to resource depletion concerns. 8 | 9 | A consequence of implementing this decision was that an increased number of containers (specifically, frontend containers) were started by common commands like ``make dev.provision`` and ``make dev.up.lms``. Unfortunately, the increased system resource consumption was leading to blocking workflow disruptions such as Docker network timeouts. 10 | 11 | In the absence of an immediately obvious way of reducing the additional resource burden that this decision's implementation requires, we have decided to revert it. Future work could include: 12 | 13 | * Revisit the *Rejected Alternatives* listed at the bottom of this decision record. Both of those alternatives allow smaller groups of containers to be started for different situations. 14 | * Investigate how the memory and CPU footprints of the micro-frontend Docker containers could be reduced. 15 | * Investigate running all micro-frontends from a single Docker container. 16 | 17 | Context 18 | ======= 19 | 20 | Micro-frontends as default experiences 21 | ************************************** 22 | 23 | As of mid June 2021 (between the Lilac and Maple releases), an Open edX instance with default configuration will now direct users to the Learning MFE (Micro-Frontend) for courseware, with a temporary opt-out flag existing to revert to the legacy LMS-rendered frontend. Thus, to test a typical learner experience, Devstack users now require the frontend-app-learning container to be started alongside the LMS. This is in contrast to the previous state of affairs, in which MFE experiences were only available via an opt-IN flag, allowing reasonable Devstack usage without having to start any MFE containers. 24 | 25 | We anticipate that other learner, author, and administrator experiences will soon begin to use MFE features by default, requiring that more and more MFEs be started in order to simulate user experiences in Devstack.
Thus, we anticipate an imminent developer experience issue, in which developers will need to type in convoluted commands like:: 26 | 27 | make dev.up.frontend-app-authn+frontend-app-discussions+frontend-app-gradebook+frontend-app-learning 28 | 29 | 30 | in order to enable the feature set that was previously available using simply:: 31 | 32 | make dev.up.lms 33 | 34 | 35 | Docker-compose service dependencies 36 | *********************************** 37 | 38 | Devstack uses docker-compose to orchestrate containers by defining services in ``docker-compose.yml``. Note that "services" here encompasses backends, frontends, and generic resources like MySQL. 39 | 40 | Each service definition may indicate a list of dependent services using the ``depends_on`` key. Dependencies are transitive, and may not be cyclical. When a developer runs ``make dev.up.``, docker-compose is invoked in order to start both the service and its dependencies. For example, LMS depends on Mongo and Discovery, among other services. So, running ``make dev.up.lms`` will start not just LMS, but also Mongo, Discovery, all of Discovery's dependencies, and so on. 41 | 42 | Currently, micro-frontend services (those prefixed with ``frontend-app-``) are defined to depend on backends, but not vice versa. So, starting frontend-app-learning will automatically start LMS, but starting LMS will not automatically start frontend-app-learning. This makes sense under the logic that "frontends depend on the APIs of backends in order to function". 43 | 44 | However, it can be argued that the opposite dependency relationship also makes sense. That is, one may assert that backends should depend on frontends in order to expose their APIs in a usable way. One could further assert that frontends shouldn't have hard dependencies on backend APIs, and should instead gracefully degrade when some or all of their APIs are unavailable. 45 | 46 | 47 | Decision 48 | ======== 49 | 50 | Whichever dependency direction (frontends depend on backends, or vice versa) is more logically sound, we conclude that, for the purposes of Devstack, *asserting that backends depend on frontends is more useful to developers*. Specifically, it is beneficial to current and future developer workflows if ``make dev.up.lms`` automatically starts all learning-related frontends, ``make dev.up.cms`` automatically starts all authoring-related frontends, ``make dev.up.ecommerce`` starts all purchasing-related frontends, and so on. 51 | 52 | A necessary corollary to this decision is that *all micro-frontends required for default functionality must be included in devstack*. While it is encouraged that *all* new and existing micro-frontends are added to devstack using the pattern described above, it is absolutely necessary that MFEs which are required for out-of-the-box functionality be added to devstack. 53 | 54 | 55 | Consequences 56 | ============ 57 | 58 | * ``docker-compose.yml`` will be updated to reflect that backend services depend on frontend-app services, not the other way around (see the sketch after this list). Devstack documentation will be updated accordingly. 59 | * ``docker-compose-host.yml`` will be updated to address an issue with local usage of JS packages, which currently forces some frontend development workflows to occur outside of devstack. The `documentation in frontend-build`_ will be updated accordingly. See `ADR 5`_ for details. 60 | * An email and Slack message will be sent out to explain these changes and how we anticipate that they will impact developer workflows. The email will explain that if a micro-frontend is required to simulate a common user story in the default configuration, then that frontend should be included in devstack, and should be automatically started by the relevant backend using ``depends_on``.
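For illustration, the dependency inversion described in this ADR would have amounted to fragments of ``docker-compose.yml`` along these lines (a minimal, hypothetical sketch rather than the actual file; the decision was reverted, so the current ``docker-compose.yml`` does not do this, and the service names shown are examples only)::

    lms:
      depends_on:
        # existing backend dependencies ...
        - mysql80
        - mongo
        - discovery
        # ... plus the frontends needed for the default learner experience
        - frontend-app-learning

Because ``depends_on`` is transitive, ``make dev.up.lms`` would then have brought up the learning frontend along with the LMS's backend dependencies.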
61 | 62 | 63 | .. _documentation in frontend-build: https://github.com/openedx/frontend-build#local-module-configuration-for-webpack 64 | .. _ADR 5: ./0005-frontend-package-mounts.rst 65 | 66 | Rejected Alternatives 67 | ===================== 68 | 69 | * Keep the old dependency relationships, but add convenience targets (such as ``dev.up.domain.learning``) to start groups of related micro-frontends. We determine that this would increase the already-large cognitive overhead of the Devstack interface. 70 | * Invert dependency relationships as described in this ADR, and also add targets such as ``make dev.up.lms-backend`` in order to start LMS without associated frontends. We determine that this would create a cascade of new inconsistencies in the Devstack interface: since only one of ``lms`` or ``lms-backend`` could exist as a docker-compose service, rules for the other would have to be hard-coded into the Makefile as special cases. 71 | -------------------------------------------------------------------------------- /docs/getting_started.rst: -------------------------------------------------------------------------------- 1 | Getting Started 2 | --------------- 3 | 4 | Prerequisites 5 | ~~~~~~~~~~~~~ 6 | 7 | You will need to have the following installed: 8 | 9 | - make 10 | - Python 3.8 11 | - Docker, including ``docker compose`` 12 | 13 | This project requires **Docker 19.03+ CE**. We recommend Docker Stable, but 14 | Docker Edge should work as well. Ensure that your Docker installation includes 15 | ``docker compose``; on some operating systems (e.g. Ubuntu Linux) this may require 16 | a separate package. 17 | 18 | **NOTE:** Switching between Docker Stable and Docker Edge will remove all images and 19 | settings. Don't forget to restore your memory setting and be prepared to 20 | provision. 21 | 22 | For macOS users, please use `Docker for Mac`_, which comes with ``docker 23 | compose``. Previous Mac-based tools (e.g. boot2docker) are *not* supported. 24 | Please be aware that the `licensing terms`_ for Docker for Mac (aka Docker 25 | Desktop) may mean that it is no longer free for your organization's use. 26 | 27 | Since a Docker-based devstack runs many containers, 28 | you should configure Docker with a sufficient 29 | amount of resources. We find that `configuring Docker for Mac`_ 30 | with a minimum of **2 CPUs, 8GB of memory, and a disk image size of 96GB** 31 | does work. 32 | 33 | `Docker for Windows`_ may work but has not been tested and is *not* supported. 34 | 35 | If you are using Linux, Ubuntu (and Debian) developers should ensure 36 | they've uninstalled docker.io and docker-compose from the main Ubuntu 37 | repositories and instead installed docker-ce and docker-compose-plugin from the 38 | official Docker package repository: 39 | https://docs.docker.com/engine/install/ubuntu/. They should also use the 40 | ``overlay2`` storage driver (*not* ``overlay``) and kernel version 4.0+. To 41 | check which storage driver your ``docker-daemon`` uses, run the following 42 | command. 43 | 44 | .. code:: sh 45 | 46 | docker info | grep -i 'storage driver' 47 | 48 | .. _Docker for Mac: https://docs.docker.com/desktop/install/mac-install/ 49 | .. _licensing terms: https://www.docker.com/pricing/faq 50 | .. _configuring Docker for Mac: https://docs.docker.com/desktop/settings/mac/#advanced 51 | .. _Docker for Windows: https://docs.docker.com/desktop/install/windows-install/ 52 |
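If you are unsure whether your Docker installation includes the Compose plugin, a quick way to check (assuming a reasonably recent Docker CLI) is:

.. code:: sh

    docker --version
    docker compose version

If the second command fails, your installation does not include Compose v2; see the links above for installation instructions.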
_configuring Docker for Mac: https://docs.docker.com/desktop/settings/mac/#advanced 51 | .. _Docker for Windows: https://docs.docker.com/desktop/install/windows-install/ 52 | 53 | Please note 54 | ~~~~~~~~~~~ 55 | 56 | You should run all ``make`` commands described below on your local machine, *not* 57 | from within a Virtual Machine, as these commands are meant to stand up a VM-like environment using 58 | Docker containers. 59 | 60 | Directions to set up devstack 61 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 62 | 63 | The default devstack services can be run by following the steps below. 64 | 65 | **Note:** This will set up a large number of services, more than you are likely to need to work with, but that's only necessary for first-time provisioning. See :doc:`service_list` and the :doc:`most common development workflow <workflow>` for how to run and update devstack with just the services you need, rather than the ``large-and-slow`` default set. 66 | 67 | #. The Docker Compose file mounts a host volume for each service's executing 68 | code. The host directory defaults to a sibling of this directory. For 69 | example, if this repo is cloned to ``~/workspace/devstack``, host volumes 70 | will be expected in ``~/workspace/course-discovery``, 71 | ``~/workspace/ecommerce``, etc. These repos can be cloned with the command 72 | below. 73 | 74 | .. code:: sh 75 | 76 | make dev.clone # or, `make dev.clone.https` if you don't have SSH keys set up. 77 | 78 | You may customize where the local repositories are found by setting the 79 | ``DEVSTACK_WORKSPACE`` environment variable. 80 | 81 | (macOS only) Share the cloned service directories in Docker, using 82 | **Docker -> Preferences -> File Sharing** in the Docker menu. 83 | 84 | #. Pull any changes made to the various images on which the devstack depends. 85 | 86 | .. code:: sh 87 | 88 | make dev.pull.large-and-slow 89 | 90 | **Note:** 91 | If you are setting up devstack to develop on Open edX named releases, see this `document on developing on named releases`_ before proceeding with step 3. 92 | 93 | .. _document on developing on named releases: https://edx.readthedocs.io/projects/open-edx-devstack/en/latest/developing_on_named_release_branches.html 94 | 95 | #. Run the provision command, if you haven't already, to configure the various 96 | services with superusers (for development without the auth service) and 97 | tenants (for multi-tenancy). 98 | 99 | **NOTE:** When running the provision command, databases for ecommerce and edxapp 100 | will be dropped and recreated. 101 | 102 | The superuser's username is ``edx``, the email address is ``edx@example.com``, and the password is ``edx``. You can access 103 | the services directly via Django admin at the ``/admin/`` path, or log in via 104 | single sign-on at ``/login/``. 105 | 106 | Default: 107 | 108 | .. code:: sh 109 | 110 | make dev.provision 111 | 112 | This is expected to take a while, produce a lot of output from a bunch of steps, and finally end with ``Provisioning complete!`` 113 | 114 | 115 | #. Start the desired services. This command will mount the repositories under the 116 | ``DEVSTACK_WORKSPACE`` directory. 117 | 118 | **NOTE:** it may take up to 60 seconds for the LMS to start, even after the ``dev.up.*`` command outputs ``done``. 119 | 120 | Default: 121 | 122 | ..
code:: sh 123 | 124 | make dev.up.large-and-slow 125 | 126 | To stop a service, use ``make dev.stop.<service>``, and to both stop it 127 | and remove the container (along with any changes you have made 128 | to the filesystem in the container) use ``make dev.remove-containers.<service>``. 129 | 130 | After the services have started, if you need shell access to one of the 131 | services, run ``make dev.shell.<service>``. For example, to access the 132 | Catalog/Course Discovery Service, you can run: 133 | 134 | .. code:: sh 135 | 136 | make dev.shell.discovery 137 | 138 | To see logs from containers running in detached mode, you can either use 139 | "Kitematic" (available from the "Docker for Mac" menu) or run the 140 | following: 141 | 142 | .. code:: sh 143 | 144 | make dev.logs 145 | 146 | To view the logs of a specific service's container, run ``make dev.logs.<service>``. 147 | For example, to access the logs for Ecommerce, you can run: 148 | 149 | .. code:: sh 150 | 151 | make dev.logs.ecommerce 152 | 153 | For information on the supported ``make`` commands, you can run: 154 | 155 | .. code:: sh 156 | 157 | make help 158 | 159 | Devstack collects some basic usage metrics to help gain a better understanding of how devstack is used and to surface any potential issues in local devstack environments. To learn more, read the `0003-usage-metrics.rst ADR <./docs/decisions/0003-usage-metrics.rst>`_. 160 | 161 | This data collection is behind a consent flag, so please help devstack's maintainers by enabling metrics collection by running the following: 162 | 163 | .. code:: sh 164 | 165 | make metrics-opt-in 166 | 167 | Now that you're up and running, read about the :doc:`most common development workflow <workflow>`. 168 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Open edX Devstack 2 | ################# 3 | 4 | Devstack is a local Docker-based environment for developing in the Open edX platform. 5 | 6 | Devstack runs as multiple containers with `Docker Compose`_ at its core. 7 | 8 | .. _Docker Compose: https://docs.docker.com/compose/ 9 | 10 | Contents 11 | ******** 12 | 13 | .. toctree:: 14 | :maxdepth: 1 15 | 16 | getting_started 17 | logging_in 18 | workflow 19 | service_list 20 | devstack_interface 21 | devstack_faq 22 | building-images 23 | database-dumps 24 | developing_on_named_release_branches 25 | pycharm_integration 26 | testing_and_debugging 27 | troubleshoot_general_tips 28 | manual_upgrades 29 | advanced_configuration 30 | -------------------------------------------------------------------------------- /docs/logging_in.rst: -------------------------------------------------------------------------------- 1 | Usernames and Passwords 2 | ----------------------- 3 | 4 | The provisioning script creates a Django superuser for every service. 5 | 6 | :: 7 | 8 | Email: edx@example.com 9 | Username: edx 10 | Password: edx 11 | 12 | The LMS also includes demo accounts. The password for each of these accounts 13 | is ``edx``. 14 | 15 | .. list-table:: 16 | :widths: 20 60 17 | :header-rows: 1 18 | 19 | * - Account 20 | - Description 21 | * - ``staff@example.com`` 22 | - An LMS and Studio user with course creation and editing permissions. 23 | This user is a course team member with the Admin role, which gives 24 | rights to work with the demonstration course in Studio, the LMS, and 25 | Insights.
26 | * - ``verified@example.com`` 27 | - A student account that you can use to access the LMS for testing 28 | verified certificates. 29 | * - ``audit@example.com`` 30 | - A student account that you can use to access the LMS for testing course 31 | auditing. 32 | * - ``honor@example.com`` 33 | - A student account that you can use to access the LMS for testing honor 34 | code certificates. 35 | -------------------------------------------------------------------------------- /docs/manual_upgrades.rst: -------------------------------------------------------------------------------- 1 | Manual upgrade instructions 2 | ########################### 3 | 4 | Occasionally there is a change to devstack that requires existing devstack installations to be manually upgraded. When this happens, instructions should be added here. 5 | 6 | Please add new instructions to the top, include a date, and make a post in the `Devstack forum `_. 7 | 8 | (If you just need to update your devstack to the latest version of everything, see :doc:`updating_devstack`.) 9 | 10 | 11 | 2024-02-25 - Mongo upgrade from version 4.4 to 5.0 12 | ************************************************** 13 | 14 | As Mongo 4.4 is reaching end of life, the Mongo service has been upgraded from version 4.4 to 5.0. Developers will need to follow these instructions. 15 | 16 | 1. Take the latest ``git pull`` of ``devstack`` 17 | 18 | 2. Take the latest pull of images :: 19 | 20 | make dev.pull 21 | 22 | 3. Run the Mongo 5.0 upgrade script, already included in the devstack repo :: 23 | 24 | ./upgrade_mongo_5_0.sh 25 | 26 | 2023-10-05 - MySQL upgrade from version 5.7 to 8.0 27 | ************************************************** 28 | 29 | The MySQL service has been upgraded from version 5.7 to 8.0. Developers will need to follow these instructions. 30 | 31 | 1. Take the latest ``git pull`` of ``devstack`` and ``edx-platform``. 32 | 33 | 2. Take the latest pull of images :: 34 | 35 | make dev.pull 36 | 37 | 3. Run the provisioning command :: 38 | 39 | make dev.provision 40 | 41 | 4. [Optional] Additionally, there is a database copy command to help you transfer data from MySQL 5.7 to 8.0. After provisioning, use the ``dev.dbcopyall8`` command. This command will stop all of your services, clean your ``mysql80`` container, and copy all of your databases from ``mysql57`` to ``mysql80``. :: 42 | 43 | make dev.dbcopyall8 44 | 45 | This command copies the following databases: 46 | 47 | - credentials 48 | - discovery 49 | - ecommerce 50 | - registrar 51 | - notes 52 | - edxapp 53 | - xqueue 54 | - edxapp_csmh 55 | - dashboard 56 | - analytics-api 57 | - reports 58 | - reports_v1 59 | 60 | If you prefer not to copy all databases, update ``DB_NAMES_LIST`` in the ``Makefile`` of devstack before running the dbcopy command. 61 | 62 | 5. Now start your desired services again using the ``dev.up`` command. For example, running the following command will start ``lms`` and ``cms`` :: 63 | 64 | make dev.up.lms+cms 65 | 66 | 6. You might need to apply the latest migrations to your ``mysql80`` container for some services. To do that, you can use the ``dev.migrate`` command. For example, for ``lms`` you can run :: 67 | 68 | make dev.migrate.lms 69 | 70 | 71 | 2023-08-02 - Forum upgrade from Ruby 2 to 3 72 | ******************************************* 73 | 74 | The forum service has been upgraded from Ruby 2 to Ruby 3.
Developers who use the forum will need to pull the new image and reprovision the service:: 75 | 76 | make dev.pull.forum # pull in new forum image 77 | make dev.provision.forum # provision forum service 78 | -------------------------------------------------------------------------------- /docs/testing_and_debugging.rst: -------------------------------------------------------------------------------- 1 | Testing and Debugging 2 | ===================== 3 | 4 | .. contents:: Table of Contents 5 | 6 | Debugging using PDB 7 | ------------------- 8 | 9 | It's possible to debug any of the containers' Python services using PDB. To do so, 10 | start up the containers as usual with: 11 | 12 | .. code:: sh 13 | 14 | make dev.up 15 | 16 | This command starts each relevant container with the equivalent of the ``-it`` options, 17 | allowing a developer to attach to the process once the process is up and running. 18 | 19 | To attach to a container and its process, use ``make dev.attach.<service>``. For example: 20 | 21 | .. code:: sh 22 | 23 | make dev.attach.lms 24 | 25 | Set a PDB breakpoint anywhere in the code using one of the following: 26 | 27 | .. code:: python 28 | 29 | breakpoint() # Works in Python >= 3.7 30 | import pdb;pdb.set_trace() # Works in any version of Python 31 | 32 | and your attached session will offer an interactive PDB prompt when the breakpoint is hit. 33 | 34 | You may be able to detach from the container with the ``Ctrl-P, Ctrl-Q`` key sequence. 35 | If that doesn't work, you will have to either close your terminal window or 36 | stop the service with: 37 | 38 | .. code:: sh 39 | 40 | make dev.stop.<service> 41 | 42 | You can bring that same service back up with: 43 | 44 | .. code:: sh 45 | 46 | make dev.up.<service> 47 | 48 | Running LMS and CMS Tests 49 | ---------------------------- 50 | 51 | After entering a shell for the appropriate service via ``make lms-shell`` or 52 | ``make cms-shell``, you can run any of the usual paver commands from the 53 | `edx-platform testing documentation`_. Examples: 54 | 55 | .. code:: sh 56 | 57 | paver run_quality 58 | paver test_a11y 59 | paver test_bokchoy 60 | paver test_js 61 | paver test_lib 62 | paver test_python 63 | 64 | Tests can also be run individually. Example: 65 | 66 | .. code:: sh 67 | 68 | pytest openedx/core/djangoapps/user_api 69 | 70 | Tests can also be easily run with a shortcut from the host machine, 71 | so that you maintain your command history: 72 | 73 | .. code:: sh 74 | 75 | ./in lms pytest openedx/core/djangoapps/user_api 76 | 77 | 78 | .. _edx-platform testing documentation: https://docs.openedx.org/projects/edx-platform/en/latest/concepts/testing/testing.html#running-python-unit-tests 79 | -------------------------------------------------------------------------------- /docs/workflow.rst: -------------------------------------------------------------------------------- 1 | Workflow 2 | ======== 3 | 4 | Here's a common workflow you might use in devstack for feature development or debugging in an IDA. 5 | 6 | These instructions are written using the LMS as an example. Replace ``lms`` with ``cms``, ``credentials``, ``discovery``, etc. as appropriate. 7 | 8 | #. Get your IDA's repo ready for development. 9 | 10 | - You'll be developing from a git repo that is checked out to the same parent directory as the one devstack is in. For example, if you have ``~/edx-repos/devstack``, you'll be developing the LMS in ``~/edx-repos/edx-platform``.
11 | 12 | - Make sure your IDA's repo is checked out to the commit you want to use for development, and that that commit is based on an up-to-date branch, so that it matches the disk images devstack will pull. 13 | 14 | #. Launch your service in a clean state: 15 | 16 | #. Run ``make dev.remove-containers dev.pull.lms dev.up.lms`` to halt any running services and remove their containers, pull the latest disk images, and launch your service. 17 | #. Optionally, watch ``make dev.logs.lms`` to follow the logs. This lets you see when the service finishes coming up, and prints the port it is listening on. 18 | 19 | - Your service is up when you see a block of messages that looks like the following:: 20 | 21 | edx.devstack.lms | System check identified no issues (0 silenced). 22 | edx.devstack.lms | November 25, 2020 - 19:04:18 23 | edx.devstack.lms | Django version 2.2.17, using settings 'lms.envs.devstack_docker' 24 | edx.devstack.lms | Starting development server at http://0.0.0.0:18000/ 25 | edx.devstack.lms | Quit the server with CONTROL-C. 26 | 27 | - If the logs show warning messages about missing tables or needed migrations, run ``make dev.migrate.lms`` and then continue. 28 | 29 | - If there are complaints about import failures, Python package requirements may have changed since the last disk image was built. Run ``make lms-shell`` and then ``make requirements`` from inside the shell, then restart the service with ``make dev.restart-devserver.lms``. 30 | 31 | #. Your service should now be up and accessible, and you can develop in your IDA's repo. When you make changes on disk, a file watcher will restart the service in devstack. It may take a moment for the service to come back up with your changes. 32 | 33 | - For some changes, this auto-restarting is insufficient, and you'll need to make a change from inside ``make lms-shell`` (such as ``make requirements`` or a migration or other management command) and then run ``make dev.restart-devserver.lms`` from the outside. Running ``make dev.restart-devserver.lms`` may also fix issues if the runserver command is not restarting automatically after code changes. 34 | 35 | #. When you're done, you can run ``make dev.stop.lms`` to shut down the service but leave the container intact (with requirements installations and other file changes preserved). 36 | 37 | Variations 38 | ---------- 39 | 40 | Multiple services 41 | ~~~~~~~~~~~~~~~~~ 42 | 43 | If you're working on multiple services at a time, you can use Make targets of a different form that take a list of services. For example, if you want to pull images for ``lms``, ``cms``, and ``credentials``, you can run ``make dev.pull.lms+cms+credentials``. This will pull down images for the three services, as well as for all of their runtime dependencies. 44 | 45 | You can also use the more tab-completion-friendly commands separately: ``make lms-pull cms-pull credentials-pull``. 46 | 47 | Time-savers 48 | ~~~~~~~~~~~ 49 | 50 | If you want to pull down just the images for one service but not its dependencies, there is a ``without-deps`` variant for both pulling images and bringing a service up, in both the service-leading and service-trailing Make target forms. For example, ``dev.up.without-deps.lms`` and ``lms-up-without-deps`` may both be used, where the former is more amenable to use with multiple services at the same time. 51 | 52 | Database backups 53 | ~~~~~~~~~~~~~~~~ 54 | 55 | You can routinely create backups of your local databases. To create a backup, use ``make dev.backup``.
When you want to restore your databases from the backup, run ``make dev.restore``. Warning: this will restore all of your databases. You might have to cycle the database containers off and on using ``make dev.remove-containers.<service>`` and ``make dev.up.<service>``. 56 | 57 | Comprehensive backup 58 | ~~~~~~~~~~~~~~~~~~~~ 59 | 60 | You can also back up and restore *all* devstack-related volumes -- not just databases, but also node_modules and static assets volumes. (These commands currently only work on Linux.) 61 | 62 | - Back up: ``make stop && sudo rsync -savx --numeric-ids --include='/devstack_***' --exclude='*' --delete /var/lib/docker/volumes/ .dev/backups/2023-07-18/`` 63 | - Restore: ``make stop && sudo rsync -savx --numeric-ids --include='/devstack_***' --exclude='*' --delete .dev/backups/2023-07-18/ /var/lib/docker/volumes/`` 64 | 65 | The above example creates and restores from a backup directory named ``2023-07-18`` and assumes that you're working from the master branch; if you're working from a named release or have explicitly specified an alternative ``COMPOSE_PROJECT_NAME``, you'll need to adjust the ``--include`` parameter. 66 | 67 | Containers should be stopped before the backup or restore is performed, or databases are very likely to become corrupted. 68 | 69 | Running micro-frontends outside of devstack 70 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | 72 | Although several micro-frontends (MFEs) are built into devstack (the full list is in :doc:`service_list`), some users prefer to run those MFEs directly on their host machine. You can achieve this by first removing the devstack MFE container, and then starting the host version. For example:: 73 | 74 | make dev.down.frontend-app-learning # Bring down the devstack version of the Learning MFE. 75 | cd <path/to/frontend-app-learning> # Navigate to the Learning MFE's repository. 76 | npm ci && npm start # Install JS packages, and start the NPM devserver on your local host. 77 | 78 | Of course ``learning`` can be replaced with ``gradebook``, ``payment``, or another frontend-app name. 79 | 80 | If you forget to bring down the devstack version of the MFE, you will notice a port conflict when trying to start the host version.
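If you hit that conflict, a quick check (a sketch using standard Docker CLI options; the ``--filter`` value assumes devstack's usual ``frontend-app-`` container naming, so substitute the MFE you are working with) is to see whether devstack still has a container for that MFE and bring it down before retrying::

    docker ps --filter "name=frontend-app-learning" --format "{{.Names}}: {{.Ports}}"  # Is devstack still holding the port?
    make dev.down.frontend-app-learning                                                # If so, bring it down, then re-run `npm start`.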
81 | -------------------------------------------------------------------------------- /enterprise/provision.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | set -x 4 | 5 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker manage_user enterprise_worker enterprise_worker@example.com --staff' 6 | cat enterprise/worker_permissions.py | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms shell --settings=devstack_docker' 7 | 8 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker create_dot_application --grant-type client-credentials --client-id "enterprise-backend-service-key" --client-secret "enterprise-backend-service-secret" enterprise-backend-service enterprise_worker' 9 | -------------------------------------------------------------------------------- /enterprise/worker_permissions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Run like so: 3 | # ./manage.py lms shell -c "`cat worker_permissions.py`" 4 | 5 | from django.contrib.auth import get_user_model 6 | from django.contrib.auth.models import Permission 7 | 8 | 9 | User = get_user_model() 10 | enterprise_worker = User.objects.get(username='enterprise_worker') 11 | 12 | enterprise_model_permissions = list(Permission.objects.filter(content_type__app_label='enterprise')) 13 | 14 | enterprise_worker.user_permissions.add(*enterprise_model_permissions) 15 | enterprise_worker.save() 16 | -------------------------------------------------------------------------------- /in: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Run a command in a service's container. 3 | # Try to bring up the service if it isn't already up. 4 | # Example: 5 | # ~/devstack> ./in registrar ls /edx/app/ 6 | # edx_ansible nginx registrar supervisor 7 | # Example 2: 8 | # ~/devstack> ./in registrar "cd /edx/app/registrar && ls" 9 | # data registrar registrar.sh venvs 10 | # devstack.sh registrar_env registrar-workers.sh 11 | # nodeenvs registrar_gunicorn.py staticfiles 12 | 13 | set -e 14 | set -o pipefail 15 | set -u 16 | 17 | service="$1" 18 | shift 19 | 20 | container_id=$(make --silent --no-print-directory dev.print-container."$service") 21 | if [[ -z "$container_id" ]]; then 22 | make --silent --no-print-directory dev.up."$service" 23 | container_id=$(make --silent --no-print-directory dev.print-container."$service") 24 | fi 25 | 26 | docker exec -it "$container_id" bash -c "$*" 27 | -------------------------------------------------------------------------------- /load-db.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Load the specified database from a file of the same name. 4 | # 5 | # Example: 6 | # $ load-db edxapp 7 | # 8 | # This will load the edxapp database from a file named exapp.sql. 9 | 10 | set -eu -o pipefail 11 | 12 | if [ -z "$1" ] 13 | then 14 | echo "You must supply a database name!" 15 | exit 1 16 | fi 17 | 18 | echo "Loading the $1 database..." 
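# Locate the running MySQL 8.0 container via the Makefile helper target below,
# then stream the $1.sql dump into it with `docker exec`.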
19 | mysql_container=$(make --silent --no-print-directory dev.print-container.mysql80) 20 | docker exec -i "$mysql_container" mysql -uroot $1 < $1.sql 21 | echo "Finished loading the $1 database!" 22 | -------------------------------------------------------------------------------- /microfrontend.yml: -------------------------------------------------------------------------------- 1 | # This file contains configuration common to all microfrontends 2 | 3 | version: "2.1" 4 | 5 | services: 6 | microfrontend: 7 | # Use `npm ci` rather than `npm install` for a few reasons: 8 | # 9 | # - Repeatability: Respect the currently checked out package 10 | # versions rather than upgrading when package.json and 11 | # package-lock.json don't match. (Two people using this at 12 | # different times on the same commit should get the same 13 | # results.) 14 | # - Immutability: Don't change the repo's working directory 15 | # unexpectedly when there's a lock mismatch. 16 | # 17 | # Fail fast if package install fails to avoid mysterious 18 | # errors later. 19 | command: bash -c 'npm ci || exit 1; while true; do npm start; sleep 2; done' 20 | stdin_open: true 21 | tty: true 22 | image: node:16 23 | environment: 24 | - NODE_ENV=development 25 | -------------------------------------------------------------------------------- /mongo-provision.js: -------------------------------------------------------------------------------- 1 | conn = new Mongo(); 2 | 3 | 4 | users = [ 5 | { 6 | 'user': 'admin', 7 | 'pwd': 'password', 8 | 'roles': ['root'], 9 | 'database': 'admin' 10 | }, 11 | { 12 | 'user': 'cs_comments_service', 13 | 'pwd': 'password', 14 | 'roles': ['readWrite'], 15 | 'database': 'cs_comments_service' 16 | }, 17 | { 18 | 'user': 'edxapp', 19 | 'pwd': 'password', 20 | 'roles': ['readWrite'], 21 | 'database': 'edxapp' 22 | } 23 | ]; 24 | 25 | for (var i = 0; i < users.length; i++) { 26 | var user = users[i]; 27 | var username = user.user; 28 | var db = conn.getDB(user.database); 29 | delete user.database; 30 | 31 | if (db.getUser(username) == null) { 32 | db.createUser(user); 33 | } else { 34 | delete user.user; 35 | db.updateUser(username, user); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /openedx.yaml: -------------------------------------------------------------------------------- 1 | # This file describes this Open edX repo, as described in OEP-2: 2 | # http://open-edx-proposals.readthedocs.io/en/latest/oep-0002-bp-repo-metadata.html#specification 3 | 4 | nick: dev 5 | oeps: 6 | oep-2: true 7 | oep-7: true # Python 3 8 | oep-5: true 9 | oep-10: true 10 | openedx-release: {ref: master} 11 | owner: jmbowman 12 | supporting-teams: 13 | - platform-core 14 | -------------------------------------------------------------------------------- /options.mk: -------------------------------------------------------------------------------- 1 | # DEFAULT DEVSTACK OPTIONS 2 | # Included into Makefile and exported to command environment. 3 | # Defaults are listed in this file. 4 | # Local git-ignored overrides can be configured by creating `options.local.mk`. 5 | 6 | # WHEN ADDING NEW OPTIONS TO THIS FILE: 7 | # 1. Provide an explanation of what the option is for. 8 | # 2. Explain what values it can be overriden to. 9 | # 3. Set the default value with `?=` (i.e., "set if not already set") such that values 10 | # set in `options.local.mk` or passed in via the environment are not clobbered. 11 | 12 | # Include local overrides to options. 
13 | # You can use this file to configure your Devstack. It is ignored by git. 14 | -include options.local.mk # Prefix with hyphen to tolerate absence of file. 15 | 16 | # Folder in which we look for repositories. 17 | # Defaults to parent of this repository. 18 | DEVSTACK_WORKSPACE ?= $(shell pwd)/.. 19 | 20 | # Open edX named release branch (omitting open-release/ prefix). 21 | # For example, `hawthorn.master` or `zebrawood.rc1`. 22 | # By default, this value is undefined (it's only listed here as documentation). 23 | # If it is defined in options.local.mk or the environment, then Devstack will try 24 | # to use the Docker images and Git repo branches that correspond to that release. 25 | # If the release does not exist, you will see errors. 26 | # OPENEDX_RELEASE ?= 27 | 28 | # Name of Docker Compose project. 29 | # Volumes and network are namespaced based on this value, 30 | # so changing it will give you a separate set of databases. 31 | # See https://docs.docker.com/compose/reference/envvars/#compose_project_name 32 | # If OPENEDX_RELEASE is defined, defaults to `devstack-${OPENEDX_RELEASE}`; 33 | # otherwise, it defaults to `devstack`. Any periods will be replaced with hyphens to comply with docker project naming rules (eg `devstack-quince.master` will become `devstack-quince-master`). 34 | # Be sure to bring down services before changing the value of `COMPOSE_PROJECT_NAME`. 35 | ifdef OPENEDX_RELEASE 36 | COMPOSE_PROJECT_NAME ?= devstack-$(shell echo ${OPENEDX_RELEASE} | tr . -) 37 | else 38 | COMPOSE_PROJECT_NAME ?= devstack 39 | endif 40 | 41 | # Docker Compose HTTP timeout, in seconds. 42 | # By default, increased so that devstack provisioning does not fail in unstable networks. 43 | COMPOSE_HTTP_TIMEOUT ?= 180 44 | 45 | # Whether we should always copy programs to LMS cache upon LMS startup. 46 | # If 'true', then run `make dev.cache-programs` whenever we bring up 47 | # containers. 48 | # Defaults to false. Case-sensitive. 49 | ALWAYS_CACHE_PROGRAMS ?= false 50 | 51 | # Services that are to be pulled, provisioned, run, and checked by default 52 | # when no services are specified manually. 53 | # Should be a subset of $(EDX_SERVICES). 54 | # Separated by plus signs. Listed in alphabetical order for clarity. 55 | # WARNING: You may remove services from this list in order to make Devstack lighter, 56 | # but beware that some services have implicit, undocumented dependencies on 57 | # other ones. For example, Discovery depends on both LMS and Ecommerce being 58 | # provisioned and started in order to provision correctly. 59 | # Tread at your own risk. 60 | # TODO: Re-evaluate this list and consider paring it down to a tighter core. 61 | # The current value was chosen such that it would not change the existing 62 | # Devstack behavior. 63 | DEFAULT_SERVICES ?= \ 64 | credentials+discovery+ecommerce+edx_notes_api+forum+frontend-app-authn+frontend-app-gradebook+frontend-app-payment+frontend-app-publisher+frontend-app-learning+lms+cms 65 | 66 | # All edX services, whether or not they are run by default. 67 | # Separated by plus signs. 68 | # Listed in alphabetical order for clarity.
69 | EDX_SERVICES ?= \ 70 | analyticsapi+credentials+discovery+ecommerce+edx_notes_api+forum+frontend-app-account+frontend-app-learner-dashboard+frontend-app-learner-record+frontend-app-profile+frontend-app-authn+frontend-app-course-authoring+frontend-app-gradebook+frontend-app-ora-grading+frontend-app-learning+frontend-app-library-authoring+frontend-app-payment+frontend-app-program-console+frontend-app-publisher+insights+lms+lms_watcher+registrar+registrar-worker+cms+cms_watcher+xqueue+xqueue_consumer 71 | 72 | # Services with database migrations. 73 | # Should be a subset of $(EDX_SERVICES). 74 | # Separated by plus signs. Listed in alphabetical order for clarity. 75 | # Services must provide a Makefile target named: $(service)-update-db 76 | # Note: This list should contain _all_ db-backed services, even if not 77 | # configured to run; the list will be filtered later against $(DEFAULT_SERVICES). 78 | DB_SERVICES ?= \ 79 | credentials+discovery+ecommerce+lms+registrar+cms 80 | 81 | # Services with static assets to be built. 82 | # Should be a subset of $(EDX_SERVICES). 83 | # Services must provide a Makefile target named: dev.migrate.$(service) 84 | # Separated by plus signs. Listed in alphabetical order for clarity. 85 | # Note: This list should contain _all_ services with static assets to compile, even if not 86 | # configured to run; the list will be filtered later against $(DEFAULT_SERVICES). 87 | ASSET_SERVICES ?= \ 88 | credentials+discovery+ecommerce+insights+lms+registrar+cms 89 | 90 | # All third-party services. 91 | # Separated by plus signs. Listed in alphabetical order for clarity. 92 | THIRD_PARTY_SERVICES ?= \ 93 | chrome+coursegraph+elasticsearch710+firefox+memcached+mongo+mysql57+mysql80+opensearch12+redis+namenode+datanode+resourcemanager+nodemanager+sparkmaster+sparkworker+vertica 94 | -------------------------------------------------------------------------------- /programs/README.md: -------------------------------------------------------------------------------- 1 | # Support for Provisioning Programs 2 | 3 | This directory holds a few scripts that set up a demo program. 4 | 5 | Currently, the demo program is very simple: just one demo course, with no purchase-bundling support enabled. 6 | 7 | Normally you don't need to manually run these scripts to provision the demo course, as it is automatically added when provisioning fresh devstacks. 8 | 9 | ## Reprovisioning 10 | 11 | If you have an existing older devstack installation and want to add the demo program to it, simply run: 12 | 13 | ./programs/provision.sh 14 | 15 | And it will set it up for you. This can be run multiple times safely. 16 | 17 | ## Recaching 18 | 19 | If you have an existing devstack with the demo program, but want to recache the programs inside LMS (something you need to do every time you bring the LMS container back up), simply run: 20 | 21 | make dev.cache-programs 22 | 23 | To do this while launching a service, run: 24 | 25 | make dev.up.with-programs.<service>
26 | 27 | To make this the default behavior for `dev.up.*`, add the following to `options.local.mk`, creating the file if it does not yet exist: 28 | 29 | ALWAYS_CACHE_PROGRAMS=true 30 | -------------------------------------------------------------------------------- /programs/discovery.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Run like so: 3 | # ./manage.py shell -c "`cat discovery.py`" 4 | 5 | import urllib.request as request 6 | from course_discovery.apps.core.models import Partner 7 | from course_discovery.apps.course_metadata.models import ( 8 | Course, CourseRun, Organization, Program, ProgramType, SeatType 9 | ) 10 | 11 | DEMO_IMAGE_URL = 'http://edx.devstack.lms:18000/asset-v1:edX+DemoX+Demo_Course+type@asset+block@images_course_image.jpg' 12 | 13 | 14 | # Make sure micromasters type exists 15 | micromasters, _ = ProgramType.objects.get_or_create(slug='micromasters', defaults={'name': 'MicroMasters'}) 16 | micromasters.applicable_seat_types.add( 17 | SeatType.objects.get_or_create(slug='verified', defaults={'name': 'Verified'})[0], 18 | SeatType.objects.get_or_create(slug='professional', defaults={'name': 'Professional'})[0], 19 | SeatType.objects.get_or_create(slug='credit', defaults={'name': 'Credit'})[0], 20 | ) 21 | 22 | # Add a demo program 23 | edx_partner = Partner.objects.get(short_code='edx') # created during normal provision 24 | program, _ = Program.objects.update_or_create( 25 | marketing_slug='demo-program', 26 | defaults={ 27 | 'title': 'edX Demonstration Program', 28 | 'type': micromasters, 29 | 'status': 'active', 30 | 'partner': edx_partner, 31 | 'overview': 'A demo program for testing.', 32 | 'total_hours_of_effort': 4, 33 | 'min_hours_effort_per_week': 1, 34 | 'max_hours_effort_per_week': 4, 35 | 'one_click_purchase_enabled': True, 36 | 'card_image_url': DEMO_IMAGE_URL, 37 | }, 38 | ) 39 | 40 | # Now, after an ID has been created, connect the program to other models 41 | 42 | course = Course.objects.get(key='edX+DemoX') 43 | program.courses.set([course]) 44 | 45 | try: 46 | # This run causes run-time exceptions, because it uses old style key. 
47 | deprecated_run = CourseRun.objects.get(key='edX/DemoX/Demo_Course') 48 | program.excluded_course_runs = [deprecated_run] 49 | except CourseRun.DoesNotExist: 50 | # This key only seems to be in some existing devstacks, don't worry if it doesn't exist 51 | pass 52 | 53 | edx_org = Organization.objects.get(key='edX') 54 | program.authoring_organizations.set([edx_org]) 55 | program.credit_backing_organizations.set([edx_org]) 56 | 57 | # And set up the banner image 58 | if not program.banner_image.name: 59 | program.banner_image.save('banner.jpg', request.urlopen(DEMO_IMAGE_URL)) 60 | 61 | program.save() 62 | -------------------------------------------------------------------------------- /programs/lms.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Run like so: 3 | # ./manage.py lms shell -c "`cat lms.py`" 4 | 5 | from django.contrib.sites.models import Site 6 | from openedx.core.djangoapps.catalog.models import CatalogIntegration 7 | from openedx.core.djangoapps.programs.models import ProgramsApiConfig 8 | from openedx.core.djangoapps.site_configuration.models import SiteConfiguration 9 | 10 | DISCOVERY_API_URL = 'http://edx.devstack.discovery:18381/api/v1/' 11 | 12 | 13 | def set_current_config(cls, args): 14 | if not cls.equal_to_current(args): 15 | config = cls(**args) 16 | config.save() 17 | 18 | 19 | # Enable the program dashboard 20 | set_current_config(ProgramsApiConfig, {'enabled': True}) 21 | 22 | # Enable the discovery worker 23 | set_current_config(CatalogIntegration, { 24 | 'enabled': True, 25 | 'internal_api_url': 'https://example.com/api', # required but unused 26 | 'service_username': 'discovery_worker', 27 | }) 28 | 29 | # Tell LMS about discovery 30 | SiteConfiguration.objects.update_or_create( 31 | site=Site.objects.get(domain='example.com'), 32 | defaults={ 33 | 'enabled': True, 34 | 'site_values': {'COURSE_CATALOG_API_URL': DISCOVERY_API_URL}, 35 | }, 36 | ) 37 | -------------------------------------------------------------------------------- /programs/provision.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | set -x 4 | 5 | # 6 | # To add programs support, we need to tweak/add certain rows in the database. 7 | # We want to go through Django for this (rather than direct db modification), since we have a lot of Python 8 | # side-effect code and validation in our models. 9 | # 10 | # We *could* create several tiny management commands. But this use case is very special cased. Instead, we just 11 | # have the scripts here and pass them into Django's management shell. 12 | # 13 | # But to get the commands through Docker and Django intact, we have to do some creative quoting. 14 | 15 | # Run this command with no arguments to completely provision an existing devstack. 16 | # This can be run multiple times in a row with no ill effect (it's idempotent). 17 | 18 | BASEDIR=$(dirname "$0") 19 | 20 | # Main items are green, rest is dull grey since they are noisy, but we still might want to see their output, 21 | # for error cases and the like. 22 | notice() { 23 | SWITCH='\033[' 24 | GREY="${SWITCH}1;90m" 25 | GREEN="${SWITCH}0;32m" 26 | echo -e "${GREEN}${@}${GREY}" 27 | } 28 | 29 | # We reset color once we're done with the script. 30 | # If we wanted to be really fancy, we'd trap signals and reset there too. 
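# (In practice, common termination signals are trapped further below, so reset_color also runs if the script is interrupted.)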
31 | reset_color() { 32 | SWITCH="\033[" 33 | NORMAL="${SWITCH}0m" 34 | echo -e -n "${NORMAL}" 35 | } 36 | 37 | docker_exec() { 38 | service=${1} 39 | cmd=${2} 40 | app=${3:-$service} 41 | repo=${4:-$app} 42 | 43 | CMDS=" 44 | source /edx/app/$app/${app}_env && 45 | /edx/app/$app/$repo/manage.py $cmd 46 | " 47 | 48 | docker compose exec -T "$service" bash -e -c "$CMDS" 49 | } 50 | 51 | provision_ida() { 52 | service=$1 53 | cmd=$2 54 | shift 2 55 | 56 | # Escape double quotes and backticks from the Python 57 | PROGRAM_SCRIPT="$(sed -E 's/("|`)/\\\1/g' < "$BASEDIR/$service.py")" 58 | 59 | cmd="$cmd -c \"$PROGRAM_SCRIPT\"" 60 | 61 | docker_exec "$service" "$cmd" "$@" 62 | } 63 | 64 | trap reset_color 1 2 3 6 15 65 | 66 | if [ "$1" = "lms" -o -z "$1" ]; then 67 | notice Adding program support to LMS... 68 | provision_ida lms "lms shell" edxapp edx-platform 69 | fi 70 | 71 | if [ "$1" = "discovery" -o -z "$1" ]; then 72 | notice Adding demo program to Discovery... 73 | set +e 74 | # FIXME[bash-e]: Bash scripts should use -e -- but this command fails 75 | provision_ida discovery "shell" 76 | set -e 77 | fi 78 | 79 | if [ "$1" = "cache" -o -z "$1" ]; then 80 | notice Caching programs inside the LMS... 81 | docker_exec lms "lms cache_programs" edxapp edx-platform 82 | fi 83 | 84 | reset_color 85 | -------------------------------------------------------------------------------- /provision-analyticsapi.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | . scripts/colors.sh 5 | set -x 6 | 7 | name=analyticsapi 8 | port=19001 9 | 10 | docker compose up -d ${name} 11 | 12 | echo -e "${GREEN}Installing requirements for ${name}...${NC}" 13 | docker compose exec -T ${name} bash -e -c 'source /edx/app/analytics_api/analytics_api_env && cd /edx/app/analytics_api/analytics_api && make develop' -- ${name} 14 | 15 | echo -e "${GREEN}Running migrations for ${name}...${NC}" 16 | docker compose exec -T ${name} bash -e -c 'source /edx/app/analytics_api/analytics_api_env && export DJANGO_SETTINGS_MODULE="analyticsdataserver.settings.devstack" && cd /edx/app/analytics_api/analytics_api && make migrate-all' -- ${name} 17 | 18 | echo -e "${GREEN}Creating default user and authentication token for ${name}...${NC}" 19 | docker compose exec -T ${name} bash -e -c 'source /edx/app/analytics_api/analytics_api_env && cd /edx/app/analytics_api/analytics_api && python manage.py set_api_key edx edx' -- ${name} 20 | 21 | echo -e "${GREEN}Loading test data for ${name}...${NC}" 22 | docker compose exec -T ${name} bash -e -c 'source /edx/app/analytics_api/analytics_api_env && cd /edx/app/analytics_api/analytics_api && make loaddata' -- ${name} 23 | -------------------------------------------------------------------------------- /provision-coursegraph.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | . scripts/colors.sh 5 | set -x 6 | 7 | # Pulling the image will almost always be a no-op, but will be important 8 | # when we bump the version in docker-compose.yml or when Neo4j releases a patch. 9 | # Also, this gives us a chance to refresh the container in case it's gotten into 10 | # a weird state. 
11 | echo -e "${GREEN} Ensuring Coursegraph image is up to date...${NC}" 12 | docker compose rm --force --stop coursegraph 13 | docker compose pull coursegraph 14 | 15 | echo -e "${GREEN} Starting Coursegraph and CMS...${NC}" 16 | docker compose up -d coursegraph cms 17 | sleep 10 # Give Neo4j some time to boot up. 18 | 19 | echo -e "${GREEN} Updating CMS courses in Coursegraph...${NC}" 20 | docker compose exec cms bash -c 'source /edx/app/edxapp/edxapp_env && cd /edx/app/edxapp/edx-platform/ && ./manage.py cms dump_to_neo4j --host coursegraph.devstack.edx --user neo4j --password edxedxedx' 21 | 22 | echo -e "${GREEN} Coursegraph is now up-to-date with CMS!${NC}" 23 | -------------------------------------------------------------------------------- /provision-credentials.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | . scripts/colors.sh 5 | set -x 6 | 7 | # NOTE (CCB): We do NOT call provision-ida because it expects a virtualenv. 8 | # The new images for Credentials do not use virtualenv. 9 | 10 | name=credentials 11 | port=18150 12 | 13 | docker compose up -d $name 14 | 15 | echo -e "${GREEN}Installing requirements for ${name}...${NC}" 16 | docker compose exec -T ${name} bash -e -c 'source /edx/app/credentials/credentials_env && cd /edx/app/credentials/credentials && make requirements' -- "$name" 17 | 18 | echo -e "${GREEN}Running migrations for ${name}...${NC}" 19 | docker compose exec -T ${name} bash -e -c 'source /edx/app/credentials/credentials_env && cd /edx/app/credentials/credentials && make migrate' -- "$name" 20 | 21 | echo -e "${GREEN}Creating super-user for ${name}...${NC}" 22 | docker compose exec -T ${name} bash -e -c 'source /edx/app/credentials/credentials_env && cd /edx/app/credentials/credentials && echo "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser(\"edx\", \"edx@example.com\", \"edx\") if not User.objects.filter(username=\"edx\").exists() else None" | python /edx/app/$1/$1/manage.py shell' -- "$name" 23 | 24 | echo -e "${GREEN}Configuring site for ${name}...${NC}" 25 | docker compose exec -T ${name} bash -e -c 'source /edx/app/credentials/credentials_env && cd /edx/app/credentials/credentials && ./manage.py create_or_update_site --site-id=1 --site-domain=localhost:18150 --site-name="Open edX" --platform-name="Open edX" --company-name="Open edX" --lms-url-root=http://localhost:18000 --catalog-api-url=http://edx.devstack.discovery:18381/api/v1/ --tos-url=http://localhost:18000/tos --privacy-policy-url=http://localhost:18000/privacy --homepage-url=http://localhost:18000 --certificate-help-url=http://localhost:18000/faq --records-help-url=http://localhost:18000/faq --theme-name=openedx' 26 | 27 | ./provision-ida-user.sh ${name} ${name} ${port} 28 | 29 | # Compile static assets last since they are absolutely necessary for all services. This will allow developers to get 30 | # started if they do not care about static assets 31 | echo -e "${GREEN}Compiling static assets for ${name}...${NC}" 32 | docker compose exec -T ${name} bash -e -c ' if ! source /edx/app/credentials/credentials_env && cd /edx/app/credentials/credentials && make static 2>creds_make_static.err; then echo "------- Last 100 lines of stderr"; tail creds_make_static.err -n 100; echo "-------"; fi;' -- "$name" 33 | 34 | # Restart credentials devserver. 
35 | make dev.restart-devserver.credentials 36 | -------------------------------------------------------------------------------- /provision-discovery.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Provisioning script for the discovery service 3 | set -eu -o pipefail 4 | set -x 5 | 6 | docker compose up -d lms 7 | docker compose up -d cms 8 | docker compose up -d ecommerce 9 | sleep 5 # Give above services some time to boot up 10 | 11 | ./provision-ida.sh discovery discovery 18381 12 | 13 | docker compose exec -T discovery bash -e -c 'rm -rf /edx/var/discovery/*' 14 | docker compose exec -T discovery bash -e -c 'source /edx/app/discovery/discovery_env && python /edx/app/discovery/discovery/manage.py create_or_update_partner --site-id 1 --site-domain localhost:18381 --code edx --name edX --courses-api-url "http://edx.devstack.lms:18000/api/courses/v1/" --lms-coursemode-api-url "http://edx.devstack.lms:18000/api/course_modes/v1/" --ecommerce-api-url "http://edx.devstack.ecommerce:18130/api/v2/" --organizations-api-url "http://edx.devstack.lms:18000/api/organizations/v0/" --lms-url "http://edx.devstack.lms:18000/" --studio-url "http://edx.devstack.cms:18010/" --publisher-url "http://edx.devstack.frontend-app-publisher:18400/"' 15 | 16 | set +e 17 | # FIXME[bash-e]: Bash scripts should use -e -- but this script fails 18 | # (after many retries) when trying to talk to ecommerce 19 | docker compose exec -T discovery bash -e -c 'source /edx/app/discovery/discovery_env && python /edx/app/discovery/discovery/manage.py refresh_course_metadata' 20 | docker compose exec -T discovery bash -e -c 'source /edx/app/discovery/discovery_env && python /edx/app/discovery/discovery/manage.py add_provisioning_data' 21 | set -e 22 | 23 | docker compose exec -T discovery bash -e -c 'source /edx/app/discovery/discovery_env && python /edx/app/discovery/discovery/manage.py update_index --disable-change-limit' 24 | 25 | # Add demo program 26 | ./programs/provision.sh discovery 27 | -------------------------------------------------------------------------------- /provision-e2e.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openedx-unsupported/devstack/28f6d7ea1fa30fd7e0bdc10f269999f15f7f8876/provision-e2e.sh -------------------------------------------------------------------------------- /provision-ecommerce.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | set -x 4 | 5 | # Load database dumps for the largest databases to save time 6 | ./load-db.sh ecommerce 7 | 8 | ./provision-ida.sh ecommerce ecommerce 18130 9 | 10 | # Configure ecommerce 11 | docker compose exec -T ecommerce bash -e -c 'source /edx/app/ecommerce/ecommerce_env && python /edx/app/ecommerce/ecommerce/manage.py create_or_update_site --site-id=1 --site-domain=localhost:18130 --partner-code=edX --partner-name="Open edX" --lms-url-root=http://edx.devstack.lms:18000 --lms-public-url-root=http://localhost:18000 --client-side-payment-processor=cybersource --payment-processors=cybersource,paypal --sso-client-id=ecommerce-sso-key --sso-client-secret=ecommerce-sso-secret --backend-service-client-id=ecommerce-backend-service-key --backend-service-client-secret=ecommerce-backend-service-secret --from-email staff@example.com --discovery_api_url=http://edx.devstack.discovery:18381/api/v1/ --enable-microfrontend-for-basket-page=1 
--payment-microfrontend-url=http://localhost:1998' 12 | docker compose exec -T ecommerce bash -e -c 'source /edx/app/ecommerce/ecommerce_env && python /edx/app/ecommerce/ecommerce/manage.py oscar_populate_countries --initial-only' 13 | docker compose exec -T ecommerce bash -e -c 'source /edx/app/ecommerce/ecommerce_env && python /edx/app/ecommerce/ecommerce/manage.py create_demo_data --partner=edX' 14 | -------------------------------------------------------------------------------- /provision-forum.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | set -x 4 | 5 | docker compose up -d forum 6 | docker compose exec -T forum bash -e -c 'source /edx/app/forum/ruby_env && source /edx/app/forum/devstack_forum_env && cd /edx/app/forum/cs_comments_service && bundle install --deployment --path /edx/app/forum/.gem/ && bin/rake search:initialize' 7 | -------------------------------------------------------------------------------- /provision-ida-user.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | set -x 4 | 5 | #This script depends on the LMS being up! 6 | 7 | . scripts/colors.sh 8 | 9 | app_name=$1 10 | client_name=$2 11 | client_port=$3 12 | 13 | echo -e "${GREEN}Creating service user and OAuth2 applications for ${app_name}...${NC}" 14 | 15 | # Create the service user. 16 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker manage_user $1_worker $1_worker@example.com --staff --superuser' -- "$app_name" 17 | 18 | # Create the DOT applications - one for single sign-on and one for backend service IDA-to-IDA authentication. 19 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker create_dot_application --grant-type authorization-code --skip-authorization --redirect-uris "http://localhost:$3/complete/edx-oauth2/" --client-id "$1-sso-key" --client-secret "$1-sso-secret" --scopes "user_id" $1-sso $1_worker' -- "$app_name" "$client_name" "$client_port" 20 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker create_dot_application --grant-type client-credentials --client-id "$1-backend-service-key" --client-secret "$1-backend-service-secret" $1-backend-service $1_worker' -- "$app_name" "$client_name" "$client_port" 21 | -------------------------------------------------------------------------------- /provision-ida.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | . scripts/colors.sh 5 | set -x 6 | 7 | app_name=$1 # The name of the IDA application, i.e. /edx/app/ 8 | client_name=$2 # The name of the Oauth client stored in the edxapp DB. 9 | client_port=$3 # The port corresponding to this IDA service in devstack. 10 | container_name=${4:-$1} # (Optional) The name of the container. If missing, will use app_name. 
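# Example invocation (as used by provision-discovery.sh): ./provision-ida.sh discovery discovery 18381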
11 | 12 | make dev.up.$app_name 13 | 14 | echo -e "${GREEN}Installing requirements for ${app_name}...${NC}" 15 | docker compose exec -T ${container_name} bash -e -c 'source /edx/app/$1/$1_env && cd /edx/app/$1/$1/ && make requirements' -- "$app_name" 16 | 17 | echo -e "${GREEN}Running migrations for ${app_name}...${NC}" 18 | docker compose exec -T ${container_name} bash -e -c 'source /edx/app/$1/$1_env && cd /edx/app/$1/$1/ && make migrate' -- "$app_name" 19 | 20 | echo -e "${GREEN}Creating super-user for ${app_name}...${NC}" 21 | docker compose exec -T ${container_name} bash -e -c 'source /edx/app/$1/$1_env && echo "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser(\"edx\", \"edx@example.com\", \"edx\") if not User.objects.filter(username=\"edx\").exists() else None" | python /edx/app/$1/$1/manage.py shell' -- "$app_name" 22 | 23 | ./provision-ida-user.sh $app_name $client_name $client_port 24 | 25 | # Compile static assets last since they are absolutely necessary for all services. This will allow developers to get 26 | # started if they do not care about static assets 27 | echo -e "${GREEN}Compiling static assets for ${app_name}...${NC}" 28 | docker compose exec -T ${container_name} bash -e -c 'source /edx/app/$1/$1_env && cd /edx/app/$1/$1/ && make static' -- "$app_name" 29 | -------------------------------------------------------------------------------- /provision-insights.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | . scripts/colors.sh 5 | set -x 6 | 7 | name=insights 8 | port=18110 9 | 10 | docker compose up -d insights 11 | 12 | echo -e "${GREEN}Installing requirements for ${name}...${NC}" 13 | docker compose exec -T ${name} bash -e -c 'source /edx/app/insights/insights_env && cd /edx/app/insights/insights && make develop' -- ${name} 14 | 15 | # # Install Insights npm dependencies 16 | docker compose exec -T ${name} bash -e -c 'source /edx/app/insights/insights_env && cd /edx/app/insights/insights/ && npm ci && ./npm-post-install.sh' 17 | 18 | echo -e "${GREEN}Running migrations for ${name}...${NC}" 19 | docker compose exec -T ${name} bash -e -c 'source /edx/app/insights/insights_env && export DJANGO_SETTINGS_MODULE="analytics_dashboard.settings.devstack" && cd /edx/app/insights/insights && make migrate' -- ${name} 20 | 21 | ./provision-ida-user.sh ${name} ${name} ${port} 22 | 23 | # Compile static assets last since they are absolutely necessary for all services. 
This will allow developers to get 24 | # started if they do not care about static assets 25 | echo -e "${GREEN}Compiling static assets for ${name}...${NC}" 26 | docker compose exec -T ${name} bash -e -c 'source /edx/app/insights/insights_env && cd /edx/app/insights/insights && make static' -- "$name" 27 | -------------------------------------------------------------------------------- /provision-lms.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | set -x 4 | 5 | apps=( lms cms ) 6 | 7 | cms_port=18010 8 | 9 | # Load database dumps for the largest databases to save time 10 | ./load-db.sh edxapp 11 | ./load-db.sh edxapp_csmh 12 | 13 | # Bring edxapp containers online 14 | for app in "${apps[@]}"; do 15 | docker compose up -d $app 16 | done 17 | 18 | # install git for both LMS and CMS 19 | for app in "${apps[@]}"; do 20 | docker compose exec -T $app bash -e -c 'apt-get update && apt-get -y install --no-install-recommends git' 21 | 22 | docker compose exec -T $app bash -e -c 'source /edx/app/edxapp/edxapp_env && cd /edx/app/edxapp/edx-platform && NO_PYTHON_UNINSTALL=1 paver install_prereqs' 23 | 24 | #Installing prereqs crashes the process 25 | docker compose restart $app 26 | done 27 | 28 | # Run edxapp migrations first since they are needed for the service users and OAuth clients 29 | # Make migrate runs migrations for both lms and cms. 30 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && make migrate' 31 | 32 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && /edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms showmigrations --database student_module_history --traceback --pythonpath=. --settings devstack_docker' 33 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && /edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py lms migrate --database student_module_history --noinput --traceback --pythonpath=. --settings devstack_docker' 34 | 35 | docker compose exec -T cms bash -e -c 'source /edx/app/edxapp/edxapp_env && /edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py cms showmigrations --database student_module_history --traceback --pythonpath=. --settings devstack_docker' 36 | docker compose exec -T cms bash -e -c 'source /edx/app/edxapp/edxapp_env && /edx/app/edxapp/venvs/edxapp/bin/python /edx/app/edxapp/edx-platform/manage.py cms migrate --database student_module_history --noinput --traceback --pythonpath=. 
--settings devstack_docker' 37 | 38 | # Create a superuser for edxapp 39 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker manage_user edx edx@example.com --superuser --staff' 40 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && echo "from django.contrib.auth import get_user_model; User = get_user_model(); user = User.objects.get(username=\"edx\"); user.set_password(\"edx\"); user.save()" | python /edx/app/edxapp/edx-platform/manage.py lms shell --settings=devstack_docker' 41 | 42 | # Create an enterprise service user for edxapp and give them appropriate permissions 43 | ./enterprise/provision.sh 44 | 45 | # Enable the LMS-E-Commerce integration 46 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker configure_commerce' 47 | 48 | # Create demo course and users 49 | #docker compose exec -T lms bash -e -c '/edx/app/edx_ansible/venvs/edx_ansible/bin/ansible-playbook /edx/app/edx_ansible/edx_ansible/playbooks/demo.yml -v -c local -i "127.0.0.1," --extra-vars="COMMON_EDXAPP_SETTINGS=devstack_docker"' 50 | if [[ ${DEVSTACK_SKIP_DEMO-false} == "true" ]] 51 | then 52 | echo "Skipping import of demo course. DEVSTACK_SKIP_DEMO is set to true" 53 | else 54 | # FIXME: Using old version of demo course (open-release/quince.1) until we can 55 | # update devstack and other repos to match: https://github.com/openedx/devstack/issues/1273 56 | docker compose exec -T lms bash -e -c 'git clone https://github.com/openedx/edx-demo-course.git --branch open-release/quince.1 /tmp/edx-demo-course' 57 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py cms --settings=devstack_docker import /edx/var/edxapp/data /tmp/edx-demo-course && rm -rf /tmp/edx-demo-course' 58 | fi 59 | 60 | demo_hashed_password='pbkdf2_sha256$20000$TjE34FJjc3vv$0B7GUmH8RwrOc/BvMoxjb5j8EgnWTt3sxorDANeF7Qw=' 61 | for user in honor audit verified staff ; do 62 | email="$user@example.com" 63 | # Set staff flag for staff user 64 | if [[ $user == "staff" ]] ; then 65 | docker compose exec -T lms bash -e -c "source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker --service-variant lms manage_user $user $email --initial-password-hash '$demo_hashed_password' --staff" 66 | else 67 | docker compose exec -T lms bash -e -c "source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker --service-variant lms manage_user $user $email --initial-password-hash '$demo_hashed_password'" 68 | fi 69 | if [[ "${DEVSTACK_SKIP_DEMO-false}" != "true" ]] 70 | then 71 | # Enroll users in the demo course 72 | docker compose exec -T lms bash -e -c "source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker --service-variant lms enroll_user_in_course -e $email -c course-v1:edX+DemoX+Demo_Course" 73 | fi 74 | done 75 | 76 | 77 | # Fix missing vendor file by clearing the cache 78 | docker compose exec -T lms bash -e -c 'rm /edx/app/edxapp/edx-platform/.prereqs_cache/Node_prereqs.sha1' 79 | 80 | # Create static assets for both LMS and CMS 81 | for app in "${apps[@]}"; do 82 | docker compose exec -T $app bash -e -c 'source /edx/app/edxapp/edxapp_env && cd /edx/app/edxapp/edx-platform && paver update_assets --settings 
devstack_docker' 83 | done 84 | 85 | # Allow LMS SSO for CMS 86 | ./provision-ida-user.sh cms cms "$cms_port" 87 | 88 | # Provision a retirement service account user 89 | ./provision-retirement-user.sh retirement retirement_service_worker 90 | 91 | # Add demo program 92 | ./programs/provision.sh lms 93 | -------------------------------------------------------------------------------- /provision-mysql80.sql: -------------------------------------------------------------------------------- 1 | -- The use of `CREATE USER IF NOT EXISTS` is necessary since the 2 | -- mysql80_data volume may already contain these users due to previous 3 | -- provisioning https://github.com/openedx/devstack/issues/1113 4 | 5 | CREATE DATABASE IF NOT EXISTS credentials; 6 | CREATE USER IF NOT EXISTS 'credentials001'@'%' IDENTIFIED BY 'password'; 7 | GRANT ALL ON credentials.* TO 'credentials001'@'%'; 8 | 9 | CREATE DATABASE IF NOT EXISTS discovery; 10 | CREATE USER IF NOT EXISTS 'discov001'@'%' IDENTIFIED BY 'password'; 11 | GRANT ALL ON discovery.* TO 'discov001'@'%'; 12 | 13 | CREATE DATABASE IF NOT EXISTS ecommerce; 14 | CREATE USER IF NOT EXISTS 'ecomm001'@'%' IDENTIFIED BY 'password'; 15 | GRANT ALL ON ecommerce.* TO 'ecomm001'@'%'; 16 | 17 | CREATE DATABASE IF NOT EXISTS notes; 18 | CREATE USER IF NOT EXISTS 'notes001'@'%' IDENTIFIED BY 'password'; 19 | GRANT ALL ON notes.* TO 'notes001'@'%'; 20 | 21 | CREATE DATABASE IF NOT EXISTS registrar; 22 | CREATE USER IF NOT EXISTS 'registrar001'@'%' IDENTIFIED BY 'password'; 23 | GRANT ALL ON registrar.* TO 'registrar001'@'%'; 24 | 25 | CREATE DATABASE IF NOT EXISTS xqueue; 26 | CREATE USER IF NOT EXISTS 'xqueue001'@'%' IDENTIFIED BY 'password'; 27 | GRANT ALL ON xqueue.* TO 'xqueue001'@'%'; 28 | 29 | CREATE DATABASE IF NOT EXISTS `dashboard`; 30 | CREATE USER IF NOT EXISTS 'analytics001'@'%' IDENTIFIED BY 'password'; 31 | GRANT ALL ON `dashboard`.* TO 'analytics001'@'%'; 32 | 33 | CREATE DATABASE IF NOT EXISTS `analytics-api`; 34 | GRANT ALL ON `analytics-api`.* TO 'analytics001'@'%'; 35 | 36 | CREATE DATABASE IF NOT EXISTS `reports`; 37 | GRANT ALL ON `reports`.* TO 'analytics001'@'%'; 38 | 39 | CREATE DATABASE IF NOT EXISTS `reports_v1`; 40 | GRANT ALL ON `reports_v1`.* TO 'analytics001'@'%'; 41 | 42 | CREATE DATABASE IF NOT EXISTS edxapp; 43 | CREATE DATABASE IF NOT EXISTS edxapp_csmh; 44 | CREATE USER IF NOT EXISTS 'edxapp001'@'%' IDENTIFIED BY 'password'; 45 | GRANT ALL ON edxapp.* TO 'edxapp001'@'%'; 46 | GRANT ALL ON edxapp_csmh.* TO 'edxapp001'@'%'; 47 | 48 | FLUSH PRIVILEGES; 49 | -------------------------------------------------------------------------------- /provision-notes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Provisioning script for the notes service 3 | set -eu -o pipefail 4 | 5 | . scripts/colors.sh 6 | set -x 7 | 8 | name=edx_notes_api 9 | port=18734 10 | client_name=edx-notes # The name of the Oauth client stored in the edxapp DB. 
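# (These variables are reused below: $name is the docker compose service to start, and $name, $client_name, and $port are passed to ./provision-ida-user.sh to set up the corresponding service user and OAuth client in the LMS.)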
11 | 12 | docker compose up -d $name 13 | 14 | echo -e "${GREEN}Installing requirements for ${name}...${NC}" 15 | docker compose exec -T ${name} bash -e -c 'cd /edx/app/notes && make requirements' -- "$name" 16 | 17 | echo -e "${GREEN}Running migrations for ${name}...${NC}" 18 | docker compose exec -T ${name} bash -e -c 'cd /edx/app/notes && make migrate' -- "$name" 19 | 20 | echo -e "${GREEN}Creating super-user for ${name}...${NC}" 21 | docker compose exec -T ${name} bash -e -c 'echo "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser(\"edx\", \"edx@example.com\", \"edx\") if not User.objects.filter(username=\"edx\").exists() else None" | python /edx/app/notes/manage.py shell' -- "$name" 22 | 23 | ./provision-ida-user.sh $name $client_name $port 24 | 25 | # Compile static assets last since they aren't absolutely necessary for all services. This will allow developers to get 26 | # started if they do not care about static assets 27 | echo -e "${GREEN}Compiling static assets for ${name}...${NC}" 28 | docker compose exec -T ${name} bash -e -c 'cd /edx/app/notes && make static' -- "$name" 29 | 30 | # This will build the elasticsearch index for notes. 31 | echo -e "${GREEN}Creating indexes for ${name}...${NC}" 32 | docker compose exec -T ${name} bash -e -c 'cd /edx/app/notes/ && python manage.py search_index --rebuild -f' 33 | -------------------------------------------------------------------------------- /provision-registrar.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | . scripts/colors.sh 5 | set -x 6 | 7 | name=registrar 8 | port=18734 9 | 10 | docker compose up -d $name 11 | 12 | echo -e "${GREEN}Installing requirements for ${name}...${NC}" 13 | docker compose exec -T ${name} bash -e -c 'cd /edx/app/registrar && make requirements' -- "$name" 14 | 15 | echo -e "${GREEN}Running migrations for ${name}...${NC}" 16 | docker compose exec -T ${name} bash -e -c 'cd /edx/app/registrar && make migrate' -- "$name" 17 | 18 | echo -e "${GREEN}Creating super-user for ${name}...${NC}" 19 | docker compose exec -T ${name} bash -e -c 'cd /edx/app/registrar && make createsuperuser' -- "$name" 20 | 21 | ./provision-ida-user.sh ${name} ${name} ${port} 22 | 23 | # Compile static assets last since they aren't absolutely necessary for all services. This will allow developers to get 24 | # started if they do not care about static assets 25 | echo -e "${GREEN}Compiling static assets for ${name}...${NC}" 26 | docker compose exec -T ${name} bash -e -c 'cd /edx/app/registrar; if ! make static 2>registrar_make_static.err; then echo "------- Last 100 lines of stderr"; tail registrar_make_static.err -n 100; echo "-------"; fi;' -- "$name" 27 | -------------------------------------------------------------------------------- /provision-retirement-user.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #This script depends on the LMS being up! 3 | set -eu -o pipefail 4 | 5 | . 
scripts/colors.sh 6 | set -x 7 | 8 | app_name=$1 9 | user_name=$2 10 | 11 | echo -e "${GREEN}Creating retirement service user ${user_name} and DOT Application ${app_name}...${NC}" 12 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker manage_user $1 $1@example.com --staff --superuser' -- "$user_name" 13 | docker compose exec -T lms bash -e -c 'source /edx/app/edxapp/edxapp_env && python /edx/app/edxapp/edx-platform/manage.py lms --settings=devstack_docker create_dot_application $1 $2' -- "$app_name" "$user_name" 14 | -------------------------------------------------------------------------------- /provision-xqueue.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | set -x 4 | 5 | # Bring up XQueue; we don't need the consumer for provisioning 6 | docker compose up -d xqueue 7 | 8 | # Update dependencies 9 | docker compose exec -T xqueue bash -e -c 'source /edx/app/xqueue/xqueue_env && cd /edx/app/xqueue/xqueue && make requirements' 10 | # Run migrations 11 | docker compose exec -T xqueue bash -e -c 'source /edx/app/xqueue/xqueue_env && cd /edx/app/xqueue/xqueue && python manage.py migrate' 12 | # Add users that graders use to fetch data; there's one default user in Ansible which is part of our settings 13 | docker compose exec -T xqueue bash -e -c 'source /edx/app/xqueue/xqueue_env && cd /edx/app/xqueue/xqueue && python manage.py update_users' 14 | -------------------------------------------------------------------------------- /provision.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script will provision the services specified in the argument list, 4 | # or all services if no arguments are provided. 5 | # 6 | # Non-existent services will be ignored. 7 | # Specifying services more than once will cause them to be provisioned more 8 | # than once. 9 | # 10 | # To allow services to be passed in through a Makefile target, 11 | # services can be plus-sign-separated as well as space-separated. 12 | # 13 | # Service(s) will generally be set up in the following manner 14 | # (but refer to individual ./provision-{service} scripts to be sure): 15 | # 1. Migrations run, 16 | # 2. Tenants—as in multi-tenancy—set up, 17 | # 3. Service users and OAuth clients set up in LMS, 18 | # 4. Static assets compiled/collected. 19 | # 20 | # DEVSTACK DEVELOPERS -- To add a service to provisioning: 21 | # * Create a provision-{service}.sh script for your new service. 22 | # * Add the name of the service to ALL_SERVICES_IN_ORDER. 23 | # 24 | # Example usages: 25 | # ./provision.sh # Provision all services. 26 | # ./provision.sh lms ecommerce discovery # Provision these three services. 27 | # ./provision.sh lms+ecommerce+discovery # Same as previous command. 28 | 29 | set -eu -o pipefail 30 | set -x 31 | 32 | . scripts/colors.sh 33 | 34 | # All provisionable services. 35 | # (Leading and trailing space are necessary for if-checks.) 36 | # The order here is the order we will use when provisioning, even if only a subset 37 | # of services are requested. 38 | # Changing this order may break provisioning. 39 | # For example, Discovery breaks if LMS is not provisioned first. 
40 | ALL_SERVICES_IN_ORDER=" \ 41 | lms \ 42 | ecommerce \ 43 | discovery \ 44 | credentials \ 45 | forum \ 46 | notes \ 47 | registrar \ 48 | xqueue \ 49 | coursegraph \ 50 | insights \ 51 | analyticsapi \ 52 | " 53 | 54 | # What should we provision? 55 | if [[ $# -eq 0 ]]; then 56 | requested_services=$ALL_SERVICES_IN_ORDER 57 | else 58 | arg_string=" $* " 59 | # Replace plus signs with spaces in order to allow plus-sign-separated 60 | # services in addition to space-separated services. 61 | requested_services="${arg_string//+/ }" 62 | fi 63 | 64 | # Returns whether first arg contains second arg as substring. 65 | is_substring() { 66 | local str="$1" 67 | local substr="$2" 68 | if [[ "$str" == *" ${substr} "* ]]; then 69 | return 0 # Note that '0' means 'success' (i.e., true) in bash. 70 | else 71 | return 1 72 | fi 73 | } 74 | 75 | # Returns whether we need to boot mongo, 76 | # based on the space-separated list of services passed in the first argument. 77 | needs_mongo() { 78 | local services="$1" 79 | if is_substring "$services" lms || is_substring "$services" forum; then 80 | return 0 81 | else 82 | return 1 83 | fi 84 | } 85 | 86 | # Validate user input, building up list of services to provision. 87 | to_provision=" " 88 | for serv in $requested_services; do 89 | case "$serv" in 90 | cms) 91 | echo -e "${YELLOW}CMS is provisioned alongside LMS.\nPass 'lms' as an argument to ensure that CMS is provisioned.${NC}" 92 | continue 93 | ;; 94 | edx_notes_api) 95 | # Treat 'edx_notes_api' as an alias for 'notes'. 96 | service="notes" 97 | ;; 98 | *) 99 | service="$serv" 100 | esac 101 | if is_substring "$ALL_SERVICES_IN_ORDER" "$service"; then 102 | if ! is_substring "$to_provision" "$service"; then 103 | to_provision="${to_provision}${service} " 104 | fi 105 | else 106 | echo -e "${YELLOW}Service '${service}' either doesn't exist or isn't provisionable.${NC}" 107 | fi 108 | done 109 | 110 | # Order the services based on $ALL_SERVICES_IN_ORDER. 111 | to_provision_ordered=" " 112 | for ordered_service in $ALL_SERVICES_IN_ORDER; do 113 | if is_substring "$to_provision" "$ordered_service"; then 114 | to_provision_ordered="${to_provision_ordered}${ordered_service} " 115 | fi 116 | done 117 | 118 | if [[ "$to_provision_ordered" = " " ]]; then 119 | echo -e "${YELLOW}Nothing to provision; will exit.${NC}" 120 | exit 0 121 | fi 122 | echo -e "${GREEN}Will provision the following:\n ${to_provision_ordered}${NC}" 123 | 124 | # Bring the databases online. 125 | docker compose up -d mysql57 126 | docker compose up -d mysql80 127 | if needs_mongo "$to_provision_ordered"; then 128 | docker compose up -d mongo 129 | fi 130 | 131 | # Ensure the MySQL server is online and usable 132 | echo -e "${GREEN}Waiting for MySQL.${NC}" 133 | make dev.wait-for.mysql57+mysql80 134 | echo -e "${GREEN}MySQL is ready.${NC}" 135 | 136 | # Ensure that the MySQL databases and users are created for all IDAs. 137 | # (A no-op for databases and users that already exist). 138 | echo -e "${GREEN}Ensuring MySQL 5.7 databases and users exist...${NC}" 139 | docker compose exec -T mysql57 bash -e -c "mysql -uroot mysql" < provision.sql 140 | echo -e "${GREEN}Ensuring MySQL 8.0 databases and users exist...${NC}" 141 | docker compose exec -T mysql80 bash -e -c "mysql -uroot mysql" < provision-mysql80.sql 142 | 143 | # If necessary, ensure the MongoDB server is online and usable 144 | # and create its users. 
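# (The MongoDB users themselves are defined in mongo-provision.js, which is piped into the mongo shell below.)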
145 | if needs_mongo "$to_provision_ordered"; then 146 | echo -e "${GREEN}Waiting for MongoDB...${NC}" 147 | # mongo container and mongo process/shell inside the container 148 | make dev.wait-for.mongo 149 | echo -e "${GREEN}MongoDB ready.${NC}" 150 | echo -e "${GREEN}Creating MongoDB users...${NC}" 151 | docker compose exec -T mongo bash -e -c "mongo" < mongo-provision.js 152 | else 153 | echo -e "${GREEN}MongoDB preparation not required; skipping.${NC}" 154 | fi 155 | 156 | # Run the service-specific provisioning script(s) 157 | for service in $to_provision_ordered; do 158 | echo -e "${GREEN} Provisioning ${service}...${NC}" 159 | ./provision-"$service".sh 160 | echo -e "${GREEN} Provisioned ${service}.${NC}" 161 | done 162 | 163 | docker image prune -f 164 | 165 | echo -e "${GREEN}Provisioning complete!${NC}" 166 | -------------------------------------------------------------------------------- /provision.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE IF NOT EXISTS credentials; 2 | GRANT ALL ON credentials.* TO 'credentials001'@'%' IDENTIFIED BY 'password'; 3 | 4 | CREATE DATABASE IF NOT EXISTS discovery; 5 | GRANT ALL ON discovery.* TO 'discov001'@'%' IDENTIFIED BY 'password'; 6 | 7 | CREATE DATABASE IF NOT EXISTS ecommerce; 8 | GRANT ALL ON ecommerce.* TO 'ecomm001'@'%' IDENTIFIED BY 'password'; 9 | 10 | CREATE DATABASE IF NOT EXISTS notes; 11 | GRANT ALL ON notes.* TO 'notes001'@'%' IDENTIFIED BY 'password'; 12 | 13 | CREATE DATABASE IF NOT EXISTS registrar; 14 | GRANT ALL ON registrar.* TO 'registrar001'@'%' IDENTIFIED BY 'password'; 15 | 16 | CREATE DATABASE IF NOT EXISTS xqueue; 17 | GRANT ALL ON xqueue.* TO 'xqueue001'@'%' IDENTIFIED BY 'password'; 18 | 19 | CREATE DATABASE IF NOT EXISTS edxapp; 20 | CREATE DATABASE IF NOT EXISTS edxapp_csmh; 21 | GRANT ALL ON edxapp.* TO 'edxapp001'@'%' IDENTIFIED BY 'password'; 22 | GRANT ALL ON edxapp_csmh.* TO 'edxapp001'@'%'; 23 | 24 | CREATE DATABASE IF NOT EXISTS `dashboard`; 25 | GRANT ALL ON `dashboard`.* TO 'analytics001'@'%' IDENTIFIED BY 'password'; 26 | 27 | CREATE DATABASE IF NOT EXISTS `analytics-api`; 28 | GRANT ALL ON `analytics-api`.* TO 'analytics001'@'%' IDENTIFIED BY 'password'; 29 | 30 | CREATE DATABASE IF NOT EXISTS `reports`; 31 | GRANT ALL ON `reports`.* TO 'analytics001'@'%' IDENTIFIED BY 'password'; 32 | 33 | CREATE DATABASE IF NOT EXISTS `reports_v1`; 34 | GRANT ALL ON `reports_v1`.* TO 'analytics001'@'%' IDENTIFIED BY 'password'; 35 | 36 | 37 | FLUSH PRIVILEGES; 38 | -------------------------------------------------------------------------------- /requirements/base.in: -------------------------------------------------------------------------------- 1 | -c constraints.txt 2 | 3 | # Support for Apple Silicon begins with 6.0.0 4 | PyYAML>=6.0.0 # For parsing configuration files while generating offline installers 5 | -------------------------------------------------------------------------------- /requirements/base.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.8 3 | # by the following command: 4 | # 5 | # make upgrade 6 | # 7 | pyyaml==6.0.1 8 | # via -r requirements/base.in 9 | -------------------------------------------------------------------------------- /requirements/constraints.txt: -------------------------------------------------------------------------------- 1 | # Version constraints for pip-installation. 
2 | # 3 | # This file doesn't install any packages. It specifies version constraints 4 | # that will be applied if a package is needed. 5 | # 6 | # When pinning something here, please provide an explanation of why. Ideally, 7 | # link to other information that will help people in the future to remove the 8 | # pin when possible. Writing an issue against the offending project and 9 | # linking to it here is good. 10 | 11 | # Common constraints for edx repos 12 | -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt 13 | -------------------------------------------------------------------------------- /requirements/dev.in: -------------------------------------------------------------------------------- 1 | # Additional requirements for development of this application 2 | -c constraints.txt 3 | 4 | -r pip-tools.txt # pip-tools and its dependencies, for managing requirements files 5 | -r base.txt # Core dependencies for this package 6 | -r test.txt # Dependencies required for running tests 7 | 8 | tox # Virtualenv management for tests 9 | -------------------------------------------------------------------------------- /requirements/dev.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.8 3 | # by the following command: 4 | # 5 | # make upgrade 6 | # 7 | build==1.0.3 8 | # via 9 | # -r requirements/pip-tools.txt 10 | # pip-tools 11 | cachetools==5.3.3 12 | # via tox 13 | chardet==5.2.0 14 | # via tox 15 | click==8.1.7 16 | # via 17 | # -r requirements/pip-tools.txt 18 | # pip-tools 19 | colorama==0.4.6 20 | # via tox 21 | distlib==0.3.8 22 | # via virtualenv 23 | exceptiongroup==1.2.0 24 | # via 25 | # -r requirements/test.txt 26 | # pytest 27 | filelock==3.13.1 28 | # via 29 | # tox 30 | # virtualenv 31 | importlib-metadata==7.0.1 32 | # via 33 | # -r requirements/pip-tools.txt 34 | # build 35 | iniconfig==2.0.0 36 | # via 37 | # -r requirements/test.txt 38 | # pytest 39 | packaging==23.2 40 | # via 41 | # -r requirements/pip-tools.txt 42 | # -r requirements/test.txt 43 | # build 44 | # pyproject-api 45 | # pytest 46 | # tox 47 | pexpect==4.9.0 48 | # via -r requirements/test.txt 49 | pip-tools==7.4.0 50 | # via -r requirements/pip-tools.txt 51 | platformdirs==4.2.0 52 | # via 53 | # tox 54 | # virtualenv 55 | pluggy==1.4.0 56 | # via 57 | # -r requirements/test.txt 58 | # pytest 59 | # tox 60 | ptyprocess==0.7.0 61 | # via 62 | # -r requirements/test.txt 63 | # pexpect 64 | pyproject-api==1.6.1 65 | # via tox 66 | pyproject-hooks==1.0.0 67 | # via 68 | # -r requirements/pip-tools.txt 69 | # build 70 | # pip-tools 71 | pytest==8.0.2 72 | # via -r requirements/test.txt 73 | pyyaml==6.0.1 74 | # via 75 | # -r requirements/base.txt 76 | # -r requirements/test.txt 77 | tomli==2.0.1 78 | # via 79 | # -r requirements/pip-tools.txt 80 | # -r requirements/test.txt 81 | # build 82 | # pip-tools 83 | # pyproject-api 84 | # pyproject-hooks 85 | # pytest 86 | # tox 87 | tox==4.13.0 88 | # via -r requirements/dev.in 89 | virtualenv==20.25.1 90 | # via tox 91 | wheel==0.42.0 92 | # via 93 | # -r requirements/pip-tools.txt 94 | # pip-tools 95 | zipp==3.17.0 96 | # via 97 | # -r requirements/pip-tools.txt 98 | # importlib-metadata 99 | 100 | # The following packages are considered to be unsafe in a requirements file: 101 | # pip 102 | # setuptools 103 | -------------------------------------------------------------------------------- /requirements/doc.in: 
-------------------------------------------------------------------------------- 1 | # Requirements for documentation validation 2 | -c constraints.txt 3 | 4 | -r base.txt # Core dependencies for this package 5 | 6 | doc8 # reStructuredText style checker 7 | sphinx-book-theme # Common theme for all Open edX projects 8 | readme_renderer # Validates README.rst for usage on PyPI 9 | Sphinx # Documentation builder 10 | -------------------------------------------------------------------------------- /requirements/doc.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.8 3 | # by the following command: 4 | # 5 | # make upgrade 6 | # 7 | accessible-pygments==0.0.4 8 | # via pydata-sphinx-theme 9 | alabaster==0.7.13 10 | # via sphinx 11 | babel==2.14.0 12 | # via 13 | # pydata-sphinx-theme 14 | # sphinx 15 | beautifulsoup4==4.12.3 16 | # via pydata-sphinx-theme 17 | certifi==2024.2.2 18 | # via requests 19 | charset-normalizer==3.3.2 20 | # via requests 21 | doc8==1.1.1 22 | # via -r requirements/doc.in 23 | docutils==0.19 24 | # via 25 | # doc8 26 | # pydata-sphinx-theme 27 | # readme-renderer 28 | # restructuredtext-lint 29 | # sphinx 30 | idna==3.6 31 | # via requests 32 | imagesize==1.4.1 33 | # via sphinx 34 | importlib-metadata==7.0.1 35 | # via sphinx 36 | jinja2==3.1.3 37 | # via sphinx 38 | markupsafe==2.1.5 39 | # via jinja2 40 | nh3==0.2.15 41 | # via readme-renderer 42 | packaging==23.2 43 | # via 44 | # pydata-sphinx-theme 45 | # sphinx 46 | pbr==6.0.0 47 | # via stevedore 48 | pydata-sphinx-theme==0.14.4 49 | # via sphinx-book-theme 50 | pygments==2.17.2 51 | # via 52 | # accessible-pygments 53 | # doc8 54 | # pydata-sphinx-theme 55 | # readme-renderer 56 | # sphinx 57 | pytz==2024.1 58 | # via babel 59 | pyyaml==6.0.1 60 | # via -r requirements/base.txt 61 | readme-renderer==43.0 62 | # via -r requirements/doc.in 63 | requests==2.31.0 64 | # via sphinx 65 | restructuredtext-lint==1.4.0 66 | # via doc8 67 | snowballstemmer==2.2.0 68 | # via sphinx 69 | soupsieve==2.5 70 | # via beautifulsoup4 71 | sphinx==6.2.1 72 | # via 73 | # -r requirements/doc.in 74 | # pydata-sphinx-theme 75 | # sphinx-book-theme 76 | sphinx-book-theme==1.0.1 77 | # via -r requirements/doc.in 78 | sphinxcontrib-applehelp==1.0.4 79 | # via sphinx 80 | sphinxcontrib-devhelp==1.0.2 81 | # via sphinx 82 | sphinxcontrib-htmlhelp==2.0.1 83 | # via sphinx 84 | sphinxcontrib-jsmath==1.0.1 85 | # via sphinx 86 | sphinxcontrib-qthelp==1.0.3 87 | # via sphinx 88 | sphinxcontrib-serializinghtml==1.1.5 89 | # via sphinx 90 | stevedore==5.2.0 91 | # via doc8 92 | tomli==2.0.1 93 | # via doc8 94 | typing-extensions==4.10.0 95 | # via pydata-sphinx-theme 96 | urllib3==2.2.1 97 | # via requests 98 | zipp==3.17.0 99 | # via importlib-metadata 100 | -------------------------------------------------------------------------------- /requirements/pip-tools.in: -------------------------------------------------------------------------------- 1 | -c constraints.txt 2 | 3 | pip-tools 4 | -------------------------------------------------------------------------------- /requirements/pip-tools.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.8 3 | # by the following command: 4 | # 5 | # make upgrade 6 | # 7 | build==1.0.3 8 | # via pip-tools 9 | click==8.1.7 10 | # via pip-tools 11 | importlib-metadata==7.0.1 12 | # via build 13 | 
packaging==23.2 14 | # via build 15 | pip-tools==7.4.0 16 | # via -r requirements/pip-tools.in 17 | pyproject-hooks==1.0.0 18 | # via 19 | # build 20 | # pip-tools 21 | tomli==2.0.1 22 | # via 23 | # build 24 | # pip-tools 25 | # pyproject-hooks 26 | wheel==0.42.0 27 | # via pip-tools 28 | zipp==3.17.0 29 | # via importlib-metadata 30 | 31 | # The following packages are considered to be unsafe in a requirements file: 32 | # pip 33 | # setuptools 34 | -------------------------------------------------------------------------------- /requirements/pip.in: -------------------------------------------------------------------------------- 1 | -c constraints.txt 2 | # Core dependencies for installing other packages 3 | 4 | pip 5 | setuptools 6 | wheel 7 | 8 | -------------------------------------------------------------------------------- /requirements/pip.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.8 3 | # by the following command: 4 | # 5 | # make upgrade 6 | # 7 | wheel==0.42.0 8 | # via -r requirements/pip.in 9 | 10 | # The following packages are considered to be unsafe in a requirements file: 11 | pip==24.0 12 | # via -r requirements/pip.in 13 | setuptools==69.1.1 14 | # via -r requirements/pip.in 15 | -------------------------------------------------------------------------------- /requirements/test.in: -------------------------------------------------------------------------------- 1 | # Dependencies required for running tests. 2 | 3 | -c constraints.txt 4 | 5 | -r base.txt # Core dependencies for this package 6 | 7 | pytest # Test runner 8 | pexpect # Utility for making Expect-like tests for CLI interaction 9 | -------------------------------------------------------------------------------- /requirements/test.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.8 3 | # by the following command: 4 | # 5 | # make upgrade 6 | # 7 | exceptiongroup==1.2.0 8 | # via pytest 9 | iniconfig==2.0.0 10 | # via pytest 11 | packaging==23.2 12 | # via pytest 13 | pexpect==4.9.0 14 | # via -r requirements/test.in 15 | pluggy==1.4.0 16 | # via pytest 17 | ptyprocess==0.7.0 18 | # via pexpect 19 | pytest==8.0.2 20 | # via -r requirements/test.in 21 | pyyaml==6.0.1 22 | # via -r requirements/base.txt 23 | tomli==2.0.1 24 | # via pytest 25 | -------------------------------------------------------------------------------- /scripts/Jenkinsfiles/devstack_snapshot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | virtualenv --python=python3.8 devstack_snapshot_venv -q 4 | source devstack_snapshot_venv/bin/activate 5 | 6 | python scripts/snapshot.py ../devstack_snapshot 7 | -------------------------------------------------------------------------------- /scripts/Jenkinsfiles/snapshot: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { label "devstack-worker" } 3 | environment { 4 | COMPOSE_HTTP_TIMEOUT = '120' 5 | DOCKER_CLIENT_TIMEOUT = '120' 6 | USE_TTY = 'false' 7 | } 8 | options { 9 | timestamps() 10 | timeout(120) 11 | } 12 | stages { 13 | stage("Build installer") { 14 | steps { 15 | sh 'make requirements' 16 | sh 'make dev.clone.https' 17 | sh 'make dev.pull' 18 | sh 'make dev.provision' 19 | sh 'bash devstack_snapshot.sh' 20 | } 21 | } 22 | } 23 | post { 24 | always { 25 | cleanWs() 26 | 
} 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /scripts/README.txt: -------------------------------------------------------------------------------- 1 | To install this snapshot of Open edX devstack (no network access required): 2 | 3 | 1. Copy the entire "devstack_snapshot" directory to your computer. You'll 4 | need about 35 GB free (9 GB for the copied files and 26 GB more for the 5 | subsequent installation). 6 | 7 | 2. Unmount the flash drive, remove it, and return it to the workshop staff 8 | so someone else can use it. 9 | 10 | macOS 11 | ----- 12 | 13 | 3. If you don't already have Docker 17.06 CE or later installed, install 14 | it from "devstack_snapshot/Docker.dmg". 15 | 16 | 4. From a terminal, enter the "devstack_snapshot" directory and run 17 | "bash mac.sh". 18 | 19 | Linux 20 | ----- 21 | 22 | 3. Make sure you have Docker 17.06 CE or later installed. If not, see 23 | https://www.docker.com/community-edition for installation instructions. 24 | 25 | 4. Make sure you have docker-compose 1.9.0 or later installed. If not, 26 | you can get it by running the following: 27 | 28 | sudo curl -L https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose 29 | sudo chmod +x /usr/local/bin/docker-compose 30 | 31 | 5. From a terminal, enter the "devstack_snapshot" directory and run 32 | "bash linux.sh". Partway through you'll be prompted for your password 33 | to grant sudo access (to run docker commands); provide it as needed. 34 | 35 | Windows 36 | ------- 37 | 38 | 3. If you don't already have Docker 17.06 CE or later installed, install 39 | it from "devstack_snapshot/Docker for Windows Installer.exe". 40 | 41 | 4. Follow the instructions at 42 | https://github.com/openedx/devstack/blob/master/README-windows.rst 43 | (Unlike the macOS and Linux installations above, this will require 44 | a network connection). 45 | 46 | Open edX devstack isn't fully working on Windows yet, but if you are 47 | running Windows 10 you can help us diagnose and fix the remaining 48 | problems. If you're running an older version of Windows, please ask 49 | for help choosing a task that can be completed without installing 50 | devstack. 51 | 52 | All Operating Systems 53 | --------------------- 54 | 55 | Done! Try visiting http://localhost:18000/ for the LMS and 56 | http://localhost:18010/ for CMS. It may take a minute or two for the 57 | services to finish initializing and start responding to requests. 58 | -------------------------------------------------------------------------------- /scripts/colors.sh: -------------------------------------------------------------------------------- 1 | # Source this file to get color variables 2 | 3 | RED='\033[0;31m' 4 | GREEN='\033[0;32m' 5 | YELLOW='\033[0;33m' 6 | NC='\033[0m' # No Color 7 | -------------------------------------------------------------------------------- /scripts/extract_snapshot_linux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Installs Open edX devstack on Linux from a local snapshot of repository, 4 | # image, and volume tarballs. 
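# (Note: snapshot.py copies this script into the snapshot output directory as "linux.sh"; see the README.txt copied alongside it for the full install steps.)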
5 | 6 | set -e 7 | 8 | # Extract all of the Open edX source code repositories needed to run devstack 9 | for tarball in repositories/*.tar.gz 10 | do 11 | echo "Extracting $tarball" 12 | tar xzf $tarball 13 | done 14 | 15 | # Load Docker containers and their associated volumes 16 | # Calls to "docker" usually require sudo privileges on Linux 17 | # add sudo here (and line 25 & 28) if needed... 18 | # However, best practice is to create docker group: 19 | # q.v. https://docs.docker.com/install/linux/linux-postinstall/ 20 | python devstack/scripts/restore.py 21 | 22 | # For the rest, we need to be in the directory with the devstack Makefile 23 | cd devstack 24 | 25 | # Shut down all the running containers; the volumes were incomplete at startup 26 | make down 27 | 28 | # Start all the containers again with correctly populated volumes. 29 | make dev.up.large-and-slow 30 | -------------------------------------------------------------------------------- /scripts/extract_snapshot_mac.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Installs Open edX devstack on macOS from a local snapshot of repository, 4 | # image, and volume tarballs. 5 | 6 | set -e 7 | 8 | # Extract all of the Open edX source code repositories needed to run devstack 9 | for tarball in repositories/*.tar.gz 10 | do 11 | echo "Extracting $tarball" 12 | tar xzf $tarball 13 | done 14 | 15 | # Load Docker containers and populate their associated volumes 16 | python devstack/scripts/restore.py 17 | 18 | # For the rest, we need to be in the directory with the devstack Makefile 19 | cd devstack 20 | 21 | # Shut down all the running containers; the volumes were incomplete at startup 22 | make down 23 | 24 | # Start all the containers again with correctly populated volumes. 25 | make dev.up.large-and-slow 26 | -------------------------------------------------------------------------------- /scripts/make_warn_default_large.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Warn the developer that they've run a make command that uses a very 3 | # large set of services and that often is not the best tool for the 4 | # job. 5 | # 6 | # This script is used in the Makefile for commands that should be run 7 | # as `make $target.large-and-slow` instead if that's what's intended. 8 | 9 | target="$1" 10 | 11 | show_warning_and_wait() { 12 | cat <<"EOCOW" >&2 13 | _________________________________________________________________________ 14 | / \ 15 | | Are you sure you want to run this command for *all* Open edX services? | 16 | \_________________________________________________________________________/ 17 | \ ^__^ 18 | \ (oo)\_______ 19 | (__)\ )\/\ 20 | ||----w | 21 | || || 22 | 23 | EOCOW 24 | 25 | cat <<EOF >&2 26 | The command "make $target" will operate on a large default set of 27 | services and their dependencies. This can make your task take longer 28 | than necessary. 29 | 30 | You may prefer to use something like "make $target.lms" to 31 | target a smaller set of services. Learn more about the commands you 32 | can run at: 33 | 34 | https://edx.readthedocs.io/projects/open-edx-devstack/en/latest/devstack_interface.html 35 | 36 | Without an explicit list of services, many devstack Make targets pull 37 | down Docker images you don't need or take up extra memory and CPU. You 38 | might even run into bugs in unrelated services. 
39 | 40 | (If you *really* want the large default set of services, you can use 41 | the command "make $target.large-and-slow". You can also configure 42 | DEFAULT_SERVICES in your options.local.mk to your preferred smaller 43 | set of services. Either of these options will prevent this warning.) 44 | 45 | EOF 46 | 47 | read -r -p $'(You can cancel the command now with Ctrl-C or press ENTER to continue.)\n' 48 | } 49 | 50 | if grep --quiet --no-messages '^DEFAULT_SERVICES' options.local.mk; then 51 | echo >&2 "Skipping warning because DEFAULT_SERVICES is set in options.local.mk" 52 | else 53 | show_warning_and_wait 54 | fi 55 | 56 | make --no-print-directory "$target.large-and-slow" 57 | -------------------------------------------------------------------------------- /scripts/restore.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Restore Docker images and volumes from the tarballs found in "images" and 4 | "volumes" subdirectories of the current directory. 5 | """ 6 | from __future__ import absolute_import, print_function, unicode_literals 7 | 8 | import json 9 | import os 10 | from subprocess import check_call 11 | 12 | SOURCE_DIR = os.getcwd() 13 | IMAGES_DIR = os.path.join(SOURCE_DIR, 'images') 14 | VOLUMES_DIR = os.path.join(SOURCE_DIR, 'volumes') 15 | VOLUMES_JSON = os.path.join(VOLUMES_DIR, 'volumes.json') 16 | DEVSTACK_REPO_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 | 18 | # Use this minimal container image to restore volume content 19 | BACKUP_IMAGE = 'alpine:latest' 20 | 21 | 22 | def load_images(): 23 | """ 24 | Load all of the Docker images from the associated tarballs. 25 | """ 26 | for filename in os.listdir(IMAGES_DIR): 27 | if not filename.endswith('.tar.gz'): 28 | continue 29 | tarball = os.path.join(IMAGES_DIR, filename) 30 | print('Loading Docker image from {}'.format(filename)) 31 | check_call(['docker', 'load', '--input', tarball]) 32 | 33 | 34 | def start_devstack(): 35 | """ 36 | Start the devstack containers so their volumes can be populated. 37 | """ 38 | cwd = os.getcwd() 39 | os.chdir(DEVSTACK_REPO_DIR) 40 | check_call(['make', 'dev.up.large-and-slow']) 41 | os.chdir(cwd) 42 | 43 | 44 | def load_volumes(): 45 | """ 46 | Restore the image volume content from the associated tarballs. 47 | """ 48 | with open(VOLUMES_JSON, 'r') as f: 49 | volumes = json.loads(f.read()) 50 | for volume in volumes: 51 | container_name = volume['container'] 52 | path = volume['path'] 53 | if path.endswith('/'): 54 | path = path[:-1] 55 | tarball = volume['tarball'] 56 | components = str(path.count('/')) 57 | print('Loading volume from {}'.format(tarball)) 58 | check_call(['docker', 'run', '--rm', '--volumes-from', container_name, 59 | '-v', '{}:/backup'.format(VOLUMES_DIR), BACKUP_IMAGE, 60 | 'tar', 'xzf', '/backup/{}'.format(tarball), '-C', path, 61 | '--strip-components', components]) 62 | 63 | 64 | if __name__ == "__main__": 65 | load_images() 66 | start_devstack() 67 | load_volumes() 68 | -------------------------------------------------------------------------------- /scripts/snapshot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script to capture a snapshot of the current devstack images, repositories, 4 | and volume content to tarballs for no-network installation. To be run while 5 | devstack is running (otherwise volume content can't be accessed). 
6 | """ 7 | from __future__ import absolute_import, print_function, unicode_literals 8 | 9 | import argparse 10 | import json 11 | import os 12 | import re 13 | from shutil import copyfile 14 | from subprocess import STDOUT, CalledProcessError, check_output 15 | 16 | import yaml 17 | 18 | REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 19 | DEVSTACK_WORKSPACE = os.path.dirname(REPO_ROOT) 20 | REPO_SCRIPT = os.path.join(REPO_ROOT, 'repo.sh') 21 | 22 | # Use this minimal container image to fetch volume content 23 | BACKUP_IMAGE = 'alpine:latest' 24 | 25 | 26 | def make_directories(output_dir): 27 | """ 28 | Create any of the output directories that don't already exist. 29 | """ 30 | if not os.path.exists(output_dir): 31 | os.mkdir(output_dir) 32 | for dir_name in ('images', 'repositories', 'volumes'): 33 | path = os.path.join(output_dir, dir_name) 34 | if not os.path.exists(path): 35 | os.mkdir(path) 36 | 37 | 38 | def archive_repos(output_dir): 39 | """ 40 | Create tarballs for each of the relevant repositories in DEVSTACK_WORKSPACE 41 | """ 42 | with open('repo.sh', 'r') as f: 43 | script = f.read() 44 | prefix = r'https://github\.com/edx/' 45 | suffix = r'\.git' 46 | repos = re.findall(r'{}[^\.]+{}'.format(prefix, suffix), script) 47 | dirs = [repo[len(prefix) - 1:1 - len(suffix)] for repo in repos if 'edx-themes' not in repo] 48 | dirs.append('devstack') 49 | repositories_dir = os.path.join(output_dir, 'repositories') 50 | cwd = os.getcwd() 51 | os.chdir(DEVSTACK_WORKSPACE) 52 | for directory in dirs: 53 | print('Archiving {}'.format(directory)) 54 | output = os.path.join(repositories_dir, '{}.tar.gz'.format(directory)) 55 | check_output(['tar', 'czf', output, directory], stderr=STDOUT) 56 | os.chdir(cwd) 57 | 58 | 59 | def process_compose_file(filename, output_dir): 60 | """ 61 | Go through the given docker-compose YAML file and save any of the 62 | referenced Docker images and data volumes to tarballs. 
63 | """ 64 | images_dir = os.path.join(output_dir, 'images') 65 | volumes_dir = os.path.join(output_dir, 'volumes') 66 | compose_path = os.path.join(REPO_ROOT, filename) 67 | with open(compose_path, 'r') as f: 68 | devstack = yaml.safe_load(f.read()) 69 | 70 | volume_list = [] 71 | services = devstack['services'] 72 | saved_images = set() 73 | for service_name in services: 74 | service = services[service_name] 75 | image = service['image'] 76 | image = re.sub(r'\$.*', 'latest', image) 77 | container_name = service['container_name'] 78 | # Don't save the same image twice, like edxapp for lms and cms 79 | if image not in saved_images: 80 | output = os.path.join(images_dir, '{}.tar'.format(service_name)) 81 | print('Saving image {}'.format(service_name)) 82 | check_output(['docker', 'save', '--output', output, image], 83 | stderr=STDOUT) 84 | check_output(['gzip', output], stderr=STDOUT) 85 | saved_images.add(image) 86 | 87 | if 'volumes' in service: 88 | volumes = service['volumes'] 89 | for volume in volumes: 90 | if volume[0] == '.': 91 | # Mount of a host directory, skip it 92 | continue 93 | if ':' in volume: 94 | parts = volume.split(':') 95 | volume_name = parts[0] 96 | volume_path = parts[1] 97 | else: 98 | volume_name = volume[1:].replace('/', '_') 99 | volume_path = volume 100 | tarball = '{}.tar.gz'.format(volume_name) 101 | volume_list.append({'container': container_name, 102 | 'path': volume_path, 'tarball': tarball}) 103 | print('Saving volume {}'.format(volume_name)) 104 | check_output(['docker', 'run', '--rm', '--volumes-from', container_name, '-v', 105 | '{}:/backup'.format(volumes_dir), BACKUP_IMAGE, 'tar', 'czf', 106 | '/backup/{}'.format(tarball), volume_path], stderr=STDOUT) 107 | print('Saving image alpine') 108 | output = os.path.join(images_dir, 'alpine.tar') 109 | check_output(['docker', 'save', '--output', output, BACKUP_IMAGE], stderr=STDOUT) 110 | check_output(['gzip', output], stderr=STDOUT) 111 | print('Saving volume metadata') 112 | with open(os.path.join(volumes_dir, 'volumes.json'), 'w') as f: 113 | f.write(json.dumps(volume_list)) 114 | 115 | 116 | if __name__ == '__main__': 117 | parser = argparse.ArgumentParser() 118 | parser.add_argument('output_dir', help='The directory in which to create the devstack snapshot') 119 | args = parser.parse_args() 120 | output_dir = os.path.abspath(args.output_dir) 121 | make_directories(output_dir) 122 | try: 123 | archive_repos(output_dir) 124 | process_compose_file('docker-compose.yml', output_dir) 125 | except CalledProcessError as e: 126 | print(e.output) 127 | raise 128 | copyfile(os.path.join(REPO_ROOT, 'scripts', 'extract_snapshot_linux.sh'), 129 | os.path.join(output_dir, 'linux.sh')) 130 | copyfile(os.path.join(REPO_ROOT, 'scripts', 'extract_snapshot_mac.sh'), 131 | os.path.join(output_dir, 'mac.sh')) 132 | copyfile(os.path.join(REPO_ROOT, 'scripts', 'README.txt'), 133 | os.path.join(output_dir, 'README.txt')) 134 | -------------------------------------------------------------------------------- /tests/README.rst: -------------------------------------------------------------------------------- 1 | Devstack CLI tests 2 | ================== 3 | 4 | These tests rely heavily on the pexpect library (inspired by TCL 5 | Expect); if you're editing or creating tests it is highly recommended 6 | you read up on the gotchas in here: 7 | 8 | https://pexpect.readthedocs.io/en/stable/overview.html 9 | 10 | Debugging tips 11 | -------------- 12 | 13 | If an expectation fails (or an unexpected timeout or EOF occurs) then pexpect 
will throw an exception which contains a printout of the internal state of the pexpect instance. This includes the ``buffer``, ``before``, and ``after`` buffers, which are essential for seeing what the child process's output was. 14 | 15 | However, pexpect truncates two of these to 100 characters by default. To see more, set the undocumented ``str_last_chars`` attribute on the pexpect object to something larger, or to 0 for the full output. 16 | -------------------------------------------------------------------------------- /tests/warn_default.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for make_warn_default_large.sh 3 | """ 4 | 5 | import pexpect 6 | 7 | 8 | def test_warn_default(): 9 | """ 10 | Test that dev.pull (bare) prompts before continuing. 11 | """ 12 | 13 | p = pexpect.spawn('make dev.pull', timeout=15) 14 | p.expect(r'Are you sure you want to run this command') 15 | 16 | p.sendline('') 17 | p.expect(r'docker compose pull --include-deps') 18 | 19 | # Send ^C, don't wait for it to finish 20 | p.sendintr() 21 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | skipsdist=True 3 | envlist = py{38} 4 | 5 | [doc8] 6 | ; D001 = Line too long 7 | ignore=D001 8 | 9 | [testenv:docs] 10 | setenv = 11 | PYTHONPATH = {toxinidir} 12 | allowlist_externals = 13 | make 14 | rm 15 | deps = 16 | -r{toxinidir}/requirements/doc.txt 17 | commands = 18 | doc8 --ignore-path docs/_build README.rst docs 19 | make -C docs clean 20 | make -C docs html 21 | -------------------------------------------------------------------------------- /update-dbs-init-sql-scripts.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Updates MySQL databases dumps with MySQL Docker container schema/data 3 | # Make sure you have added your user to the docker group before running this script 4 | # or use sudo to run it 5 | 6 | set -eu -o pipefail 7 | set -x 8 | 9 | # constants 10 | readonly EDXAPP_MYSQL_DB_USER="edxapp001" 11 | readonly ECOMMERCE_MYSQL_DB_USER="ecomm001" 12 | readonly MYSQL_DB_PASSWORD="password" 13 | readonly EDXAPP_DBS=("edxapp" "edxapp_csmh") 14 | DBS=("ecommerce" "${EDXAPP_DBS[@]}") 15 | 16 | # don't include the demo course in the initial sql since it relies on data being present in mongo 17 | export DEVSTACK_SKIP_DEMO="true" 18 | 19 | 20 | # create a docker devstack with LMS and ecommerce 21 | make destroy 22 | make dev.clone.ssh 23 | make dev.pull.lms+ecommerce 24 | make dev.provision.services.lms+ecommerce 25 | 26 | # dump schema and data from mysql databases in the mysql docker container and copy them to current directory in docker host 27 | MYSQL_DOCKER_CONTAINER="$(make --silent --no-print-directory dev.print-container.mysql80)" 28 | for DB_NAME in "${DBS[@]}"; do 29 | DB_CREATION_SQL_SCRIPT="${DB_NAME}.sql" 30 | if [[ " ${EDXAPP_DBS[@]} " =~ " ${DB_NAME} " ]]; then 31 | MYSQL_DB_USER=${EDXAPP_MYSQL_DB_USER} 32 | else 33 | MYSQL_DB_USER=${ECOMMERCE_MYSQL_DB_USER} 34 | fi 35 | docker exec ${MYSQL_DOCKER_CONTAINER} /bin/bash -c "mysqldump -u ${MYSQL_DB_USER} -p${MYSQL_DB_PASSWORD} --no-tablespaces --add-drop-database --skip-add-drop-table --databases ${DB_NAME} > ${DB_CREATION_SQL_SCRIPT}" 36 | docker cp ${MYSQL_DOCKER_CONTAINER}:/${DB_CREATION_SQL_SCRIPT} . 
37 | done 38 | -------------------------------------------------------------------------------- /upgrade_mongo_4_0.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | # This script will upgrade a devstack that was previously running MongoDB 3.2, 3.4 or 3.6 to MongoDB 4.0 5 | 6 | . scripts/colors.sh 7 | 8 | export MONGO_VERSION=3.4.24 9 | current_mongo_version="3.4" 10 | echo -e "${GREEN}Starting Mongo ${MONGO_VERSION}${NC}" 11 | make dev.up.mongo 12 | mongo_container="$(make --silent --no-print-directory dev.print-container.mongo)" 13 | 14 | echo -e "${GREEN}Waiting for MongoDB...${NC}" 15 | until docker exec "$mongo_container" mongo --eval 'db.serverStatus()' &> /dev/null 16 | do 17 | if docker logs "$mongo_container" | grep -q "BadValue: Invalid value for version, found 4.0, expected '3.4' or '3.2'"; then 18 | echo -e "${YELLOW}Already upgraded to Mongo 4.0, exiting${NC}" 19 | exit 20 | elif docker logs "$mongo_container" | grep -q "BadValue: Invalid value for version, found 3.6, expected '3.4' or '3.2'"; then 21 | current_mongo_version="3.6" 22 | break 23 | fi 24 | printf "." 25 | sleep 1 26 | done 27 | 28 | if [[ $current_mongo_version == "3.4" ]]; then 29 | echo -e "${GREEN}MongoDB ready.${NC}" 30 | MONGO_VERSION_LIVE=$(docker exec -it "$mongo_container" mongo --quiet --eval "printjson(db.version())") 31 | MONGO_VERSION_COMPAT=$(docker exec -it "$mongo_container" mongo --quiet \ 32 | --eval "printjson(db.adminCommand( { getParameter: 1, featureCompatibilityVersion: 1 } )['featureCompatibilityVersion'])") 33 | echo -e "${GREEN}Mongo Server version: ${MONGO_VERSION_LIVE}${NC}" 34 | echo -e "${GREEN}Mongo FeatureCompatibilityVersion version: ${MONGO_VERSION_COMPAT}${NC}" 35 | 36 | if echo "${MONGO_VERSION_COMPAT}" | grep -q "3\.2" ; then 37 | echo -e "${GREEN}Upgrading FeatureCompatibilityVersion to 3.4${NC}" 38 | docker exec -it "$mongo_container" mongo --eval "db.adminCommand( { setFeatureCompatibilityVersion: \"3.4\" } )" 39 | else 40 | echo -e "${GREEN}FeatureCompatibilityVersion already set to 3.4${NC}" 41 | fi 42 | fi 43 | 44 | 45 | export MONGO_VERSION=3.6.17 46 | 47 | echo 48 | echo -e "${GREEN}Restarting Mongo on version ${MONGO_VERSION}${NC}" 49 | make dev.up.mongo 50 | mongo_container="$(make --silent --no-print-directory dev.print-container.mongo)" 51 | 52 | echo -e "${GREEN}Waiting for MongoDB...${NC}" 53 | until docker exec "$mongo_container" mongo --eval 'db.serverStatus()' &> /dev/null 54 | do 55 | printf "." 
56 | sleep 1 57 | done 58 | 59 | echo -e "${GREEN}MongoDB ready.${NC}" 60 | MONGO_VERSION_LIVE=$(docker exec -it "$mongo_container" mongo --quiet --eval "printjson(db.version())") 61 | MONGO_VERSION_COMPAT=$(docker exec -it "$mongo_container" mongo --quiet \ 62 | --eval "printjson(db.adminCommand( { getParameter: 1, featureCompatibilityVersion: 1 } )['featureCompatibilityVersion'])") 63 | echo -e "${GREEN}Mongo Server version: ${MONGO_VERSION_LIVE}${NC}" 64 | echo -e "${GREEN}Mongo FeatureCompatibilityVersion version: ${MONGO_VERSION_COMPAT}${NC}" 65 | 66 | if echo "${MONGO_VERSION_COMPAT}" | grep -q "3\.4" ; then 67 | echo -e "${GREEN}Upgrading FeatureCompatibilityVersion to 3.6${NC}" 68 | docker exec -it "$mongo_container" mongo --eval "db.adminCommand( { setFeatureCompatibilityVersion: \"3.6\" } )" 69 | else 70 | echo -e "${GREEN}FeatureCompatibilityVersion already set to 3.6${NC}" 71 | fi 72 | 73 | # Upgrade to mongo 4 74 | export MONGO_VERSION=4.0.22 75 | 76 | echo 77 | echo -e "${GREEN}Restarting Mongo on version ${MONGO_VERSION}${NC}" 78 | make dev.up.mongo 79 | mongo_container="$(make --silent --no-print-directory dev.print-container.mongo)" 80 | 81 | echo -e "${GREEN}Waiting for MongoDB...${NC}" 82 | until docker exec "$mongo_container" mongo --eval 'db.serverStatus()' &> /dev/null 83 | do 84 | printf "." 85 | sleep 1 86 | done 87 | 88 | echo -e "${GREEN}MongoDB ready.${NC}" 89 | MONGO_VERSION_LIVE=$(docker exec -it "$mongo_container" mongo --quiet --eval "printjson(db.version())") 90 | MONGO_VERSION_COMPAT=$(docker exec -it "$mongo_container" mongo --quiet \ 91 | --eval "printjson(db.adminCommand( { getParameter: 1, featureCompatibilityVersion: 1 } )['featureCompatibilityVersion'])") 92 | echo -e "${GREEN}Mongo Server version: ${MONGO_VERSION_LIVE}${NC}" 93 | echo -e "${GREEN}Mongo FeatureCompatibilityVersion version: ${MONGO_VERSION_COMPAT}${NC}" 94 | 95 | if echo "${MONGO_VERSION_COMPAT}" | grep -q "3\.6" ; then 96 | echo -e "${GREEN}Upgrading FeatureCompatibilityVersion to 4.0${NC}" 97 | docker exec -it "$mongo_container" mongo --eval "db.adminCommand( { setFeatureCompatibilityVersion: \"4.0\" } )" 98 | else 99 | echo -e "${GREEN}FeatureCompatibilityVersion already set to 4.0${NC}" 100 | fi 101 | -------------------------------------------------------------------------------- /upgrade_mongo_4_2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | # This script will upgrade a devstack that was previously running MongoDB 4.0 to MongoDB 4.2 5 | 6 | . scripts/colors.sh 7 | 8 | # Upgrade to mongo 4.2 9 | export MONGO_VERSION=4.2.14 10 | 11 | echo 12 | echo -e "${GREEN}Restarting Mongo on version ${MONGO_VERSION}${NC}" 13 | make dev.up.mongo 14 | mongo_container="$(make --silent --no-print-directory dev.print-container.mongo)" 15 | 16 | echo -e "${GREEN}Waiting for MongoDB...${NC}" 17 | until docker exec "$mongo_container" mongo --eval 'db.serverStatus()' &> /dev/null 18 | do 19 | printf "." 
20 | sleep 1 21 | done 22 | 23 | echo -e "${GREEN}MongoDB ready.${NC}" 24 | MONGO_VERSION_LIVE=$(docker exec -it "$mongo_container" mongo --quiet --eval "printjson(db.version())") 25 | MONGO_VERSION_COMPAT=$(docker exec -it "$mongo_container" mongo --quiet \ 26 | --eval "printjson(db.adminCommand( { getParameter: 1, featureCompatibilityVersion: 1 } )['featureCompatibilityVersion'])") 27 | echo -e "${GREEN}Mongo Server version: ${MONGO_VERSION_LIVE}${NC}" 28 | echo -e "${GREEN}Mongo FeatureCompatibilityVersion version: ${MONGO_VERSION_COMPAT}${NC}" 29 | 30 | if echo "${MONGO_VERSION_COMPAT}" | grep -q "4\.0" ; then 31 | echo -e "${GREEN}Upgrading FeatureCompatibilityVersion to 4.2${NC}" 32 | docker exec -it "$mongo_container" mongo --eval "db.adminCommand( { setFeatureCompatibilityVersion: \"4.2\" } )" 33 | else 34 | echo -e "${GREEN}FeatureCompatibilityVersion already set to 4.2${NC}" 35 | fi 36 | -------------------------------------------------------------------------------- /upgrade_mongo_4_4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | # This script will upgrade a devstack that was previously running MongoDB 4.2 to MongoDB 4.4 5 | 6 | . scripts/colors.sh 7 | 8 | # Upgrade to mongo 4.4 9 | export MONGO_VERSION=4.4.18 10 | 11 | echo 12 | echo -e "${GREEN}Restarting Mongo on version ${MONGO_VERSION}${NC}" 13 | make dev.up.mongo 14 | mongo_container="$(make --silent --no-print-directory dev.print-container.mongo)" 15 | 16 | echo -e "${GREEN}Waiting for MongoDB...${NC}" 17 | until docker exec "$mongo_container" mongo --eval 'db.serverStatus()' &> /dev/null 18 | do 19 | printf "." 20 | sleep 1 21 | done 22 | 23 | echo -e "${GREEN}MongoDB ready.${NC}" 24 | MONGO_VERSION_LIVE=$(docker exec -it "$mongo_container" mongo --quiet --eval "printjson(db.version())") 25 | MONGO_VERSION_COMPAT=$(docker exec -it "$mongo_container" mongo --quiet \ 26 | --eval "printjson(db.adminCommand( { getParameter: 1, featureCompatibilityVersion: 1 } )['featureCompatibilityVersion'])") 27 | echo -e "${GREEN}Mongo Server version: ${MONGO_VERSION_LIVE}${NC}" 28 | echo -e "${GREEN}Mongo FeatureCompatibilityVersion version: ${MONGO_VERSION_COMPAT}${NC}" 29 | 30 | if echo "${MONGO_VERSION_COMPAT}" | grep -q "4\.2" ; then 31 | echo -e "${GREEN}Upgrading FeatureCompatibilityVersion to 4.4${NC}" 32 | docker exec -it "$mongo_container" mongo --eval "db.adminCommand( { setFeatureCompatibilityVersion: \"4.4\" } )" 33 | else 34 | echo -e "${GREEN}FeatureCompatibilityVersion already set to 4.4${NC}" 35 | fi 36 | -------------------------------------------------------------------------------- /upgrade_mongo_5_0.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu -o pipefail 3 | 4 | # This script will upgrade a devstack that was previously running MongoDB 4.4 to MongoDB 5.0.24 5 | 6 | . scripts/colors.sh 7 | 8 | # Upgrade to mongo 5.0.24 9 | export MONGO_VERSION=5.0.24 10 | 11 | echo 12 | echo -e "${GREEN}Restarting Mongo on version ${MONGO_VERSION}${NC}" 13 | make dev.up.mongo 14 | mongo_container="$(make --silent --no-print-directory dev.print-container.mongo)" 15 | 16 | echo -e "${GREEN}Waiting for MongoDB...${NC}" 17 | until docker exec "$mongo_container" mongo --eval 'db.serverStatus()' &> /dev/null 18 | do 19 | printf "." 
20 | sleep 1 21 | done 22 | 23 | echo -e "${GREEN}MongoDB ready.${NC}" 24 | MONGO_VERSION_LIVE=$(docker exec -it "$mongo_container" mongo --quiet --eval "printjson(db.version())") 25 | MONGO_VERSION_COMPAT=$(docker exec -it "$mongo_container" mongo --quiet \ 26 | --eval "printjson(db.adminCommand( { getParameter: 1, featureCompatibilityVersion: 1 } )['featureCompatibilityVersion'])") 27 | echo -e "${GREEN}Mongo Server version: ${MONGO_VERSION_LIVE}${NC}" 28 | echo -e "${GREEN}Mongo FeatureCompatibilityVersion version: ${MONGO_VERSION_COMPAT}${NC}" 29 | 30 | if echo "${MONGO_VERSION_COMPAT}" | grep -q "4\.4" ; then 31 | echo -e "${GREEN}Upgrading FeatureCompatibilityVersion to 5.0${NC}" 32 | docker exec -it "$mongo_container" mongo --eval "db.adminCommand( { setFeatureCompatibilityVersion: \"5.0\" } )" 33 | else 34 | echo -e "${GREEN}FeatureCompatibilityVersion already set to 5.0${NC}" 35 | fi 36 | -------------------------------------------------------------------------------- /wait-ready.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Wait for the listed services to become ready. 3 | # 4 | # This does not start the containers; that should be performed separately 5 | # via `make dev.up` in order to allow for parallel startup. 6 | 7 | set -eu -o pipefail 8 | 9 | function print_usage { 10 | echo "Usage: $0 service1 service2 ..." 11 | } 12 | 13 | if [[ $# == 0 ]]; then 14 | print_usage 15 | exit 0 16 | fi 17 | 18 | for service_name in "$@"; do 19 | until ./check.sh "$service_name" >/dev/null 2>&1; do 20 | printf "." >&2 21 | sleep 1 22 | done 23 | echo >&2 "$service_name is ready" 24 | done 25 | --------------------------------------------------------------------------------