├── .devcontainer
├── Dockerfile
└── devcontainer.json
├── .github
└── workflows
│ ├── add-operation-column-and-index.yml
│ ├── connect-to-gcp.yaml
│ ├── create-database.yml
│ ├── create-db-branch-and-pr-dr.yml
│ ├── hello.yaml
│ ├── image_processing.yml
│ ├── issue-ops-ps-commands.yml
│ ├── matrix-build-custom-runner-nektos.yml
│ ├── matrix-build-custom-runner.yml
│ ├── matrix-build-hosted-runner.yml
│ ├── merge-latest-open-deploy-request.yml
│ ├── remove-database.yml
│ ├── remove-operation-column-and-index.yml
│ ├── show-node-allocation.yaml
│ ├── streampixels.yaml
│ ├── visualize-matrix-build-gui.yaml
│ ├── visualize-matrix-build-led.yml
│ └── visualize-matrix-build-nektos.yml
├── .gitignore
├── .pscale
└── cli-helper-scripts
│ ├── add-operation-column-and-index.sh
│ ├── approve-deploy-request.sh
│ ├── authenticate-ps.sh
│ ├── create-branch-connection-string.sh
│ ├── create-database.sh
│ ├── create-db-branch-dr-and-connection.sh
│ ├── delete-db-branch.sh
│ ├── export-db-connection-string.sh
│ ├── merge-deploy-request.sh
│ ├── merge-latest-open-deploy-request.sh
│ ├── ps-create-helper-functions.sh
│ ├── ps-env-template.sh
│ ├── remove-database.sh
│ ├── remove-operation-column-and-index.sh
│ ├── retrieve-branch-info.sh
│ ├── retrieve-deploy-request-info.sh
│ ├── set-db-and-org-and-branch-name.sh
│ ├── set-db-and-org-name.sh
│ ├── set-db-url.sh
│ ├── update-db-branch.sh
│ ├── use-pscale-docker-image.sh
│ ├── wait-for-branch-readiness.sh
│ └── wait-for-deploy-request-merged.sh
├── Dockerfile.base
├── README.md
├── __pycache__
└── constants.cpython-39.pyc
├── actions-runner-controller-runner-deployment-ese.yml
├── advanced-schema-stream-parameters.json
├── certs.pem
├── connect-to-kubernetes-via-act.sh
├── constants.py
├── events-nektos.json
├── fluxbox
└── menu
├── gui.py
├── images
├── blackandblue.png
├── blackbluenumbers.png
├── blackbluereset.png
├── done.png
├── github-actions.png
├── github-longer.png
├── hubot.png
├── images
│ ├── static_image.jpg
│ └── winterfest.png
├── matrix-construct-grid.png
├── matrix-construct-vitess.png
├── matrix-finished.png
├── matrix-reset.png
├── matrix-start-witout-numbers.png
├── matrix-start.png
├── monahubot.png
├── non-blocking-schema-change-text.png
├── non-blocking-schema-change.png
├── numbers-blue.png
├── numbers-grey.png
├── numbers-white.png
├── ps-finished.png
├── ps-start.png
├── publish-pixels-ps.py
├── publish-pixels.py
├── reset-green.png
├── reset-grey.png
├── reset.pxi
├── run.py
├── summer-finished.png
├── summer-start.png
└── visualize-matrix-build.py
├── library-scripts
├── common-debian.sh
├── desktop-lite-debian.sh
└── docker-in-docker-debian.sh
├── ps-database-scripts
├── add-operation-column-and-index.sh
├── create-database.sh
├── merge-latest-open-deploy-request.sh
├── remove-operation-column-and-index.sh
├── use-pscale-docker-image.sh
├── wait-for-branch-readiness.sh
└── wait-for-deploy-request-merged.sh
├── redis-scripts
├── expose-redis.yml
└── redis.md
├── render-matrix-cell.py
├── requirements.txt
├── samplebase.py
├── show-node-allocation-aws.sh
├── show-node-allocation-blinkt.py
├── show-node-allocation-gcp.sh
├── show-node-allocation-gui.py
├── show-node-allocation-raspi.sh
├── show-node-allocation.py
├── simulate-action-workflow.sh
├── simulate-matrix-build-custom-runner.sh
├── start-advanced-schema-stream.sh
├── start-gui.sh
├── stream-pixels-gcp.sh
├── stream-pixels-gui.py
├── stream-pixels-raspi.sh
├── stream-pixels.py
└── test-parameters.json
/.devcontainer/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ghcr.io/jonico/codespace-with-vnc-chrome-and-ps:latest
2 |
3 | COPY fluxbox/menu /home/vscode/.fluxbox/
4 |
5 | VOLUME [ "/var/lib/docker" ]
6 |
7 |
8 | ENV DBUS_SESSION_BUS_ADDRESS="autolaunch:" \
9 | VNC_RESOLUTION="1440x768x16" \
10 | VNC_DPI="96" \
11 | VNC_PORT="5901" \
12 | NOVNC_PORT="6080" \
13 | DISPLAY=":1" \
14 | LANG="en_US.UTF-8" \
15 | LANGUAGE="en_US.UTF-8"
16 | ENTRYPOINT ["/usr/local/share/desktop-init.sh", "/usr/local/share/docker-init.sh" ]
17 | CMD ["sleep", "infinity"]
18 |
--------------------------------------------------------------------------------
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.192.0/containers/python-3
3 | {
4 | "name": "Python 3",
5 | "build": {
6 | "dockerfile": "Dockerfile",
7 | "context": "..",
8 | "args": {
9 | // Update 'VARIANT' to pick a Python version: 3, 3.6, 3.7, 3.8, 3.9
10 | "VARIANT": "3",
11 | // Options for the Node.js version to install
12 | "NODE_VERSION": "lts/*"
13 | },
14 | },
15 |
16 | // Set *default* container specific settings.json values on container create.
17 | "settings": {
18 | "python.pythonPath": "/usr/local/bin/python",
19 | "python.languageServer": "Pylance",
20 | "python.linting.enabled": true,
21 | "python.linting.pylintEnabled": true,
22 | "python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8",
23 | "python.formatting.blackPath": "/usr/local/py-utils/bin/black",
24 | "python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf",
25 | "python.linting.banditPath": "/usr/local/py-utils/bin/bandit",
26 | "python.linting.flake8Path": "/usr/local/py-utils/bin/flake8",
27 | "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy",
28 | "python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle",
29 | "python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle",
30 | "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint"
31 | },
32 |
33 | // Add the IDs of extensions you want installed when the container is created.
34 | "extensions": [
35 | "ms-python.python",
36 | "ms-python.vscode-pylance",
37 | "ms-azuretools.vscode-docker"
38 | ],
39 |
40 | // Use 'forwardPorts' to make a list of ports inside the container available locally.
41 | // "forwardPorts": [],
42 |
43 | // Use 'postCreateCommand' to run commands after the container is created.
44 | // "postCreateCommand": "pip3 install --user -r requirements.txt",
45 |
46 | // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
47 | "remoteUser": "vscode",
48 | "runArgs": ["--init", "--security-opt", "seccomp=unconfined", "--privileged"],
49 | "forwardPorts": [6080, 5901],
50 | "overrideCommand": false,
51 | "mounts": ["source=dind-var-lib-docker,target=/var/lib/docker,type=volume"]
52 | }
53 |
--------------------------------------------------------------------------------
/.github/workflows/add-operation-column-and-index.yml:
--------------------------------------------------------------------------------
1 | name: 02 - Add Operation Column & Index
2 |
3 | on:
4 | workflow_dispatch
5 |
6 | env:
7 | pscale_base_directory: .pscale
8 |
9 | jobs:
10 |
11 | add-operation-column-and-index:
12 | name: "Add operation - click here"
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: checkout
17 | uses: actions/checkout@v2
18 |
19 | - name: Add operation - if asked, please click on displayed link to authenticate
20 | timeout-minutes: 3
21 | env:
22 | PLANETSCALE_SERVICE_TOKEN_NAME: ${{secrets.PLANETSCALE_SERVICE_TOKEN_NAME}}
23 | PLANETSCALE_SERVICE_TOKEN: ${{secrets.PLANETSCALE_SERVICE_TOKEN}}
24 | ORG_NAME: ${{secrets.ORG_NAME}}
25 | DB_NAME: ${{secrets.DB_NAME}}
26 | GITHUB_USER: ${{github.actor}}
27 | working-directory: ${{env.pscale_base_directory}}/cli-helper-scripts/
28 | run: ./add-operation-column-and-index.sh
29 | - name: Please check out deployment request and branch connection string from step above
30 | run: |
31 | echo "Please check out deployment request and branch connection string from step above."
32 | sleep 10
33 |
--------------------------------------------------------------------------------
/.github/workflows/connect-to-gcp.yaml:
--------------------------------------------------------------------------------
1 | name: Connect to GCP
2 |
3 | on:
4 | workflow_dispatch:
5 |
6 | jobs:
7 | connect-to-gcp:
8 | name: "Connect to GCP"
9 | runs-on: ubuntu-latest
10 | steps:
11 | #- uses: actions/setup-python@v2
12 | # with:
13 | # python-version: '3.x' # Version range or exact version of a Python version to use, using SemVer's version range syntax
14 | # architecture: 'x64' # optional x64 or x86. Defaults to x64 if not specified
15 | - name: Set up Cloud SDK
16 | uses: google-github-actions/setup-gcloud@master
17 | with:
18 | project_id: ${{ secrets.PROJECT_ID }}
19 | service_account_key: ${{ secrets.GCP_SA_KEY }}
20 | export_default_credentials: true
21 |
22 | - id: get-credentials
23 | uses: google-github-actions/get-gke-credentials@main
24 | with:
25 | cluster_name: ${{secrets.CLUSTER_NAME}}
26 | location: ${{secrets.CLUSTER_LOCATION}}
27 |
28 | - id: install-tools-gcp
29 | name: "Install kubectl"
30 | run: gcloud -q components install kubectl
31 |
32 | - name: Show cluster nodes
33 | run: kubectl get nodes
34 |
35 | - name: Show how to use kubernetes config locally
36 | run: |
37 | echo "To connect to cluster via kubectl run the following command: "
38 | echo "export KUBECONFIG=$KUBECONFIG"
39 |
--------------------------------------------------------------------------------
/.github/workflows/create-database.yml:
--------------------------------------------------------------------------------
1 | name: 01 - Create Database
2 |
3 | on:
4 | workflow_dispatch
5 |
6 | env:
7 | pscale_base_directory: .pscale
8 |
9 | jobs:
10 |
11 | create-database:
12 | name: "Create database - click here"
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: checkout
17 | uses: actions/checkout@v2
18 |
19 | - name: Create database - please click on displayed link to authenticate
20 | timeout-minutes: 3
21 | env:
22 | PLANETSCALE_SERVICE_TOKEN_NAME: ${{secrets.PLANETSCALE_SERVICE_TOKEN_NAME}}
23 | ORG_NAME: ${{secrets.ORG_NAME}}
24 | DB_NAME: ${{secrets.DB_NAME}}
25 | GITHUB_USER: ${{github.actor}}
26 | working-directory: ${{env.pscale_base_directory}}/cli-helper-scripts/
27 | run: ./create-database.sh
28 | - name: Please set MY_DB_URL with instructions from step above
29 | run: |
30 | echo "Have a look at the end of the output of the previous step to find your one-time link with instructions how to set the database repo secret ..."
31 | sleep 10
32 |
--------------------------------------------------------------------------------
/.github/workflows/create-db-branch-and-pr-dr.yml:
--------------------------------------------------------------------------------
1 | name: Create branch/PR for schema change
2 |
3 | env:
4 | pscale_base_directory: .pscale
5 |
6 | on:
7 | workflow_dispatch:
8 | inputs:
9 | branch:
10 | description: DB and PR branch name
11 | required: true
12 | default: "add-operation-column-and-index"
13 | ddl_statements:
14 | description: 'DDL statements to run in new branch'
15 | required: true
16 | default: 'alter table pixel_matrix add column operation varchar(10) default NULL; create index environment_operation on pixel_matrix(environment, operation);'
17 |
18 | jobs:
19 | create_branch_dr_and_pr:
20 | name: Create branch/PR/DR - click here
21 |
22 | runs-on: ubuntu-latest
23 |
24 | steps:
25 |
26 | - name: Validate parameters
27 | id: validate_params
28 | uses: actions/github-script@v3
29 | env:
30 | BRANCH_NAME: ${{ github.event.inputs.branch }}
31 | DDL_STATEMENTS: ${{ github.event.inputs.ddl_statements }}
32 | with:
33 | github-token: ${{ secrets.GITHUB_TOKEN }}
34 | script: |
35 | const branch_name = process.env.BRANCH_NAME;
36 | const ddl_statements = process.env.DDL_STATEMENTS;
37 |
38 | if (! /^[a-zA-Z0-9_-]+$/.test(branch_name)) {
39 | const error = `The branch name contains illegal characters: ${branch_name}`;
40 | core.error(error);
41 | core.setFailed(error);
42 | }
43 |
44 | if (! /^.*;$/.test(ddl_statements)) {
45 | const error = `The ddl statements do not end with an ;: ${ddl_statements}`;
46 | core.error(error);
47 | core.setFailed(error);
48 | }
49 |
50 | core.setOutput('branch_name', branch_name);
51 | core.setOutput('ddl_statements', ddl_statements);
52 |
53 | - name: Checkout
54 | uses: actions/checkout@v2
55 |
56 | - name: Create DB branch and deploy request- if asked, please click on displayed link to authenticate
57 | id: create-db-branch-and-dr
58 | timeout-minutes: 3
59 | env:
60 | PLANETSCALE_SERVICE_TOKEN_NAME: ${{secrets.PLANETSCALE_SERVICE_TOKEN_NAME}}
61 | PLANETSCALE_SERVICE_TOKEN: ${{secrets.PLANETSCALE_SERVICE_TOKEN}}
62 | ORG_NAME: ${{secrets.ORG_NAME}}
63 | DB_NAME: ${{secrets.DB_NAME}}
64 | GITHUB_USER: ${{github.actor}}
65 | DDL_STATEMENTS: ${{ steps.validate_params.outputs.ddl_statements }}
66 | BRANCH_NAME: ${{ steps.validate_params.outputs.branch_name }}
67 | working-directory: ${{env.pscale_base_directory}}/cli-helper-scripts/
68 | run: |
69 | ./create-db-branch-dr-and-connection.sh "$BRANCH_NAME" "$DDL_STATEMENTS"
70 |
71 | - name: Write information about associated PS database entities
72 | env:
73 | BRANCH_NAME: ${{ steps.create-db-branch-and-dr.outputs.BRANCH_NAME }}
74 | DB_NAME: ${{ steps.create-db-branch-and-dr.outputs.DB_NAME }}
75 | ORG_NAME: ${{ steps.create-db-branch-and-dr.outputs.ORG_NAME }}
76 | DEPLOY_REQUEST_NUMBER: ${{ steps.create-db-branch-and-dr.outputs.DEPLOY_REQUEST_NUMBER }}
77 | DEPLOY_REQUEST_URL: ${{ steps.create-db-branch-and-dr.outputs.DEPLOY_REQUEST_URL }}
78 | BRANCH_URL: ${{ steps.create-db-branch-and-dr.outputs.BRANCH_URL }}
79 | working-directory: ${{env.pscale_base_directory}}/cli-helper-scripts/
80 | run: |
81 | mkdir -p ../env/
82 | envsubst < ps-env-template.sh > ../env/ps-env-${BRANCH_NAME}.sh
83 | chmod a+x ../env/ps-env-${BRANCH_NAME}.sh
84 |
85 |
86 | - name: Create corresponding Git branch and Pull Request
87 | id: create_pr
88 | uses: peter-evans/create-pull-request@v3.7.0
89 | env:
90 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
91 | with:
92 | branch: ${{ steps.validate_params.outputs.branch_name }}
93 | title: ${{ steps.validate_params.outputs.branch_name }}
94 | delete-branch: true
95 | commit-message: "DDL statements for DB branch ${{ steps.validate_params.outputs.branch_name }}"
96 | body: >
97 | This PR contains the code changes needed to go along with the following database changes:
98 |
99 | * :seedling: __DB-Branch__: [${{ steps.create-db-branch-and-dr.outputs.branch_name }}](${{ steps.create-db-branch-and-dr.outputs.BRANCH_URL }})
100 |
101 | * :train2: __Deploy-Request URL__: ${{ steps.create-db-branch-and-dr.outputs.DEPLOY_REQUEST_URL }}
102 |
103 | * :lock: __Branch connection info__: [One-time link](${{ steps.create-db-branch-and-dr.outputs.CONNECTION_STRING_LINK }})
104 |
105 |
106 | :scroll: Proposed DDL-Statements:
107 |
108 |
109 | ```
110 |
111 | ${{ steps.validate_params.outputs.ddl_statements }}
112 |
113 | ```
114 |
115 |
116 |
117 |
118 |
119 | 📖 Calculated schema changes:
120 |
121 |
122 | ```
123 |
124 | ${{ steps.create-db-branch-and-dr.outputs.BRANCH_DIFF }}
125 | ```
126 |
127 |
128 |
129 |
130 | If you are ok with the schema changes and have carefully reviewed them, you can merge them with a `/ps-merge` comment
131 |
132 | - name: Please check out branch and deployment request / PR created
133 | run: |
134 | echo "::notice ::Please check out deployment request and branch in created PR: ${{ steps.create_pr.outputs.pull-request-url }}"
135 | sleep 10
--------------------------------------------------------------------------------
/.github/workflows/hello.yaml:
--------------------------------------------------------------------------------
1 | # This is a basic workflow that is manually triggered
2 |
3 | name: Raspberry PI Greetings
4 |
5 | # Controls when the action will run. Workflow runs when manually triggered using the UI
6 | # or API.
7 | on:
8 | workflow_dispatch:
9 | # Inputs the workflow accepts.
10 | inputs:
11 | name:
12 | # Friendly description to be shown in the UI instead of 'name'
13 | description: 'Person to greet'
14 | # Default value if no value is explicitly provided
15 | default: 'World'
16 | # Input has to be provided for the workflow to run
17 | required: true
18 |
19 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel
20 | jobs:
21 | # This workflow contains a single job called "greet"
22 | greet:
23 | timeout-minutes: 1
24 | # The type of runner that the job will run on
25 | runs-on: [ raspberry-pi-3b ]
26 |
27 | # Steps represent a sequence of tasks that will be executed as part of the job
28 | steps:
29 | # Runs a single command using the runner's shell
30 | - name: Send greeting
31 | run: |
32 | echo "Hello ${{ github.event.inputs.name }}"
33 | cd /rpi-rgb-led-matrix/examples-api-use
34 | ./scrolling-text-example --led-rows=32 --led-cols=64 -b 40 -C 0,0,255 -f ../fonts/10x20.bdf "Hello ${{ github.event.inputs.name }}"
35 |
--------------------------------------------------------------------------------
/.github/workflows/image_processing.yml:
--------------------------------------------------------------------------------
1 | name: Image Processing and Display
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | image_url:
7 | description: Image URL
8 | required: true
9 | default: https://octodex.github.com/images/linktocat.jpg
10 |
11 | jobs:
12 | process_image:
13 | name: Fetch and Process Image
14 |
15 | runs-on: foobar
16 |
17 | steps:
18 | - name: checkout
19 | uses: actions/checkout@v2
20 |
21 | - name: Fetch and convert image
22 | id: processed_image
23 | uses: baseline/graphicsmagick-image-converter-action@v1
24 | with:
25 | image_url: ${{ github.event.inputs.image_url }}
26 | output_image_width: 64
27 |
28 | - name: Stream image
29 | env:
30 | REDIS_PASSWORD: ${{secrets.REDIS_PASSWORD}}
31 | run: |
32 | python3 publish-pixels.py --max-x=64 --max-y=32 --job-x=0 --job-y=0 --image-file=../${{ steps.processed_image.outputs.image }}
33 | working-directory: ./images
34 |
--------------------------------------------------------------------------------
/.github/workflows/matrix-build-custom-runner-nektos.yml:
--------------------------------------------------------------------------------
1 | name: Matrix Build Custom Runner Nektos
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | image_url:
7 | description: Image URL
8 | required: true
9 | default: "images/matrix-finished.png"
10 | duration:
11 | description: 'Number of job duration in ms'
12 | required: true
13 | default: '500'
14 | x:
15 | description: 'Number of cells on x-Axis'
16 | required: true
17 | default: '16'
18 | y:
19 | description: 'Number of cells on y-Axis'
20 | required: true
21 | default: '12'
22 | pixel-x:
23 | description: 'Number of pixels on x-Axis'
24 | required: true
25 | default: '800'
26 | pixel-y:
27 | description: 'Number of pixels on y-Axis'
28 | required: true
29 | default: '600'
30 | repetitions:
31 | description: 'Number of matrix cell render cycles'
32 | required: true
33 | default: '1'
34 | repetition-delay:
35 | description: 'Wait time in ms between render cycles'
36 | required: true
37 | default: '5000'
38 | connections-per-cell:
39 | description: 'DB connections per matrix cell'
40 | required: true
41 | default: '1'
42 |
43 |
44 | jobs:
45 |
46 | generate-matrix:
47 | name: "Generate matrix job specs"
48 | outputs:
49 | x: ${{ steps.generate-matrix.outputs.x }}
50 | y: ${{ steps.generate-matrix.outputs.y }}
51 | runs-on: custom-runner
52 | steps:
53 | - name: generate-matrix
54 | id: generate-matrix
55 | run: |
56 | echo "::set-output name=x::[`seq -s , ${{ github.event.inputs.x }}`]"
57 | echo "::set-output name=y::[`seq -s , ${{ github.event.inputs.y }}`]"
58 |
59 | enter-matrix:
60 | name: "Render Matrix Cell"
61 | runs-on: custom-runner
62 | needs: [generate-matrix]
63 |
64 | strategy:
65 | fail-fast: false
66 | matrix:
67 | x: [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
68 | y: [1,2,3,4,5,6,7,8,9,10,11,12]
69 |
70 | steps:
71 | - name: checkout
72 | uses: actions/checkout@v2
73 |
74 | - name: Stream pixels into DB
75 | env:
76 | DATABASE_URL: ${{secrets.DATABASE_URL}}
77 | run: python3 render-matrix-cell.py --max-x='${{github.event.inputs.pixel-x}}' --max-y='${{github.event.inputs.pixel-y}}' --job-x='${{matrix.x}}' --job-y='${{matrix.y}}' --max-job-x='${{github.event.inputs.x}}' --max-job-y='${{github.event.inputs.y}}' --duration='${{github.event.inputs.duration}}' --environment='${{github.actor}}' --image-file='${{ github.event.inputs.image_url }}' --repetitions '${{github.event.inputs.repetitions}}' --connections '${{github.event.inputs.connections-per-cell}}' --repetition-delay '${{github.event.inputs.repetition-delay}}'
78 |
--------------------------------------------------------------------------------
/.github/workflows/matrix-build-custom-runner.yml:
--------------------------------------------------------------------------------
1 | name: Matrix Build Custom Runner
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | image_url:
7 | description: Image URL
8 | required: true
9 | default: "images/matrix-finished.png"
10 | duration:
11 | description: 'Number of job duration in ms'
12 | required: true
13 | default: '500'
14 | x:
15 | description: 'Number of cells on x-Axis'
16 | required: true
17 | default: '16'
18 | y:
19 | description: 'Number of cells on y-Axis'
20 | required: true
21 | default: '12'
22 | pixel-x:
23 | description: 'Number of pixels on x-Axis'
24 | required: true
25 | default: '800'
26 | pixel-y:
27 | description: 'Number of pixels on y-Axis'
28 | required: true
29 | default: '600'
30 | repetitions:
31 | description: 'Number of matrix cell render cycles'
32 | required: true
33 | default: '1'
34 | repetition-delay:
35 | description: 'Wait time in ms between render cycles'
36 | required: true
37 | default: '5000'
38 | connections-per-cell:
39 | description: 'DB connections per matrix cell'
40 | required: true
41 | default: '1'
42 |
43 |
44 | jobs:
45 |
46 | generate-matrix:
47 | name: "Generate matrix job specs"
48 | outputs:
49 | x: ${{ steps.generate-matrix.outputs.x }}
50 | y: ${{ steps.generate-matrix.outputs.y }}
51 | runs-on: custom-runner
52 | steps:
53 | - name: generate-matrix
54 | id: generate-matrix
55 | run: |
56 | echo "::set-output name=x::[`seq -s , ${{ github.event.inputs.x }}`]"
57 | echo "::set-output name=y::[`seq -s , ${{ github.event.inputs.y }}`]"
58 |
59 | enter-matrix:
60 | name: "Render Matrix Cell"
61 | runs-on: custom-runner
62 | needs: [generate-matrix]
63 |
64 | strategy:
65 | fail-fast: false
66 | matrix:
67 | x: ${{ fromJson(needs.generate-matrix.outputs.x) }}
68 | y: ${{ fromJson(needs.generate-matrix.outputs.y) }}
69 |
70 | steps:
71 | - name: checkout
72 | uses: actions/checkout@v2
73 |
74 | - name: Stream pixels into DB
75 | env:
76 | DATABASE_URL: ${{secrets.DATABASE_URL}}
77 | run: python3 render-matrix-cell.py --max-x='${{github.event.inputs.pixel-x}}' --max-y='${{github.event.inputs.pixel-y}}' --job-x='${{matrix.x}}' --job-y='${{matrix.y}}' --max-job-x='${{github.event.inputs.x}}' --max-job-y='${{github.event.inputs.y}}' --duration='${{github.event.inputs.duration}}' --environment='${{github.actor}}' --image-file='${{ github.event.inputs.image_url }}' --repetitions '${{github.event.inputs.repetitions}}' --connections '${{github.event.inputs.connections-per-cell}}' --repetition-delay '${{github.event.inputs.repetition-delay}}'
78 |
--------------------------------------------------------------------------------
/.github/workflows/matrix-build-hosted-runner.yml:
--------------------------------------------------------------------------------
1 | name: Matrix Build Hosted Runner
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | image_url:
7 | description: Image URL
8 | required: true
9 | default: "images/matrix-finished.png"
10 | duration:
11 | description: 'Number of job duration in ms'
12 | required: true
13 | default: '500'
14 | x:
15 | description: 'Number of cells on x-Axis'
16 | required: true
17 | default: '16'
18 | y:
19 | description: 'Number of cells on y-Axis'
20 | required: true
21 | default: '12'
22 | pixel-x:
23 | description: 'Number of pixels on x-Axis'
24 | required: true
25 | default: '800'
26 | pixel-y:
27 | description: 'Number of pixels on y-Axis'
28 | required: true
29 | default: '600'
30 | repetitions:
31 | description: 'Number of matrix cell render cycles'
32 | required: true
33 | default: '1'
34 | repetition-delay:
35 | description: 'Wait time in ms between render cycles'
36 | required: true
37 | default: '30000'
38 | connections-per-cell:
39 | description: 'DB connections per matrix cell'
40 | required: true
41 | default: '1'
42 |
43 | jobs:
44 |
45 | enter-matrix:
46 | name: "Render Matrix Cell"
47 | runs-on: ubuntu-latest
48 | container:
49 | image: ghcr.io/jonico/actions-runner:ps
50 | options: --user root
51 |
52 | strategy:
53 | fail-fast: false
54 | #max-parallel: 16
55 | matrix:
56 | x: [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
57 | y: [1,2,3,4,5,6,7,8,9,10,11,12]
58 |
59 | steps:
60 | - name: checkout
61 | uses: actions/checkout@v2
62 |
63 | - name: Stream pixels into PlanetScale DB
64 | env:
65 | DATABASE_URL: ${{secrets.DATABASE_URL}}
66 | run: python3 render-matrix-cell.py --max-x='${{github.event.inputs.pixel-x}}' --max-y='${{github.event.inputs.pixel-y}}' --job-x='${{matrix.x}}' --job-y='${{matrix.y}}' --max-job-x='${{github.event.inputs.x}}' --max-job-y='${{github.event.inputs.y}}' --duration='${{github.event.inputs.duration}}' --environment='${{github.actor}}' --image-file='${{ github.event.inputs.image_url }}' --repetitions '${{github.event.inputs.repetitions}}' --connections '${{github.event.inputs.connections-per-cell}}' --repetition-delay '${{github.event.inputs.repetition-delay}}'
67 |
--------------------------------------------------------------------------------
/.github/workflows/merge-latest-open-deploy-request.yml:
--------------------------------------------------------------------------------
1 | name: 03 - Merge latest Deploy Request
2 |
3 | on:
4 | workflow_dispatch
5 |
6 | env:
7 | pscale_base_directory: .pscale
8 |
9 | jobs:
10 |
11 | merge-latest-open-deploy-request:
12 | name: "Merge - click here"
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: checkout
17 | uses: actions/checkout@v2
18 |
19 | - name: Merge request - if asked, please click on displayed link to authenticate
20 | timeout-minutes: 10
21 | env:
22 | PLANETSCALE_SERVICE_TOKEN_NAME: ${{secrets.PLANETSCALE_SERVICE_TOKEN_NAME}}
23 | PLANETSCALE_SERVICE_TOKEN: ${{secrets.PLANETSCALE_SERVICE_TOKEN}}
24 | ORG_NAME: ${{secrets.ORG_NAME}}
25 | DB_NAME: ${{secrets.DB_NAME}}
26 | GITHUB_USER: ${{github.actor}}
27 | working-directory: ${{env.pscale_base_directory}}/cli-helper-scripts/
28 | run: ./merge-latest-open-deploy-request.sh
29 | - name: Please check out the result of your merge request from step above
30 | run: |
31 | echo "Please check out the result of your merge request from step above"
32 | sleep 10
33 |
--------------------------------------------------------------------------------
/.github/workflows/remove-database.yml:
--------------------------------------------------------------------------------
1 | name: XX - Remove Database
2 |
3 | on:
4 | workflow_dispatch
5 |
6 | env:
7 | pscale_base_directory: .pscale
8 |
9 | jobs:
10 |
11 | remove-database:
12 | name: "Remove database - click here"
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: checkout
17 | uses: actions/checkout@v2
18 |
19 | - name: Remove database - please click on displayed link to authenticate
20 | timeout-minutes: 3
21 | env:
22 | PLANETSCALE_SERVICE_TOKEN_NAME: ${{secrets.PLANETSCALE_SERVICE_TOKEN_NAME}}
23 | ORG_NAME: ${{secrets.ORG_NAME}}
24 | DB_NAME: ${{secrets.DB_NAME}}
25 | GITHUB_USER: ${{github.actor}}
26 | working-directory: ${{env.pscale_base_directory}}/cli-helper-scripts/
27 | run: ./remove-database.sh
28 |
--------------------------------------------------------------------------------
/.github/workflows/remove-operation-column-and-index.yml:
--------------------------------------------------------------------------------
1 | name: 04 - Del Operation Column & Index
2 |
3 | on:
4 | workflow_dispatch
5 |
6 | env:
7 | pscale_base_directory: .pscale
8 |
9 | jobs:
10 |
11 | remove-operation-column-and-index:
12 | name: "Remove operation - click here"
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: checkout
17 | uses: actions/checkout@v2
18 |
19 | - name: Remove operation - if asked, please click on displayed link to authenticate
20 | timeout-minutes: 3
21 | env:
22 | PLANETSCALE_SERVICE_TOKEN_NAME: ${{secrets.PLANETSCALE_SERVICE_TOKEN_NAME}}
23 | PLANETSCALE_SERVICE_TOKEN: ${{secrets.PLANETSCALE_SERVICE_TOKEN}}
24 | ORG_NAME: ${{secrets.ORG_NAME}}
25 | DB_NAME: ${{secrets.DB_NAME}}
26 | GITHUB_USER: ${{github.actor}}
27 | working-directory: ${{env.pscale_base_directory}}/cli-helper-scripts/
28 | run: ./remove-operation-column-and-index.sh
29 | - name: Please check out deployment request and branch connection string from step above
30 | run: |
31 | echo "Please check out deployment request and branch connection string from step above."
32 | sleep 10
33 |
--------------------------------------------------------------------------------
/.github/workflows/show-node-allocation.yaml:
--------------------------------------------------------------------------------
1 | name: Show node allocation
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | cluster:
7 | description: 'Cluster name (raspberry-pi-3b or gcp)'
8 | required: true
9 | default: 'raspberry-pi-3b'
10 | environment:
11 | description: 'Environment'
12 | required: true
13 | default: 'link'
14 |
15 | jobs:
16 | show-node-allocation:
17 |
18 | name: "Show node allocation for selected cloud"
19 | runs-on: [ raspberry-pi-3b ]
20 | steps:
21 | - name: Debug
22 | uses: actions/github-script@v3
23 | with:
24 | script: console.log(JSON.stringify(github, null, 2));
25 | - name: checkout
26 | uses: actions/checkout@v2
27 | - name: Show Raspi cluster
28 | if: github.event.inputs.cluster == 'raspberry-pi-3b'
29 | env:
30 | KUBECONFIG: "/home/pirate/kubeconfig"
31 | ENVIRONMENT: ${{ github.event.inputs.environment }}
32 | run: ./show-node-allocation.py --led-rows=32 --length=8 --height=8 --led-cols=64 --led-brightness=40 --namespace="github-actions-runner-${ENVIRONMENT}" node64-1 node64-2
33 |
34 | - name: Set up Cloud SDK
35 | uses: google-github-actions/setup-gcloud@master
36 | if: github.event.inputs.cluster == 'gcp'
37 | with:
38 | project_id: ${{ secrets.PROJECT_ID }}
39 | service_account_key: ${{ secrets.GCP_SA_KEY }}
40 | export_default_credentials: true
41 |
42 |
43 | - id: get-credentials
44 | if: github.event.inputs.cluster == 'gcp'
45 | uses: google-github-actions/get-gke-credentials@main
46 | with:
47 | cluster_name: ${{secrets.CLUSTER_NAME}}
48 | location: ${{secrets.CLUSTER_LOCATION}}
49 |
50 | - name: Show GCP cluster
51 | if: github.event.inputs.cluster == 'gcp'
52 | env:
53 | ENVIRONMENT: ${{ github.event.inputs.environment }}
54 | run: ./show-node-allocation.py --led-rows=32 --length=4 --height=4 --led-cols=64 --led-brightness=40 --namespace="github-actions-runner-${ENVIRONMENT}" gke-gh-runner-terraform-e-runner-pool-d7c9a363-756k gke-gh-runner-terraform-e-runner-pool-d7c9a363-3hg1 gke-gh-runner-terraform-e-runner-pool-d7c9a363-g069 gke-gh-runner-terraform-e-runner-pool-d7c9a363-q1ng
55 |
--------------------------------------------------------------------------------
/.github/workflows/streampixels.yaml:
--------------------------------------------------------------------------------
name: Stream pixels

on:
  workflow_dispatch:
    inputs:
      environment:
        description: 'Environment'
        required: true
        default: 'foobar'

jobs:
  stream-pixels:
    name: "Stream pixels sent to Redis to LED matrix"
    runs-on: [ raspberry-pi-3b ]
    steps:
      - name: checkout
        uses: actions/checkout@v2

      - name: Stream pixels
        env:
          ENVIRONMENT: ${{ github.event.inputs.environment }}
          REDIS_PASSWORD: ${{secrets.REDIS_PASSWORD}}
        # Use the already-defined ENVIRONMENT env var instead of interpolating
        # the workflow input directly into the script: a crafted input value
        # could otherwise inject shell commands into the runner.
        run: |
          # kubectl port-forward --namespace redis svc/redis-master 6379:6379 &
          # sleep 10
          python3 stream-pixels.py --led-rows=32 --led-cols=64 --led-brightness=40 --max-x=64 --max-y=32 --sleep-interval=100 --environment "$ENVIRONMENT" --redis-host=35.245.139.140
--------------------------------------------------------------------------------
/.github/workflows/visualize-matrix-build-gui.yaml:
--------------------------------------------------------------------------------
name: Visualize matrix build GUI

on:
  workflow_dispatch:
    inputs:
      image_url:
        description: Image URL
        required: true
        default: matrix-finished.png
      reset_image_url:
        description: Reset Image URL
        required: true
        default: matrix-start.png
      duration:
        description: 'Number of job duration in ms'
        required: true
        default: '1000'
      x:
        description: 'Number of jobs on x-Axis'
        required: true
        default: '8'
      y:
        description: 'Number of jobs on y-Axis'
        required: true
        default: '6'
      environment:
        description: 'environment'
        required: true
        default: 'foobar'
      redis_host:
        description: 'REDIS HOST'
        required: true
        default: '35.245.139.140'

jobs:
  reset-led:
    name: "Reset Matrix"
    runs-on: ${{ github.event.inputs.environment }}
    steps:
      - name: checkout
        uses: actions/checkout@v2

      - name: Reset image
        env:
          REDIS_PASSWORD: ${{secrets.REDIS_PASSWORD}}
          REDIS_HOST: ${{ github.event.inputs.redis_host }}
        run: |
          python3 publish-pixels.py --max-x=800 --max-y=600 --job-x=0 --job-y=0 --image-file='${{ github.event.inputs.reset_image_url }}' --redis-host="$REDIS_HOST" --environment='${{github.event.inputs.environment}}'
        working-directory: ./images
  generate-matrix:
    name: "Generate matrix job specs"
    outputs:
      x: ${{ steps.generate-matrix.outputs.x }}
      y: ${{ steps.generate-matrix.outputs.y }}
    runs-on: ${{ github.event.inputs.environment }}
    steps:
      - name: generate-matrix
        id: generate-matrix
        # Pass the inputs via env instead of interpolating them into the
        # script, so a crafted input value cannot inject shell commands.
        env:
          X_COUNT: ${{ github.event.inputs.x }}
          Y_COUNT: ${{ github.event.inputs.y }}
        # `::set-output` workflow commands are deprecated and disabled on
        # current runners; write to $GITHUB_OUTPUT instead (runner >= 2.297.0).
        run: |
          echo "x=[`seq -s , "$X_COUNT"`]" >> "$GITHUB_OUTPUT"
          echo "y=[`seq -s , "$Y_COUNT"`]" >> "$GITHUB_OUTPUT"

  matrix:
    name: "Matrix job"
    runs-on: ${{ github.event.inputs.environment }}
    needs: [generate-matrix]

    strategy:
      fail-fast: false
      max-parallel: 50
      matrix:
        x: ${{ fromJson(needs.generate-matrix.outputs.x) }}
        y: ${{ fromJson(needs.generate-matrix.outputs.y) }}

    steps:
      - name: checkout
        uses: actions/checkout@v2

      - name: Perform job
        env:
          REDIS_PASSWORD: ${{secrets.REDIS_PASSWORD}}
          REDIS_HOST: ${{ github.event.inputs.redis_host }}
        run: |
          python3 visualize-matrix-build.py --max-x=800 --max-y=600 --job-x='${{matrix.x}}' --job-y='${{matrix.y}}' --max-job-x='${{github.event.inputs.x}}' --max-job-y='${{github.event.inputs.y}}' --duration='${{github.event.inputs.duration}}' --environment='${{github.event.inputs.environment}}' --image-file='${{ github.event.inputs.image_url }}' --redis-host="$REDIS_HOST"
        working-directory: ./images
--------------------------------------------------------------------------------
/.github/workflows/visualize-matrix-build-led.yml:
--------------------------------------------------------------------------------
name: Visualize matrix build LEDs

on:
  workflow_dispatch:
    inputs:
      image_url:
        description: Image URL
        required: true
        default: hubot.png
      reset_image_url:
        description: Reset Image URL
        required: true
        default: monahubot.png
      x:
        description: 'Number of jobs on x-Axis'
        required: true
        default: '8'
      y:
        description: 'Number of jobs on y-Axis'
        required: true
        default: '4'
      duration:
        description: 'Number of job duration in ms'
        required: true
        default: '5000'
      environment:
        description: 'environment'
        required: true
        default: 'foobar'
      redis_host:
        description: 'REDIS HOST'
        required: true
        default: '35.245.139.140'

jobs:
  reset-led:
    name: "Reset LEDs"
    runs-on: ${{ github.event.inputs.environment }}
    steps:
      - name: checkout
        uses: actions/checkout@v2

      - name: Reset image
        env:
          REDIS_PASSWORD: ${{secrets.REDIS_PASSWORD}}
          REDIS_HOST: ${{ github.event.inputs.redis_host }}
        run: |
          python3 publish-pixels.py --max-x=64 --max-y=32 --job-x=0 --job-y=0 --image-file='${{ github.event.inputs.reset_image_url }}' --redis-host="$REDIS_HOST" --environment='${{github.event.inputs.environment}}'
        working-directory: ./images

  generate-matrix:
    name: "Generate matrix job specs"
    outputs:
      x: ${{ steps.generate-matrix.outputs.x }}
      y: ${{ steps.generate-matrix.outputs.y }}
    runs-on: ${{ github.event.inputs.environment }}
    steps:
      - name: generate-matrix
        id: generate-matrix
        # Pass the inputs via env instead of interpolating them into the
        # script, so a crafted input value cannot inject shell commands.
        env:
          X_COUNT: ${{ github.event.inputs.x }}
          Y_COUNT: ${{ github.event.inputs.y }}
        # `::set-output` workflow commands are deprecated and disabled on
        # current runners; write to $GITHUB_OUTPUT instead (runner >= 2.297.0).
        run: |
          echo "x=[`seq -s , "$X_COUNT"`]" >> "$GITHUB_OUTPUT"
          echo "y=[`seq -s , "$Y_COUNT"`]" >> "$GITHUB_OUTPUT"

  matrix:
    name: "Matrix job"
    runs-on: ${{ github.event.inputs.environment }}
    needs: [generate-matrix] # faster, although risky
    #needs: [generate-matrix,reset-led]

    strategy:
      fail-fast: false
      matrix:
        x: ${{ fromJson(needs.generate-matrix.outputs.x) }}
        y: ${{ fromJson(needs.generate-matrix.outputs.y) }}

    steps:
      - name: checkout
        uses: actions/checkout@v2

      - name: Perform job
        env:
          REDIS_PASSWORD: ${{secrets.REDIS_PASSWORD}}
          REDIS_HOST: ${{ github.event.inputs.redis_host }}
        run: |
          python3 visualize-matrix-build.py --max-x=64 --max-y=32 --job-x='${{matrix.x}}' --job-y='${{matrix.y}}' --max-job-x='${{github.event.inputs.x}}' --max-job-y='${{github.event.inputs.y}}' --duration='${{github.event.inputs.duration}}' --environment='${{github.event.inputs.environment}}' --image-file='${{ github.event.inputs.image_url }}' --redis-host="$REDIS_HOST"
        working-directory: ./images
--------------------------------------------------------------------------------
/.github/workflows/visualize-matrix-build-nektos.yml:
--------------------------------------------------------------------------------
name: Visualize matrix build Nektos

on:
  workflow_dispatch:
    inputs:
      image_url:
        description: Image URL
        required: true
        default: matrix-finished.png
      reset_image_url:
        description: Reset Image URL
        required: true
        default: matrix-start.png
      duration:
        description: 'Number of job duration in ms'
        required: true
        default: '1000'
      environment:
        description: 'environment'
        required: true
        default: 'foobar'
      redis_host:
        description: 'REDIS HOST'
        required: true
        default: '35.245.139.140'
jobs:
  reset-matrix-redis:
    name: "Reset Matrix Redis"
    runs-on: ${{ github.event.inputs.environment }}
    steps:
      - name: checkout
        uses: actions/checkout@v2

      - name: Reset image
        env:
          REDIS_PASSWORD: ${{secrets.REDIS_PASSWORD}}
          REDIS_HOST: ${{ github.event.inputs.redis_host }}
        run: |
          python3 publish-pixels.py --max-x=800 --max-y=600 --job-x=0 --job-y=0 --image-file='${{ github.event.inputs.reset_image_url }}' --redis-host="$REDIS_HOST" --environment='${{github.event.inputs.environment}}'
        working-directory: ./images

  matrix:
    name: "Matrix job"
    runs-on: ${{ github.event.inputs.environment }}
    needs: [reset-matrix-redis]

    strategy:
      fail-fast: false
      matrix:
        x: [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
        y: [1,2,3,4,5,6,7,8,9,10,11,12]

    steps:
      - name: checkout
        uses: actions/checkout@v2

      - name: Perform job
        env:
          REDIS_PASSWORD: ${{secrets.REDIS_PASSWORD}}
          REDIS_HOST: ${{ github.event.inputs.redis_host }}
        # The random sleep spreads the matrix jobs out so they do not all hit
        # Redis at the same instant.
        # This workflow defines no `x`/`y` inputs, so the previous
        # `${{ github.event.inputs.x }}` / `${{ github.event.inputs.y }}`
        # expanded to empty strings at runtime; pass the hard-coded matrix
        # bounds (16 x 12, matching the strategy above) instead.
        run: |
          sleep $((RANDOM%5+5))
          python3 visualize-matrix-build.py --max-x=800 --max-y=600 --job-x='${{matrix.x}}' --job-y='${{matrix.y}}' --max-job-x=16 --max-job-y=12 --duration='${{github.event.inputs.duration}}' --environment='${{github.event.inputs.environment}}' --image-file='${{ github.event.inputs.image_url }}' --redis-host="$REDIS_HOST"
        working-directory: ./images
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .venv/*
2 | _actions/*
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/add-operation-column-and-index.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Create a PlanetScale branch plus deploy request that adds the `operation`
# column and an (environment, operation) index to pixel_matrix.

# default the branch name, keeping any caller-provided value
: "${BRANCH_NAME:=add-operation-column-and-index}"

DDL_STATEMENTS="alter table pixel_matrix add column operation varchar(10) default NULL; create index environment_operation on pixel_matrix(environment, operation);"

./create-db-branch-dr-and-connection.sh "${BRANCH_NAME}" "${DDL_STATEMENTS}"
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/approve-deploy-request.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Approve a PlanetScale deploy request with an optional review comment.
#
# Args: $1 deploy request number, $2 comment text.

. use-pscale-docker-image.sh

. authenticate-ps.sh

DEPLOY_REQUEST_NUMBER="$1"
COMMENT="$2"
# escape whitespaces in comment with no-break space (U+00A0).
# The previous `sed -e 's/ /\ /g'` was a no-op: in a sed replacement, `\ `
# is just a literal space. Substitute a real NBSP via bash expansion instead.
NBSP="$(printf '\302\240')"
COMMENT="${COMMENT// /$NBSP}"

. set-db-and-org-and-branch-name.sh
pscale deploy-request review "$DB_NAME" "$DEPLOY_REQUEST_NUMBER" --approve --comment "$COMMENT" --org "$ORG_NAME"
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/authenticate-ps.sh:
--------------------------------------------------------------------------------
# Authenticate the PlanetScale CLI unless a service token is already provided
# via PLANETSCALE_SERVICE_TOKEN. Meant to be sourced; exits the caller if the
# interactive login fails.
if [ -z "$PLANETSCALE_SERVICE_TOKEN" ]; then
  echo "Going to authenticate PlanetScale CLI, please follow the link displayed in your browser and confirm ..."
  # test the command directly instead of inspecting $? afterwards
  if ! pscale auth login; then
    echo "pscale auth login failed, please try again"
    exit 1
  fi
fi
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/create-branch-connection-string.sh:
--------------------------------------------------------------------------------
1 | function create-branch-connection-string {
2 | local DB_NAME=$1
3 | local BRANCH_NAME=$2
4 | local ORG_NAME=$3
5 | local CREDS=${4,,}
6 | local secretshare=$5
7 |
8 | # delete password if it already existed
9 | # first, list password if it exists
10 | local raw_output=`pscale password list "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME" --format json `
11 | # check return code, if not 0 then error
12 | if [ $? -ne 0 ]; then
13 | echo "Error: pscale password list returned non-zero exit code $?: $raw_output"
14 | exit 1
15 | fi
16 |
17 | local output=`echo $raw_output | jq -r "[.[] | select(.display_name == \"$CREDS\") ] | .[0].id "`
18 | # if output is not "null", then password exists, delete it
19 | if [ "$output" != "null" ]; then
20 | echo "Deleting existing password $output"
21 | pscale password delete --force "$DB_NAME" "$BRANCH_NAME" "$output" --org "$ORG_NAME"
22 | # check return code, if not 0 then error
23 | if [ $? -ne 0 ]; then
24 | echo "Error: pscale password delete returned non-zero exit code $?"
25 | exit 1
26 | fi
27 | fi
28 |
29 | local raw_output=`pscale password create "$DB_NAME" "$BRANCH_NAME" "$CREDS" --org "$ORG_NAME" --format json`
30 |
31 | if [ $? -ne 0 ]; then
32 | echo "Failed to create credentials for database $DB_NAME branch $BRANCH_NAME: $raw_output"
33 | exit 1
34 | fi
35 |
36 | local DB_URL=`echo "$raw_output" | jq -r ". | \"mysql://\" + .id + \":\" + .plain_text + \"@\" + .database_branch.access_host_url + \"/\""`
37 | local GENERAL_CONNECTION_STRING=`echo "$raw_output" | jq -r ". | .connection_strings.general"`
38 |
39 | read -r -d '' SECRET_TEXT <"
12 | exit 1
13 | fi
14 |
15 | create-deployment "$DB_NAME" "$ORG_NAME" "$1"
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/merge-latest-open-deploy-request.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Find the first open deploy request for the database and deploy it.

. use-pscale-docker-image.sh
. authenticate-ps.sh
. wait-for-deploy-request-merged.sh
. set-db-and-org-and-branch-name.sh
. ps-create-helper-functions.sh


# Capture and check in one step so the reported exit code is the CLI's
# (the old pattern printed `$?` of the [ test itself, which was always 0).
# Also: the old error message named the wrong subcommand (`deploy-branch`).
if ! raw_output=$(pscale deploy-request list "$DB_NAME" --org "$ORG_NAME" --format json); then
  echo "Error: pscale deploy-request list returned non-zero exit code: $raw_output"
  exit 1
fi
# pick the number of the first deploy request whose state is "open"
output=`echo $raw_output | jq "[.[] | select(.state == \"open\") ] | .[0].number "`

# test whether the output is a number
if [[ $output =~ ^[0-9]+$ ]]; then
  create-deployment "$DB_NAME" "$ORG_NAME" "$output"
else
  echo "No open deployment request found: $raw_output"
  exit 3
fi
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/ps-create-helper-functions.sh:
--------------------------------------------------------------------------------
# Create (optionally recreating) a PlanetScale branch and wait until it is
# ready for use.
#
# Args: $1 database, $2 branch, $3 org, $4 non-empty to delete an existing
# branch of the same name first. Region can be overridden via PS_BRANCH_REGION
# (defaults to the previously hard-coded "us-east").
function create-db-branch {
  local DB_NAME=$1
  local BRANCH_NAME=$2
  local ORG_NAME=$3
  local recreate_branch=$4
  local region="${PS_BRANCH_REGION:-us-east}"

  # delete the branch if it already exists and recreate branch is set
  if [ -n "$recreate_branch" ]; then
    echo "Trying to delete branch $BRANCH_NAME if it already existed ..."
    pscale branch delete "$DB_NAME" "$BRANCH_NAME" --force --org "$ORG_NAME" 2>/dev/null
  fi

  # if branch creation fails, exit with error
  if ! pscale branch create "$DB_NAME" "$BRANCH_NAME" --region "$region" --org "$ORG_NAME"; then
    echo "Failed to create branch $BRANCH_NAME for database $DB_NAME"
    exit 1
  fi

  if ! wait_for_branch_readiness 10 "$DB_NAME" "$BRANCH_NAME" "$ORG_NAME" 20; then
    echo "Branch $BRANCH_NAME is not ready"
    exit 1
  fi

  local branch_url="https://app.planetscale.com/${ORG_NAME}/${DB_NAME}/${BRANCH_NAME}"
  echo "Branch $BRANCH_NAME is ready at $branch_url"
  # if CI variable is set, then set output variables
  if [ -n "$CI" ]; then
    echo "::set-output name=BRANCH_URL::$branch_url"
  fi
}
33 |
# Apply DDL statements to a branch by piping them into a pscale shell.
#
# Args: $1 database, $2 branch, $3 org, $4 DDL statement string.
function create-schema-change {
  local db="$1"
  local branch="$2"
  local org="$3"
  local ddl="$4"

  echo "Changing schema with the following DDL statements:"
  echo $ddl
  # the pipeline's status is the shell command's; exit on failure
  if ! echo "$ddl" | pscale shell "$db" "$branch" --org "$org"; then
    echo "Schema change in $branch could not be created"
    exit 1
  fi
}
48 |
49 |
# Create a deploy request for a branch and surface its URL/number to CI.
#
# Args: $1 database, $2 branch, $3 org.
function create-deploy-request {
  local DB_NAME=$1
  local BRANCH_NAME=$2
  local ORG_NAME=$3

  # Declare and assign separately: `local var=$(cmd)` reports the exit status
  # of `local` (always 0) and would make the error check below dead code.
  local raw_output
  raw_output=$(pscale deploy-request create "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME" --format json)
  if [ $? -ne 0 ]; then
    echo "Deploy request could not be created: $raw_output"
    exit 1
  fi
  local deploy_request_number
  deploy_request_number=$(echo $raw_output | jq -r '.number')
  # jq -r prints the literal string "null" for a missing key, which a bare
  # -z test would accept — reject that as well
  if [ -z "$deploy_request_number" ] || [ "$deploy_request_number" = "null" ]; then
    echo "Could not retrieve deploy request number: $raw_output"
    exit 1
  fi

  local deploy_request="https://app.planetscale.com/${ORG_NAME}/${DB_NAME}/deploy-requests/${deploy_request_number}"
  echo "Check out the deploy request created at $deploy_request"
  # if CI variable is set, export the deploy request URL
  if [ -n "$CI" ]; then
    echo "::set-output name=DEPLOY_REQUEST_URL::$deploy_request"
    echo "::set-output name=DEPLOY_REQUEST_NUMBER::$deploy_request_number"
    create-diff-for-ci "$DB_NAME" "$ORG_NAME" "$deploy_request_number" "$BRANCH_NAME"
  fi
}
76 |
# Look up an existing deploy request and export/emit its branch name and URLs.
#
# Args: $1 database, $2 org, $3 deploy request number.
# Side effects: exports BRANCH_NAME and BRANCH_URL for the caller.
function create-deploy-request-info {
  local DB_NAME=$1
  local ORG_NAME=$2
  local DEPLOY_REQUEST_NUMBER=$3

  # Declare and assign separately: `local var=$(cmd)` reports the exit status
  # of `local` (always 0) and would make the error check below dead code.
  local raw_output
  raw_output=$(pscale deploy-request show "$DB_NAME" "$DEPLOY_REQUEST_NUMBER" --org "$ORG_NAME" --format json)
  if [ $? -ne 0 ]; then
    echo "Deploy request could not be retrieved: $raw_output"
    exit 1
  fi
  # extract the branch name from the deploy request
  local branch_name
  branch_name=$(echo $raw_output | jq -r '.branch')

  # jq -r prints the literal string "null" for a missing key, which a bare
  # -z test would accept — reject that as well
  if [ -z "$branch_name" ] || [ "$branch_name" = "null" ]; then
    echo "Could not extract branch name from deploy request $DEPLOY_REQUEST_NUMBER"
    exit 1
  fi

  export BRANCH_NAME="$branch_name"
  local deploy_request="https://app.planetscale.com/${ORG_NAME}/${DB_NAME}/deploy-requests/${DEPLOY_REQUEST_NUMBER}"

  local branch_url="https://app.planetscale.com/${ORG_NAME}/${DB_NAME}/${BRANCH_NAME}"
  export BRANCH_URL="$branch_url"

  # if CI variable is set, export deployment request info
  if [ -n "$CI" ]; then
    echo "::set-output name=BRANCH_NAME::$branch_name"
    echo "::set-output name=DB_NAME::$DB_NAME"
    echo "::set-output name=ORG_NAME::$ORG_NAME"
    echo "::set-output name=DEPLOY_REQUEST_URL::$deploy_request"
    echo "::set-output name=DEPLOY_REQUEST_NUMBER::$DEPLOY_REQUEST_NUMBER"
    echo "::set-output name=BRANCH_URL::$branch_url"
  fi
}
112 |
# Verify that a branch exists and export/emit its name and console URL.
#
# Args: $1 database, $2 branch, $3 org.
# Side effects: exports BRANCH_NAME and BRANCH_URL for the caller.
function create-branch-info {
  local DB_NAME=$1
  local BRANCH_NAME=$2
  local ORG_NAME=$3

  # Declare and assign separately: `local var=$(cmd)` reports the exit status
  # of `local` (always 0) and would make the error check below dead code.
  local raw_output
  raw_output=$(pscale branch show "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME" --format json)
  if [ $? -ne 0 ]; then
    echo "Branch could not be retrieved: $raw_output"
    exit 1
  fi
  # extract the branch name from the response
  local branch_name
  branch_name=$(echo $raw_output | jq -r '.name')

  # jq -r prints the literal string "null" for a missing key, which a bare
  # -z test would accept — reject that as well
  if [ -z "$branch_name" ] || [ "$branch_name" = "null" ]; then
    echo "Could not extract existing branch name from branch $BRANCH_NAME"
    exit 1
  fi

  export BRANCH_NAME="$branch_name"

  local branch_url="https://app.planetscale.com/${ORG_NAME}/${DB_NAME}/${BRANCH_NAME}"
  export BRANCH_URL="$branch_url"

  # if CI variable is set, export branch info
  if [ -n "$CI" ]; then
    echo "::set-output name=BRANCH_NAME::$branch_name"
    echo "::set-output name=DB_NAME::$DB_NAME"
    echo "::set-output name=ORG_NAME::$ORG_NAME"
    echo "::set-output name=BRANCH_URL::$branch_url"
  fi
}
145 |
# Build a schema diff for a deploy request and expose it as the BRANCH_DIFF
# GitHub Actions output (legacy ::set-output syntax), with the escaping that
# workflow commands require.
#
# Args: $1 database, $2 org, $3 deploy request number, $4 branch name,
# $5 non-empty to refresh the branch schema before diffing.
function create-diff-for-ci {
  local DB_NAME=$1
  local ORG_NAME=$2
  local deploy_request_number=$3
  local BRANCH_NAME=$4
  local refresh_schema=$5

  local deploy_request="https://app.planetscale.com/${ORG_NAME}/${DB_NAME}/deploy-requests/${deploy_request_number}"
  # fallback text used if the diff pipeline reports failure
  local BRANCH_DIFF="Diff could not be generated for deploy request $deploy_request"

  # updating schema for branch
  if [ -n "$refresh_schema" ]; then
    pscale branch refresh-schema "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"
  fi

  local lines=""
  # read shell output line by line and assign to variable; lines are joined
  # with a literal backslash-n (not a newline) — the CI block below rewrites
  # that literal '\n' to %0A for the workflow-command encoding
  while read -r line; do
    lines="$lines\n$line"
  done < <(pscale deploy-request diff "$DB_NAME" "$deploy_request_number" --org "$ORG_NAME" --format=json | jq .[].raw)


  # NOTE(review): $? here is the exit status of the `while` loop, not of the
  # pscale|jq pipeline inside the process substitution, so the failure branch
  # is likely unreachable — confirm and capture the pipeline status if needed.
  if [ $? -ne 0 ]; then
    BRANCH_DIFF="$BRANCH_DIFF : ${lines}"
  else
    BRANCH_DIFF=$lines
  fi

  if [ -n "$CI" ]; then
    # strip quotes and percent-encode %, \n and \r as required by the
    # ::set-output workflow-command format
    BRANCH_DIFF="${BRANCH_DIFF//'"'/''}"
    BRANCH_DIFF="${BRANCH_DIFF//'%'/'%25'}"
    BRANCH_DIFF="${BRANCH_DIFF//'\n'/'%0A'}"
    BRANCH_DIFF="${BRANCH_DIFF//'\r'/'%0D'}"
    echo "::set-output name=BRANCH_DIFF::$BRANCH_DIFF"
  fi
}
182 |
# Deploy a deploy request: show its diff, confirm interactively (outside CI),
# trigger the deployment and wait until it is merged.
#
# Args: $1 database, $2 org, $3 deploy request number.
# NOTE(review): in CI this passes the global $BRANCH_NAME (set by callers such
# as set-db-and-org-and-branch-name.sh) to create-diff-for-ci — confirm it is
# always set when this function runs in CI.
function create-deployment {
  local DB_NAME=$1
  local ORG_NAME=$2
  local deploy_request_number=$3

  local deploy_request="https://app.planetscale.com/${ORG_NAME}/${DB_NAME}/deploy-requests/${deploy_request_number}"
  # if CI variable is set, export the deploy request parameters
  if [ -n "$CI" ]; then
    echo "::set-output name=DEPLOY_REQUEST_URL::$deploy_request"
    echo "::set-output name=DEPLOY_REQUEST_NUMBER::$deploy_request_number"
  fi

  echo "Going to deploy deployment request $deploy_request with the following changes: "

  pscale deploy-request diff "$DB_NAME" "$deploy_request_number" --org "$ORG_NAME"
  # only ask for user input if CI variable is not set
  if [ -z "$CI" ]; then
    read -p "Do you want to deploy this deployment request? [y/N] " -n 1 -r
    echo
    if ! [[ $REPLY =~ ^[Yy]$ ]]; then
      echo "Deployment request $deploy_request_number was not deployed."
      exit 1
    fi
  else
    create-diff-for-ci "$DB_NAME" "$ORG_NAME" "$deploy_request_number" "$BRANCH_NAME"
  fi

  pscale deploy-request deploy "$DB_NAME" "$deploy_request_number" --org "$ORG_NAME"
  # check return code, if not 0 then error
  if [ $? -ne 0 ]; then
    echo "Error: pscale deploy-request deploy returned non-zero exit code"
    exit 1
  fi

  # poll (up to 9 retries, 60s backoff cap) until the deployment completes
  wait_for_deploy_request_merged 9 "$DB_NAME" "$deploy_request_number" "$ORG_NAME" 60
  if [ $? -ne 0 ]; then
    echo "Error: wait-for-deploy-request-merged returned non-zero exit code"
    echo "Check out the deploy request status at $deploy_request"
    exit 5
  else
    echo "Check out the deploy request merged at $deploy_request"
  fi

}
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/ps-env-template.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Re-export the PlanetScale pipeline variables and surface each one as a
# GitHub Actions step output (legacy ::set-output syntax).
# The emission order matches the original template.
for var in ORG_NAME DB_NAME BRANCH_NAME DEPLOY_REQUEST_NUMBER DEPLOY_REQUEST_URL BRANCH_URL; do
  export "$var=${!var}"
  echo "::set-output name=${var}::${!var}"
done
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/remove-database.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Delete the PlanetScale database derived by set-db-and-org-and-branch-name.sh.

BRANCH_NAME=${BRANCH_NAME:-"main"}

. use-pscale-docker-image.sh

# At the moment, service tokens do not allow DB deletions
unset PLANETSCALE_SERVICE_TOKEN
. authenticate-ps.sh

. set-db-and-org-and-branch-name.sh

# check the delete command's own exit status directly
if ! pscale database delete --force "$DB_NAME" --org "$ORG_NAME"; then
  echo "Failed to remove database $DB_NAME"
  exit 1
fi
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/remove-operation-column-and-index.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Branch + deploy request that drops the `operation` column and its index
# from pixel_matrix (inverse of add-operation-column-and-index.sh).

# default the branch name, keeping any caller-provided value
: "${BRANCH_NAME:=remove-operation-column-and-index}"

DDL_STATEMENTS="alter table pixel_matrix drop column operation; drop index environment_operation on pixel_matrix;"

./create-db-branch-dr-and-connection.sh "${BRANCH_NAME}" "${DDL_STATEMENTS}"
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/retrieve-branch-info.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Retrieve info about an existing PlanetScale branch and expose it to CI.
#
# Args: $1 database name, $2 org name, $3 branch name.
# NOTE(review): this also calls create-deploy-request, i.e. "retrieving"
# branch info creates a new deploy request as a side effect — confirm intended.

. use-pscale-docker-image.sh

. authenticate-ps.sh

DB_NAME="$1"
ORG_NAME="$2"
BRANCH_NAME="$3"

. ps-create-helper-functions.sh
create-branch-info "$DB_NAME" "$BRANCH_NAME" "$ORG_NAME"
create-deploy-request "$DB_NAME" "$BRANCH_NAME" "$ORG_NAME"

# create fresh credentials for the branch and emit the connection string
. create-branch-connection-string.sh
create-branch-connection-string "$DB_NAME" "$BRANCH_NAME" "$ORG_NAME" "creds-${BRANCH_NAME}" "sharesecret"
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/retrieve-deploy-request-info.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Retrieve info about an existing deploy request, regenerate its schema diff
# and create fresh branch credentials for CI consumption.
#
# Args: $1 database name, $2 org name, $3 deploy request number.

. use-pscale-docker-image.sh

. authenticate-ps.sh

DB_NAME="$1"
ORG_NAME="$2"
DEPLOY_REQUEST_NUMBER="$3"

. ps-create-helper-functions.sh
# exports BRANCH_NAME/BRANCH_URL for the calls below
create-deploy-request-info "$DB_NAME" "$ORG_NAME" "$DEPLOY_REQUEST_NUMBER"

# fifth arg "update" makes create-diff-for-ci refresh the branch schema first
create-diff-for-ci "$DB_NAME" "$ORG_NAME" "$DEPLOY_REQUEST_NUMBER" "$BRANCH_NAME" "update"

. create-branch-connection-string.sh
create-branch-connection-string "$DB_NAME" "$BRANCH_NAME" "$ORG_NAME" "creds-${BRANCH_NAME}" "sharesecret"
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/set-db-and-org-and-branch-name.sh:
--------------------------------------------------------------------------------
1 |
2 | # Set DB_NAME unless it is already set
3 | export DB_NAME=${DB_NAME:-matrix-demos-${GITHUB_USER}}
4 | echo "Using DB name ${DB_NAME}"
5 |
6 | # set org name to first org the user has access to unless it is already set in ORG_NAME
7 | if [ -z "${ORG_NAME}" ]; then
8 | export ORG_NAME=`pscale org list --format json | jq -r ".[0].name"`
9 | # check exit code
10 | if [ $? -ne 0 ]; then
11 | echo "Error: Failed to get PlanetScale org name"
12 | exit 1
13 | fi
14 | fi
15 | echo "Using org name ${ORG_NAME}"
16 |
17 | export BRANCH_NAME=${BRANCH_NAME:-"main"}
18 | echo "Using branch name ${BRANCH_NAME}"
19 |
20 | # if CI variable ist set
21 | if [ -n "$CI" ]; then
22 | echo "::set-output name=DB_NAME::$DB_NAME"
23 | echo "::set-output name=ORG_NAME::$ORG_NAME"
24 | echo "::set-output name=BRANCH_NAME::$BRANCH_NAME"
25 | fi
26 |
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/set-db-and-org-name.sh:
--------------------------------------------------------------------------------
1 |
2 | # Set DB_NAME unless it is already set
3 | export DB_NAME=${DB_NAME:-matrix-demos-${GITHUB_USER}}
4 | # Set org name unless it is already set
5 | export ORG_NAME=${ORG_NAME:-"planetscale-demo"}
6 |
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/set-db-url.sh:
--------------------------------------------------------------------------------
# Point DATABASE_URL at the user's personal database when MY_DB_URL is set;
# otherwise fall back to whatever DATABASE_URL already holds. Meant to be
# sourced; exits the caller when no database URL is available at all.
if [ -n "$MY_DB_URL" ]; then
    echo "MY_DB_URL set, using your personal database"
    export DATABASE_URL="$MY_DB_URL"
else
    echo "MY_DB_URL not set, using default database - this will potentially interfere with demos of other users using the same repo at the same time"
fi

# without any database URL there is nothing to connect to
if [ -z "$DATABASE_URL" ]; then
    echo "DATABASE_URL not set, exiting"
    exit 1
fi
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/update-db-branch.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Apply DDL statements to an existing PlanetScale branch and refresh the
# schema diff shown on the associated deploy request.
#
# Args: $1 branch name, $2 DDL statements, $3 deploy request number.

. use-pscale-docker-image.sh

. authenticate-ps.sh

BRANCH_NAME="$1"
DDL_STATEMENTS="$2"
DEPLOY_REQUEST_NUMBER="$3"


# derives DB_NAME/ORG_NAME (keeps the BRANCH_NAME set above) and emits CI outputs
. set-db-and-org-and-branch-name.sh

. ps-create-helper-functions.sh
create-schema-change "$DB_NAME" "$BRANCH_NAME" "$ORG_NAME" "$DDL_STATEMENTS"
# fifth arg "update" makes create-diff-for-ci refresh the branch schema first
create-diff-for-ci "$DB_NAME" "$ORG_NAME" "$DEPLOY_REQUEST_NUMBER" "$BRANCH_NAME" "update"
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/use-pscale-docker-image.sh:
--------------------------------------------------------------------------------
# Shadow the `pscale` command with a shell function that runs the CLI from the
# latest planetscale/pscale docker image (or natively when NO_DOCKER is set).
# Meant to be sourced before any other helper script.
echo Using pscale CLI from latest docker image ...
# config dir is bind-mounted into the container so auth state persists
mkdir -p $HOME/.config/planetscale

function pscale {
  local tty="-t"
  local non_interactive=""
  local command=""

  # if first arg equals shell, we have to turn off pseudo-tty and set PSCALE_ALLOW_NONINTERACTIVE_SHELL=true
  if [ "$1" = "shell" ]; then
    tty=""
    # NOTE(review): non_interactive is assigned but never added to $command
    # below — the docker branch hard-codes the env var instead. Confirm
    # whether this variable is dead.
    non_interactive="-e PSCALE_ALLOW_NONINTERACTIVE_SHELL=true"
  fi

  # if script is run in CI and it is not the auth command, we have to turn off pseudo-tty
  if [ -n "$CI" ] && [ "$1" != "auth" ]; then
    tty=""
  fi

  # if NO_DOCKER is set, we will use the natively installed commands
  # NOTE(review): embedding $@ inside a string and later expanding $command
  # unquoted word-splits the arguments, so any argument containing spaces
  # (e.g. a review comment) will break — confirm callers never pass such args.
  if [ -n "$NO_DOCKER" ]; then
    command="pscale $@"
  else
    command="docker run -e PLANETSCALE_SERVICE_TOKEN=$PLANETSCALE_SERVICE_TOKEN -e PLANETSCALE_SERVICE_TOKEN_NAME=$PLANETSCALE_SERVICE_TOKEN_NAME -e HOME=/tmp -v $HOME/.config/planetscale:/tmp/.config/planetscale -e PSCALE_ALLOW_NONINTERACTIVE_SHELL=true --user $(id -u):$(id -g) --rm -i $tty planetscale/pscale:latest $@"
  fi

  # if command is auth and we are running in CI, we will use the script command to get a fake terminal
  if [ "$1" = "auth" ] && [ -n "$CI" ]; then
    echo "::notice ::Please visit the URL displayed in the line below in your browser to authenticate"
    command="script -q -f --return -c '$command' | perl -ne '\$| = 1; \$/ = \"\r\"; \$counter=0; while (<>) { \$url = \$1 if /(http.*)$/; print \"Please click on \" . \$url . \"\n\" if \$url && (\$counter++)%100==0; }'"
    eval $command
  else
    $command
  fi

}
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/wait-for-branch-readiness.sh:
--------------------------------------------------------------------------------
# Poll `pscale branch list` until the given branch reports ready, with
# exponential backoff.
#
# Args: $1 max retries, $2 database, $3 branch (compared lower-cased),
# $4 org, $5 optional backoff cap in seconds (default 60).
# Returns 0 when ready, 1 on CLI error, 2 on retries exhausted, 3 on
# unexpected branch state.
function wait_for_branch_readiness {
  local retries=$1
  local db=$2
  local branch=${3,,}
  local org=$4

  # check whether fifth parameter is set, otherwise use default value
  local max_timeout
  if [ -z "$5" ]; then
    max_timeout=60
  else
    max_timeout=$5
  fi

  local count=0
  local wait=1

  echo "Checking if branch $branch is ready for use..."
  while true; do
    # Declare and assign separately: `local var=$(cmd)` reports the exit
    # status of `local` (always 0) and made this error check dead code.
    local raw_output
    raw_output=`pscale branch list $db --org $org --format json`
    if [ $? -ne 0 ]; then
      echo "Error: pscale branch list returned non-zero exit code $?: $raw_output"
      return 1
    fi
    local output=`echo $raw_output | jq ".[] | select(.name == \"$branch\") | .ready"`
    # test whether output is false, if so, increase wait timeout exponentially
    if [ "$output" == "false" ]; then
      # increase wait variable exponentially but only if it is less than max_timeout
      if [ $((wait * 2)) -le $max_timeout ]; then
        wait=$((wait * 2))
      else
        wait=$max_timeout
      fi

      count=$((count+1))
      if [ $count -ge $retries ]; then
        echo "Branch $branch is not ready after $retries retries. Exiting..."
        return 2
      fi
      echo "Branch $branch is not ready yet. Retrying in $wait seconds..."
      sleep $wait
    elif [ "$output" == "true" ]; then
      echo "Branch $branch is ready for use."
      return 0
    else
      echo "Branch $branch in unknown status: $raw_output"
      return 3
    fi
  done
}
--------------------------------------------------------------------------------
/.pscale/cli-helper-scripts/wait-for-deploy-request-merged.sh:
--------------------------------------------------------------------------------
# Poll `pscale deploy-request list` until deploy request $number reaches the
# "complete" deployment state, with exponential backoff capped at max_timeout.
#
# Arguments:
#   $1 - retries: maximum number of polls before giving up
#   $2 - db: PlanetScale database name
#   $3 - number: deploy request number
#   $4 - org: PlanetScale organization
#   $5 - max_timeout: optional backoff cap in seconds (default: 600)
# Returns:
#   0 merged, 1 pscale call failed, 2 retries exhausted, 3 unknown status
function wait_for_deploy_request_merged {
    local retries=$1
    local db=$2
    local number=$3
    local org=$4
    # optional fifth parameter, otherwise default value
    local max_timeout=${5:-600}

    local count=0
    local wait=1

    echo "Checking if deploy request $number is ready for use..."
    while true; do
        # Declare separately from the assignment: 'local var=$(cmd)' would make
        # $? the (always 0) status of 'local', rendering the error check dead.
        local raw_output
        raw_output=$(pscale deploy-request list "$db" --org "$org" --format json)
        local exit_code=$?
        if [ $exit_code -ne 0 ]; then
            echo "Error: pscale deploy-request list returned non-zero exit code $exit_code: $raw_output"
            return 1
        fi
        local output
        output=$(echo "$raw_output" | jq ".[] | select(.number == $number) | .deployment.state")
        # deployment still running: back off exponentially and show progress
        if [ "$output" = "\"pending\"" ] || [ "$output" = "\"in_progress\"" ]; then
            # double the wait, but never beyond max_timeout
            if [ $((wait * 2)) -le $max_timeout ]; then
                wait=$((wait * 2))
            else
                wait=$max_timeout
            fi

            count=$((count+1))
            if [ $count -ge $retries ]; then
                echo "Deploy request $number is not ready after $retries retries. Exiting..."
                return 2
            fi
            echo "Deploy-request $number is not merged yet. Current status:"
            # surface the underlying Vitess migration status for debugging
            echo "show vitess_migrations\G" | pscale shell "$db" main --org "$org"
            echo "Retrying in $wait seconds..."
            sleep $wait
        elif [ "$output" = "\"complete\"" ]; then
            echo "Deploy-request $number has been merged successfully."
            return 0
        else
            echo "Deploy-request $number with unknown status: $output"
            return 3
        fi
    done
}
--------------------------------------------------------------------------------
/Dockerfile.base:
--------------------------------------------------------------------------------
# See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.192.0/containers/python-3/.devcontainer/base.Dockerfile

# [Choice] Python version: 3, 3.9, 3.8, 3.7, 3.6
ARG VARIANT="3.9"
FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT}

# [Choice] Node.js version: none, lts/*, 16, 14, 12, 10
ARG NODE_VERSION="none"
RUN if [ "${NODE_VERSION}" != "none" ]; then su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi

# [Option] Install zsh
ARG INSTALL_ZSH="true"
# [Option] Upgrade OS packages to their latest versions
ARG UPGRADE_PACKAGES="false"
# [Option] Enable non-root Docker access in container
ARG ENABLE_NONROOT_DOCKER="true"
# [Option] Use the OSS Moby Engine instead of the licensed Docker Engine
ARG USE_MOBY="true"

# Install needed packages and setup non-root user. Use a separate RUN statement to add your
# own dependencies. A user of "automatic" attempts to reuse an user ID if one already exists.
ARG USERNAME=automatic
ARG USER_UID=1000
ARG USER_GID=$USER_UID

# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
COPY requirements.txt /tmp/pip-tmp/
RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \
   && rm -rf /tmp/pip-tmp

# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
#     && apt-get -y install --no-install-recommends


# [Optional] Uncomment this line to install global node packages.
# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g " 2>&1

# Install nektos/act for running GitHub Actions workflows locally
RUN curl https://raw.githubusercontent.com/nektos/act/master/install.sh > /tmp/install-act.sh \
    && chmod a+x /tmp/install-act.sh \
    && /tmp/install-act.sh v0.2.20

COPY library-scripts/*.sh /tmp/library-scripts/


RUN apt-get update \
    && /bin/bash /tmp/library-scripts/common-debian.sh "${INSTALL_ZSH}" "${USERNAME}" "${USER_UID}" "${USER_GID}" "${UPGRADE_PACKAGES}" "true" "true" \
    && /bin/bash /tmp/library-scripts/desktop-lite-debian.sh \
    # Use Docker script from script library to set things up
    && /bin/bash /tmp/library-scripts/docker-in-docker-debian.sh "${ENABLE_NONROOT_DOCKER}" "${USERNAME}" "${USE_MOBY}" \
    && apt-get -y install --no-install-recommends mariadb-client \
    && wget https://github.com/planetscale/cli/releases/download/v0.77.0/pscale_0.77.0_linux_amd64.deb \
    && dpkg -i pscale_0.77.0_linux_amd64.deb \
    && apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && curl -sSL https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb -o /tmp/chrome.deb \
    && apt-get -y install /tmp/chrome.deb \
    # Note: removed a stray '\a' before 'alias x-www-browser' present in an earlier revision
    && ALIASES="alias google-chrome='google-chrome --disable-dev-shm-usage'\nalias google-chrome-stable='google-chrome-stable --disable-dev-shm-usage'\nalias x-www-browser='x-www-browser --disable-dev-shm-usage'\nalias gnome-www-browser='gnome-www-browser --disable-dev-shm-usage'" \
    # Fix: the original '>> tee -a /etc/bash.bashrc' redirected into a file literally
    # named "tee", so the aliases never reached /etc/bash.bashrc. printf '%b' also
    # expands the \n escapes portably, which plain echo does not guarantee.
    && printf '%b\n' "${ALIASES}" >> /etc/bash.bashrc \
    && if type zsh > /dev/null 2>&1; then printf '%b\n' "${ALIASES}" >> /etc/zsh/zshrc; fi \
    # Clean up
    && rm pscale_0.77.0_linux_amd64.deb \
    && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/* /tmp/library-scripts/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # GitHub self-hosted runner matrix build visualizations for Kubernetes and Raspberry Pi
2 |
3 | [](https://github.com/jonico/awesome-runners)
4 |
This repository contains various GitHub Actions workflows that trigger matrix builds on self-hosted GitHub Actions runners and visualize their outputs and pod allocations on LED matrices connected to a Raspberry Pi. Similar visualization scripts are also available for desktop PCs.
6 |
7 | 
8 |
9 | 
10 |
11 | 
12 |
13 | 
14 |
15 | 
16 |
[Watch a one minute video](https://user-images.githubusercontent.com/1872314/109437801-218af480-7a27-11eb-8968-acf12d6392a5.mp4) of how runners spin up based on a GitHub matrix build, watch their pods in Kubernetes, and see how they stream their work to a widget.
18 |
... and [another movie](https://user-images.githubusercontent.com/1872314/109474179-8456ad00-7a74-11eb-9a77-033e5f70c66d.mp4) in which the LED matrix connected to the Raspberry Pi Kubernetes cluster displays the output of the GitHub Actions runners.
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/__pycache__/constants.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/__pycache__/constants.cpython-39.pyc
--------------------------------------------------------------------------------
/actions-runner-controller-runner-deployment-ese.yml:
--------------------------------------------------------------------------------
# Namespace, autoscaler, and runner deployment for the "ese" environment,
# using the actions-runner-controller (summerwind.dev) CRDs.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    environment: ese
  name: github-actions-runner-ese
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  labels:
    environment: ese
  name: actions-runner-autoscaler-ese
  namespace: github-actions-runner-ese
spec:
  maxReplicas: 16
  metrics:
  # PercentageRunnersBusy: scale based on the share of busy runners
  # (thresholds/factors below; semantics per actions-runner-controller docs)
  - scaleDownFactor: "0.5"
    scaleDownThreshold: "0.2"
    scaleUpFactor: "2"
    scaleUpThreshold: "0.5"
    type: PercentageRunnersBusy
  minReplicas: 4
  # wait 5 minutes after a scale-out before allowing scale-in
  scaleDownDelaySecondsAfterScaleOut: 300
  scaleTargetRef:
    name: actions-runner-deployment-ese
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  labels:
    environment: ese
  name: actions-runner-deployment-ese
  namespace: github-actions-runner-ese
spec:
  template:
    spec:
      dockerEnabled: false
      group: raspberry
      image: ghcr.io/jonico/actions-runner:ese
      # runner labels referenced by workflow `runs-on` selectors
      labels:
      - ese
      - kubernetes
      - custom-runner
      repository: jonico/visualize-actions-matrix-builds-on-k8s
      resources:
        limits:
          cpu: 1000m
          memory: 1Gi
        requests:
          cpu: 50m
          memory: 128Mi
      tolerations:
      # evict runner pods after 10s on an unreachable node so replacements
      # can be scheduled elsewhere quickly
      - effect: NoExecute
        key: node.kubernetes.io/unreachable
        operator: Exists
        tolerationSeconds: 10
58 |
--------------------------------------------------------------------------------
/advanced-schema-stream-parameters.json:
--------------------------------------------------------------------------------
1 | {
2 | "inputs": {
3 | "image_url": "images/non-blocking-schema-change-text.png",
4 | "reset_image_url" : "images/ps-start.png",
5 | "pixel-x": "800",
6 | "pixel-y": "600",
7 | "x": "16",
8 | "y": "12",
9 | "repetitions": "2",
10 | "repetition-delay": "60000",
11 | "duration": "1000",
12 | "connections-per-cell": "10"
13 | }
14 | }
--------------------------------------------------------------------------------
/connect-to-kubernetes-via-act.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Trigger the connect-to-gcp workflow locally via nektos/act, passing cluster
# coordinates and the service-account key as workflow secrets.
# Fix: quote the expansions — GCP_SA_KEY in particular is a multi-line JSON
# value and unquoted CLUSTER_NAME/CLUSTER_LOCATION would word-split.
act workflow_dispatch -W .github/workflows/connect-to-gcp.yaml \
    -s CLUSTER_NAME="$CLUSTER_NAME" \
    -s CLUSTER_LOCATION="$CLUSTER_LOCATION" \
    -s PROJECT_ID \
    -s GCP_SA_KEY="$GCP_SA_KEY" \
    -b -r
--------------------------------------------------------------------------------
/constants.py:
--------------------------------------------------------------------------------
# Pixel-stream operation modes, read from the optional "operation" column of
# the pixel_matrix table by the GUI when the advanced schema is active.
PIN: str = "PIN"  # a pinned pixel is only overwritten by another PIN update (see gui.py)
OVERLAY: str = "OVERLAY"  # NOTE(review): semantics not visible here — verify against publishers
REPLACE: str = "REPLACE"  # NOTE(review): semantics not visible here — verify against publishers
--------------------------------------------------------------------------------
/events-nektos.json:
--------------------------------------------------------------------------------
1 | {
2 | "enterprise": {
3 | "avatar_url": "https://avatars.octodemo.com/b/1?",
4 | "created_at": "2018-11-11T19:08:55Z",
5 | "description": "This is GitHub",
6 | "html_url": "https://octodemo.com/enterprises/github",
7 | "id": 1,
8 | "name": "GitHub",
9 | "node_id": "MDEwOkVudGVycHJpc2Ux",
10 | "slug": "github",
11 | "updated_at": "2018-11-12T12:10:14Z",
12 | "website_url": "https://github.com"
13 | },
14 | "inputs": {
15 | "cluster": "gcp",
16 | "environment": "foobar",
17 | "image_url": "matrix-finished.png",
18 | "reset_image_url" : "matrix-start.png",
19 | "x": "16",
20 | "y": "12",
21 | "duration": "5000",
22 | "redis_host": "35.245.139.140"
23 | },
24 | "organization": {
25 | "avatar_url": "https://avatars.octodemo.com/u/286?",
26 | "description": "Hyrule - The magic kingdom",
27 | "events_url": "https://octodemo.com/api/v3/orgs/Hyrule/events",
28 | "hooks_url": "https://octodemo.com/api/v3/orgs/Hyrule/hooks",
29 | "id": 286,
30 | "issues_url": "https://octodemo.com/api/v3/orgs/Hyrule/issues",
31 | "login": "Hyrule",
32 | "members_url": "https://octodemo.com/api/v3/orgs/Hyrule/members{/member}",
33 | "node_id": "MDEyOk9yZ2FuaXphdGlvbjI4Ng==",
34 | "public_members_url": "https://octodemo.com/api/v3/orgs/Hyrule/public_members{/member}",
35 | "repos_url": "https://octodemo.com/api/v3/orgs/Hyrule/repos",
36 | "url": "https://octodemo.com/api/v3/orgs/Hyrule"
37 | },
38 | "ref": "refs/heads/master",
39 | "repository": {
40 | "archive_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/{archive_format}{/ref}",
41 | "archived": false,
42 | "assignees_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/assignees{/user}",
43 | "blobs_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/git/blobs{/sha}",
44 | "branches_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/branches{/branch}",
45 | "clone_url": "https://octodemo.com/Hyrule/raspberry-pi-actions.git",
46 | "collaborators_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/collaborators{/collaborator}",
47 | "comments_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/comments{/number}",
48 | "commits_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/commits{/sha}",
49 | "compare_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/compare/{base}...{head}",
50 | "contents_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/contents/{+path}",
51 | "contributors_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/contributors",
52 | "created_at": "2021-02-15T16:22:55Z",
53 | "default_branch": "master",
54 | "deployments_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/deployments",
55 | "description": "Actions for jonico's Raspberry PI",
56 | "disabled": false,
57 | "downloads_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/downloads",
58 | "events_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/events",
59 | "fork": false,
60 | "forks": 0,
61 | "forks_count": 0,
62 | "forks_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/forks",
63 | "full_name": "Hyrule/raspberry-pi-actions",
64 | "git_commits_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/git/commits{/sha}",
65 | "git_refs_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/git/refs{/sha}",
66 | "git_tags_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/git/tags{/sha}",
67 | "git_url": "git://octodemo.com/Hyrule/raspberry-pi-actions.git",
68 | "has_downloads": true,
69 | "has_issues": true,
70 | "has_pages": false,
71 | "has_projects": true,
72 | "has_wiki": true,
73 | "homepage": null,
74 | "hooks_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/hooks",
75 | "html_url": "https://octodemo.com/Hyrule/raspberry-pi-actions",
76 | "id": 2276,
77 | "issue_comment_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/issues/comments{/number}",
78 | "issue_events_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/issues/events{/number}",
79 | "issues_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/issues{/number}",
80 | "keys_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/keys{/key_id}",
81 | "labels_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/labels{/name}",
82 | "language": "Python",
83 | "languages_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/languages",
84 | "license": null,
85 | "merges_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/merges",
86 | "milestones_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/milestones{/number}",
87 | "mirror_url": null,
88 | "name": "raspberry-pi-actions",
89 | "node_id": "MDEwOlJlcG9zaXRvcnkyMjc2",
90 | "notifications_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/notifications{?since,all,participating}",
91 | "open_issues": 0,
92 | "open_issues_count": 0,
93 | "owner": {
94 | "avatar_url": "https://avatars.octodemo.com/u/286?",
95 | "events_url": "https://octodemo.com/api/v3/users/Hyrule/events{/privacy}",
96 | "followers_url": "https://octodemo.com/api/v3/users/Hyrule/followers",
97 | "following_url": "https://octodemo.com/api/v3/users/Hyrule/following{/other_user}",
98 | "gists_url": "https://octodemo.com/api/v3/users/Hyrule/gists{/gist_id}",
99 | "gravatar_id": "",
100 | "html_url": "https://octodemo.com/Hyrule",
101 | "id": 286,
102 | "login": "Hyrule",
103 | "node_id": "MDEyOk9yZ2FuaXphdGlvbjI4Ng==",
104 | "organizations_url": "https://octodemo.com/api/v3/users/Hyrule/orgs",
105 | "received_events_url": "https://octodemo.com/api/v3/users/Hyrule/received_events",
106 | "repos_url": "https://octodemo.com/api/v3/users/Hyrule/repos",
107 | "site_admin": false,
108 | "starred_url": "https://octodemo.com/api/v3/users/Hyrule/starred{/owner}{/repo}",
109 | "subscriptions_url": "https://octodemo.com/api/v3/users/Hyrule/subscriptions",
110 | "type": "Organization",
111 | "url": "https://octodemo.com/api/v3/users/Hyrule"
112 | },
113 | "private": true,
114 | "pulls_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/pulls{/number}",
115 | "pushed_at": "2021-02-21T01:31:47Z",
116 | "releases_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/releases{/id}",
117 | "size": 153,
118 | "ssh_url": "git@octodemo.com:Hyrule/raspberry-pi-actions.git",
119 | "stargazers_count": 0,
120 | "stargazers_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/stargazers",
121 | "statuses_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/statuses/{sha}",
122 | "subscribers_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/subscribers",
123 | "subscription_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/subscription",
124 | "svn_url": "https://octodemo.com/Hyrule/raspberry-pi-actions",
125 | "tags_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/tags",
126 | "teams_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/teams",
127 | "trees_url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions/git/trees{/sha}",
128 | "updated_at": "2021-02-21T01:31:52Z",
129 | "url": "https://octodemo.com/api/v3/repos/Hyrule/raspberry-pi-actions",
130 | "watchers": 0,
131 | "watchers_count": 0
132 | },
133 | "sender": {
134 | "avatar_url": "https://avatars.octodemo.com/u/306?",
135 | "events_url": "https://octodemo.com/api/v3/users/zelda/events{/privacy}",
136 | "followers_url": "https://octodemo.com/api/v3/users/zelda/followers",
137 | "following_url": "https://octodemo.com/api/v3/users/zelda/following{/other_user}",
138 | "gists_url": "https://octodemo.com/api/v3/users/zelda/gists{/gist_id}",
139 | "gravatar_id": "",
140 | "html_url": "https://octodemo.com/zelda",
141 | "id": 306,
142 | "login": "zelda",
143 | "node_id": "MDQ6VXNlcjMwNg==",
144 | "organizations_url": "https://octodemo.com/api/v3/users/zelda/orgs",
145 | "received_events_url": "https://octodemo.com/api/v3/users/zelda/received_events",
146 | "repos_url": "https://octodemo.com/api/v3/users/zelda/repos",
147 | "site_admin": false,
148 | "starred_url": "https://octodemo.com/api/v3/users/zelda/starred{/owner}{/repo}",
149 | "subscriptions_url": "https://octodemo.com/api/v3/users/zelda/subscriptions",
150 | "type": "User",
151 | "url": "https://octodemo.com/api/v3/users/zelda"
152 | },
153 | "workflow": ".github/workflows/visualize-matrix-build.yaml"
154 | }
155 |
--------------------------------------------------------------------------------
/fluxbox/menu:
--------------------------------------------------------------------------------
1 | [begin] ( Application Menu )
2 | [exec] (Start Matrix Visualization GUI) { tilix -e /bin/bash -il ./start-gui.sh } <>
3 | [exec] (File Manager) { nautilus ~ } <>
4 | [exec] (Text Editor) { mousepad } <>
5 | [exec] (Terminal) { tilix -e /bin/bash -il } <>
6 | [exec] (Web Browser) { x-www-browser --disable-dev-shm-usage } <>
7 | [submenu] (System) {}
8 | [exec] (Set Resolution) { tilix -t "Set Resolution" -e bash /usr/local/bin/set-resolution } <>
9 | [exec] (Edit Application Menu) { mousepad ~/.fluxbox/menu } <>
10 | [exec] (Passwords and Keys) { seahorse } <>
11 | [exec] (Top Processes) { tilix -t "Top" -e htop } <>
12 | [exec] (Disk Utilization) { tilix -t "Disk Utilization" -e ncdu / } <>
13 | [exec] (Editres) {editres} <>
14 | [exec] (Xfontsel) {xfontsel} <>
15 | [exec] (Xkill) {xkill} <>
16 | [exec] (Xrefresh) {xrefresh} <>
17 | [end]
18 | [config] (Configuration)
19 | [workspaces] (Workspaces)
20 | [end]
21 |
--------------------------------------------------------------------------------
/gui.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import argparse
3 | import constants
4 | import os
5 | import io
6 | from urllib.parse import urlparse
7 | import PySimpleGUI as sg
8 | from PIL import Image
9 | import pymysql.cursors
10 |
11 |
class StreamPixels():
    """Desktop viewer that polls a MySQL/Vitess `pixel_matrix` table for pixel
    updates and renders them onto a PySimpleGUI graph.

    The database connection string is taken from the DATABASE_URL environment
    variable; image geometry, the seed image, and the poll interval come from
    command-line flags.
    """

    def __init__(self, *args, **kwargs):
        # Parsing happens eagerly here, so constructing the object consumes
        # sys.argv.
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("--max-x", help="max x pixel", default=800, type=int)
        self.parser.add_argument("--max-y", help="max y pixel", default=600, type=int)
        # NOTE(review): help text says "redis environment" but the value is used
        # as the `environment` column of the MySQL pixel_matrix table below.
        self.parser.add_argument("--environment", help="redis environment", default="foobar", type=str)
        # default is the string "1000", but type=int converts it to 1000
        self.parser.add_argument("--sleep-interval", help="sleep interval in milliseconds", default="1000", type=int)
        self.parser.add_argument("--image-file", help="image file location", default="images/matrix-start.png", type=str)
        self.args = self.parser.parse_args()

    def run(self):
        """Main loop: seed the in-memory pixel cache from the start image,
        clear stale rows for this environment, then repeatedly poll
        pixel_matrix for new rows and repaint the window when pixels change.
        Blocks until the window is closed.
        """
        maxX = self.args.max_x
        maxY = self.args.max_y
        environment = self.args.environment


        image_file = self.args.image_file
        image = Image.open(image_file)

        # tile/sample the seed image via modulo below, so it need not match
        # the maxX x maxY canvas size
        rgb_im = image.convert('RGB')
        width, height = rgb_im.size

        # key "<environment>/<x>/<y>" -> (r, g, b) and last-seen operation
        pixelCache = {}
        operationCache = {}
        sleepInterval = self.args.sleep_interval

        url = urlparse(os.environ.get('DATABASE_URL'))


        # connect to MySQL with TLS enabled
        connection = pymysql.connect(host=url.hostname,
                                     user=url.username,
                                     password=url.password,
                                     db=url.path[1:],
                                     ssl={'ca': 'certs.pem'})

        #connection = pymysql.connect(user=url.username,password=url.password, host=url.hostname,port=url.port, database=url.path[1:], clieent)
        cursor = connection.cursor()
        # READ COMMITTED so the polling loop sees rows committed by publishers
        cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED")

        # clear Vitess and cache at the beginning
        # (LIMIT 99999 — presumably to stay under a row-count cap; verify)
        clear_environment=("delete from pixel_matrix where environment = %s limit 99999")
        cursor.execute(clear_environment, environment)
        connection.commit()

        # seed the caches from the start image, tiling it across the canvas
        for x in range(maxX):
            for y in range(maxY):
                key="%s/%d/%d" % (environment,x,y)
                r, g, b = rgb_im.getpixel((x%width,y%height))
                pixelCache[key]=(r,g,b)
                operationCache[key] = None


        bio = io.BytesIO()
        image.save(bio, format="PNG")
        del image

        layout = [[sg.Graph(
            canvas_size=(maxX, maxY),
            graph_bottom_left=(0, 0),
            graph_top_right=(maxX, maxY),
            key="-GRAPH-",
            change_submits=True,  # mouse click events
        )]
        ]

        sg.SetOptions(element_padding=(0, 0))
        # right-click menu toggles between the basic and advanced table schema
        menu = ['&Right', ['Use advanced schema', 'Use basic schema']]
        window = sg.Window('Stream-Pixel-PS', layout, margins=(0,0), size=(maxX, maxY), right_click_menu=menu, finalize=True)

        #window = sg.Window('Stream-Pixel-GUI').Layout(layout).Finalize()
        window.Maximize()
        fullScreen = True
        graph = window["-GRAPH-"]
        graph.draw_image(data=bio.getvalue(), location=(0,maxY))

        needScreenUpdate = False
        useAdvancedSchema = False

        # high-water mark: only rows with id greater than this are fetched;
        # rebound inside the loop from the rows read
        id = 0
        while True:
            # window.read doubles as the poll timer (timeout in milliseconds)
            event, values = window.read(timeout=sleepInterval)
            # perform button and keyboard operations
            if event == sg.WIN_CLOSED:
                break

            elif event == "-GRAPH-":
                # clicking the graph toggles maximized/normal window state
                if fullScreen:
                    print("Minimize")
                    window.Normal()
                    fullScreen = False
                else:
                    print("Maxmimize")
                    window.Maximize()
                    fullScreen = True
            elif event == "Use advanced schema":
                print ("Switch to advanced schema")
                useAdvancedSchema = True
            elif event == "Use basic schema":
                print ("Switch back to basic schema")
                useAdvancedSchema = False

            # advanced schema adds the per-row "operation" column (see constants)
            if useAdvancedSchema == False:
                line_query = ("select id, cell, pixel_data from pixel_matrix where environment = %s and id > %s order by ID LIMIT 500")
            else:
                line_query = ("select id, cell, pixel_data, operation from pixel_matrix where environment = %s and id > %s order by ID LIMIT 500")

            cursor.execute(line_query, (environment, id))
            rows=cursor.fetchall()

            #for (id, cell, pixel_data) in rows:
            for row in rows:
                operation = None
                if useAdvancedSchema:
                    (id, cell, pixel_data, operation) = row
                else:
                    # deliberately rebinds the outer `id` high-water mark
                    (id, cell, pixel_data) = row
                # pixel_data holds newline-separated "x,y,r,g,b" records
                for values in pixel_data.split("\n"):
                    if not values:
                        continue
                    x, y, red, green, blue = values.split(",")
                    key=("%s/%s/%s") % (environment,x,y)

                    cachedOperation = operationCache[key]
                    if cachedOperation == constants.PIN:
                        # if a pinned pixel should be replaced, the PIN operation has to be used
                        if operation != constants.PIN:
                            continue

                    if operation != cachedOperation:
                        operationCache[key] = operation

                    value=(int(red),int(green),int(blue))
                    cachedValue = pixelCache[key]
                    if (cachedValue != value):
                        needScreenUpdate = True
                        pixelCache[key]=value



            #clear_environment = ("delete from pixel_matrix where environment = %s and cell= %s")
            #cursor.execute(clear_environment, (environment, cell))

            # commit to end the read snapshot so the next poll sees new rows
            connection.commit()

            if (needScreenUpdate):
                # rebuild the full frame from the pixel cache and redraw
                img = Image.new('RGB', (maxX, maxY))
                for x in range (maxX):
                    for y in range (maxY):
                        key="%s/%d/%s" % (environment,x,y)
                        red, green, blue = pixelCache[key]
                        img.putpixel((x,y), (red,green,blue))
                bio = io.BytesIO()
                img.save(bio, format="PNG")
                graph.draw_image(data=bio.getvalue(), location=(0,maxY))
                window.refresh()
                needScreenUpdate=False
                del img
                print ("updated screen")
171 |
# Script entry point: build the app (parses sys.argv) and enter the GUI loop.
if __name__ == "__main__":
    StreamPixels().run()
176 |
--------------------------------------------------------------------------------
/images/blackandblue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/blackandblue.png
--------------------------------------------------------------------------------
/images/blackbluenumbers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/blackbluenumbers.png
--------------------------------------------------------------------------------
/images/blackbluereset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/blackbluereset.png
--------------------------------------------------------------------------------
/images/done.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/done.png
--------------------------------------------------------------------------------
/images/github-actions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/github-actions.png
--------------------------------------------------------------------------------
/images/github-longer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/github-longer.png
--------------------------------------------------------------------------------
/images/hubot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/hubot.png
--------------------------------------------------------------------------------
/images/images/static_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/images/static_image.jpg
--------------------------------------------------------------------------------
/images/images/winterfest.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/images/winterfest.png
--------------------------------------------------------------------------------
/images/matrix-construct-grid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/matrix-construct-grid.png
--------------------------------------------------------------------------------
/images/matrix-construct-vitess.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/matrix-construct-vitess.png
--------------------------------------------------------------------------------
/images/matrix-finished.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/matrix-finished.png
--------------------------------------------------------------------------------
/images/matrix-reset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/matrix-reset.png
--------------------------------------------------------------------------------
/images/matrix-start-witout-numbers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/matrix-start-witout-numbers.png
--------------------------------------------------------------------------------
/images/matrix-start.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/matrix-start.png
--------------------------------------------------------------------------------
/images/monahubot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/monahubot.png
--------------------------------------------------------------------------------
/images/non-blocking-schema-change-text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/non-blocking-schema-change-text.png
--------------------------------------------------------------------------------
/images/non-blocking-schema-change.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/non-blocking-schema-change.png
--------------------------------------------------------------------------------
/images/numbers-blue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/numbers-blue.png
--------------------------------------------------------------------------------
/images/numbers-grey.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/numbers-grey.png
--------------------------------------------------------------------------------
/images/numbers-white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/numbers-white.png
--------------------------------------------------------------------------------
/images/ps-finished.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/ps-finished.png
--------------------------------------------------------------------------------
/images/ps-start.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/ps-start.png
--------------------------------------------------------------------------------
/images/publish-pixels-ps.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Stream an image's pixels into a MySQL/PlanetScale `matrix` table.

Each inserted row holds one image column ("lineN") as a newline-separated
list of "x,y,r,g,b" values, scoped to an environment name.
"""
import argparse
import os
import sys
import time
from urllib.parse import urlparse
from urllib.request import urlopen

import pymysql.cursors
from PIL import Image


class StreamPixels(object):
    """Reads an image, scales it to the matrix size and batch-inserts its pixels."""

    def __init__(self, *args, **kwargs):
        # All options have defaults so the script can run without arguments.
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("--max-x", help="max x pixel", default=16, type=int)
        self.parser.add_argument("--job-x", help="job x", default=0, type=int)
        self.parser.add_argument("--max-y", help="max y pixel", default=16, type=int)
        self.parser.add_argument("--job-y", help="job y", default=0, type=int)
        self.parser.add_argument("--environment", help="environment", default="barfoo", type=str)
        self.parser.add_argument("--image-file", help="image file location", default="images/static_image.jpg", type=str)
        self.parser.add_argument("--sleep-interval", help="sleep interval in milliseconds", default="0", type=int)
        self.args = self.parser.parse_args()

    def run(self):
        """Load the image, clear the environment's rows, then insert all pixel columns."""
        maxX = self.args.max_x
        maxY = self.args.max_y
        # Each parallel job writes into its own tile of the overall matrix.
        offsetX = self.args.job_x * maxX
        offsetY = self.args.job_y * maxY

        environment = self.args.environment
        sleepInterval = self.args.sleep_interval

        image_file = self.args.image_file
        if image_file.startswith("http"):
            image = Image.open(urlopen(image_file))
        else:
            image = Image.open(image_file)

        width, height = image.size
        # NOTE(review): resizes only when BOTH dimensions differ -- confirm `and` is intended.
        if width != maxX and height != maxY:
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
            image.thumbnail((maxX, maxY), Image.LANCZOS)

        # DATABASE_URL is expected in the form mysql://user:pass@host:port/db
        url = urlparse(os.environ.get('DATABASE_URL'))

        connection = pymysql.connect(user=url.username, password=url.password, host=url.hostname, port=url.port)
        cursor = connection.cursor()

        rgb_im = image.convert('RGB')
        width, height = rgb_im.size

        # clear screen: remove any previous frame for this environment
        clear_environment = ("delete from matrix where environment = %s")
        cursor.execute(clear_environment, (environment,))
        connection.commit()

        add_pixels = ("INSERT INTO matrix "
                      "(environment, job, lines ) "
                      "VALUES (%s, %s, %s)")

        records_to_insert = []
        for x in range(maxX):
            values = ""
            for y in range(maxY):
                # Tile the source image when it is smaller than the target matrix.
                r, g, b = rgb_im.getpixel((x % width, y % height))
                value = ("%d,%d,%d,%d,%d") % (x + offsetX, y + offsetY, r, g, b)
                values += value
                values += "\n"
            records_to_insert.append((environment, ("line%d") % (x), values))
            # Flush in small batches; MySQL's text column would be too small
            # to write all data in one call.
            if (x != 0 and x % 2 == 0):
                cursor.executemany(add_pixels, records_to_insert)
                records_to_insert = []
        # Bug fix: flush the leftover batch -- previously the last column(s)
        # were silently dropped whenever maxX - 1 was not a flush point.
        if records_to_insert:
            cursor.executemany(add_pixels, records_to_insert)

        connection.commit()
        cursor.close()
        connection.close()


# Main function
if __name__ == "__main__":
    stream_pixels = StreamPixels()
    stream_pixels.run()
89 |
--------------------------------------------------------------------------------
/images/publish-pixels.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Publish an image's pixels to a Redis hash, one field ("lineN") per image column."""
import argparse
import os
import sys
import time
from urllib.request import urlopen

import redis
from PIL import Image


class StreamPixels(object):
    """Reads an image, scales it to the matrix size and writes it to Redis."""

    def __init__(self, *args, **kwargs):
        # All options have defaults so the script can run without arguments.
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("--max-x", help="max x pixel", default=16, type=int)
        self.parser.add_argument("--job-x", help="job x", default=0, type=int)
        self.parser.add_argument("--max-y", help="max y pixel", default=16, type=int)
        self.parser.add_argument("--job-y", help="job y", default=0, type=int)
        self.parser.add_argument("--environment", help="redis environment", default="foobar", type=str)
        self.parser.add_argument("--image-file", help="image file location", default="images/static_image.jpg", type=str)
        self.parser.add_argument("--redis-host", help="Redis Host", default="redis-master.redis.svc.cluster.local", type=str)
        self.parser.add_argument("--sleep-interval", help="sleep interval in milliseconds", default="0", type=int)
        self.args = self.parser.parse_args()

    def run(self):
        """Load the image, clear the environment's hash, then write all pixel columns."""
        maxX = self.args.max_x
        maxY = self.args.max_y
        # Each parallel job writes into its own tile of the overall matrix.
        offsetX = self.args.job_x * maxX
        offsetY = self.args.job_y * maxY

        environment = self.args.environment
        sleepInterval = self.args.sleep_interval

        image_file = self.args.image_file
        if image_file.startswith("http"):
            image = Image.open(urlopen(image_file))
        else:
            image = Image.open(image_file)

        width, height = image.size
        # NOTE(review): resizes only when BOTH dimensions differ -- confirm `and` is intended.
        if width != maxX and height != maxY:
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
            image.thumbnail((maxX, maxY), Image.LANCZOS)

        # REDIS_PASSWORD comes from the environment so it never appears on the command line.
        redisClient = redis.Redis(host=self.args.redis_host, port=6379, db=0, password=os.environ.get('REDIS_PASSWORD'), decode_responses=True)

        rgb_im = image.convert('RGB')
        width, height = rgb_im.size

        # clear screen
        redisClient.delete(environment)

        for x in range(maxX):
            values = ""
            for y in range(maxY):
                # Tile the source image when it is smaller than the target matrix.
                r, g, b = rgb_im.getpixel((x % width, y % height))
                values += ("%d,%d,%d,%d,%d") % (x + offsetX, y + offsetY, r, g, b)
                values += "\n"
            # One hash field per image column keeps individual writes small.
            redisClient.hset(environment, ("line%d") % (x), values)


# Main function
if __name__ == "__main__":
    stream_pixels = StreamPixels()
    stream_pixels.run()
75 |
--------------------------------------------------------------------------------
/images/reset-green.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/reset-green.png
--------------------------------------------------------------------------------
/images/reset-grey.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/reset-grey.png
--------------------------------------------------------------------------------
/images/reset.pxi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/reset.pxi
--------------------------------------------------------------------------------
/images/run.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Display an image on a 64x64 RGB LED matrix (Adafruit HAT) for 20 seconds."""
import sys
import time

from PIL import Image
from rgbmatrix import RGBMatrix, RGBMatrixOptions

# Require exactly one argument: the path of the image to display.
if len(sys.argv) < 2:
    sys.exit("Require an image argument")
else:
    image_file = sys.argv[1]

image = Image.open(image_file)

# Configuration for the matrix
options = RGBMatrixOptions()
options.rows = 64
options.cols = 64
options.brightness = 80
options.chain_length = 1
options.parallel = 1
options.hardware_mapping = 'adafruit-hat'

matrix = RGBMatrix(options = options)

# Make image fit our screen.
# Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
image.thumbnail((matrix.width, matrix.height), Image.LANCZOS)

matrix.SetImage(image.convert('RGB'))

# Keep the image on screen for a fixed 20 seconds, then exit cleanly.
time.sleep(20)
sys.exit(0)
--------------------------------------------------------------------------------
/images/summer-finished.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/summer-finished.png
--------------------------------------------------------------------------------
/images/summer-start.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonico/visualize-actions-matrix-builds-on-k8s/37eb0ee5954e457ee02130628e15c6372f4699c5/images/summer-start.png
--------------------------------------------------------------------------------
/images/visualize-matrix-build.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Visualize one matrix-build job by painting its tile of an image into Redis,
spread over the job's configured duration."""
import argparse
import os
import sys
import time
from urllib.request import urlopen

import redis
from PIL import Image


class VisualizeMatrixBuild(object):
    """Paints the pixels of one (job_x, job_y) tile over the job's duration."""

    def __init__(self, *args, **kwargs):
        # All options have defaults so the script can run without arguments.
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("--max-job-x", help="max-job-x", default=8, type=int)
        self.parser.add_argument("--max-job-y", help="max job y", default=4, type=int)
        self.parser.add_argument("--max-x", help="max x pixels", default=32, type=int)
        self.parser.add_argument("--max-y", help="max y pixels", default=16, type=int)
        self.parser.add_argument("--job-x", help="job x", default=1, type=int)
        self.parser.add_argument("--job-y", help="job y", default=1, type=int)
        self.parser.add_argument("--environment", help="redis environment", default="foobar", type=str)
        self.parser.add_argument("--image-file", help="image file location", default="images/static_image.jpg", type=str)
        self.parser.add_argument("--redis-host", help="Redis Host", default="redis-master.redis.svc.cluster.local", type=str)
        self.parser.add_argument("--duration", help="job in milliseconds", default="5000", type=int)
        self.args = self.parser.parse_args()

    def run(self):
        """Compute this job's tile, then write it to Redis line by line with pacing."""
        maxX = self.args.max_x
        maxY = self.args.max_y

        # Size of this job's tile in pixels.
        pixelsX = int(maxX / self.args.max_job_x)
        pixelsY = int(maxY / self.args.max_job_y)

        # Bug fix: more jobs than pixels would yield an empty tile and a
        # ZeroDivisionError below -- fail with a clear message instead.
        if pixelsX == 0 or pixelsY == 0:
            sys.exit("matrix of %dx%d pixels cannot be split into %dx%d jobs"
                     % (maxX, maxY, self.args.max_job_x, self.args.max_job_y))

        # Jobs are 1-based; convert to a pixel offset into the overall matrix.
        offsetX = (self.args.job_x - 1) * pixelsX
        offsetY = (self.args.job_y - 1) * pixelsY

        numberPixels = pixelsX * pixelsY

        environment = self.args.environment
        duration = self.args.duration

        # Spread the tile's pixels evenly over the requested duration (milliseconds).
        sleepBetweenPixels = duration / numberPixels

        image_file = self.args.image_file
        if image_file.startswith("http"):
            image = Image.open(urlopen(image_file))
        else:
            image = Image.open(image_file)

        width, height = image.size

        # NOTE(review): resizes only when BOTH dimensions differ -- confirm `and` is intended.
        if width != maxX and height != maxY:
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
            image.thumbnail((maxX, maxY), Image.LANCZOS)

        # REDIS_PASSWORD comes from the environment so it never appears on the command line.
        redisClient = redis.Redis(host=self.args.redis_host, port=6379, db=0, password=os.environ.get('REDIS_PASSWORD'), decode_responses=True)

        rgb_im = image.convert('RGB')
        width, height = rgb_im.size

        values = ""
        for y in range(pixelsY):
            for x in range(pixelsX):
                realX = x + offsetX
                realY = y + offsetY
                # Tile the source image when it is smaller than the target matrix.
                r, g, b = rgb_im.getpixel((realX % width, realY % height))
                values += ("%d,%d,%d,%d,%d") % (realX, realY, r, g, b)
                values += "\n"

            # if there are more than 100 pixels, line by line updates will put too much stress on Redis, updating the entire cell then
            if (maxX < 100 and maxY < 100):
                hashKey = ("job/%d/%d/%d") % (self.args.job_x, self.args.job_y, y)
                redisClient.hset(environment, hashKey, values)
                values = ""
                time.sleep(sleepBetweenPixels * pixelsX / 1000)
        # Final write: in large-matrix mode this carries the whole tile,
        # in line-by-line mode it is an empty marker for the finished job.
        hashKey = ("job/%d/%d") % (self.args.job_x, self.args.job_y)
        redisClient.hset(environment, hashKey, values)


# Main function
if __name__ == "__main__":
    stream_pixels = VisualizeMatrixBuild()
    stream_pixels.run()
84 |
--------------------------------------------------------------------------------
/library-scripts/common-debian.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #-------------------------------------------------------------------------------------------------------------
3 | # Copyright (c) Microsoft Corporation. All rights reserved.
4 | # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5 | #-------------------------------------------------------------------------------------------------------------
6 | #
7 | # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/common.md
8 | # Maintainer: The VS Code and Codespaces Teams
9 | #
10 | # Syntax: ./common-debian.sh [install zsh flag] [username] [user UID] [user GID] [upgrade packages flag] [install Oh My Zsh! flag] [Add non-free packages]
11 |
12 | set -e
13 |
14 | INSTALL_ZSH=${1:-"true"}
15 | USERNAME=${2:-"automatic"}
16 | USER_UID=${3:-"automatic"}
17 | USER_GID=${4:-"automatic"}
18 | UPGRADE_PACKAGES=${5:-"true"}
19 | INSTALL_OH_MYS=${6:-"true"}
20 | ADD_NON_FREE_PACKAGES=${7:-"false"}
21 | SCRIPT_DIR="$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)"
22 | MARKER_FILE="/usr/local/etc/vscode-dev-containers/common"
23 |
24 |
25 | if [ "$(id -u)" -ne 0 ]; then
26 | echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
27 | exit 1
28 | fi
29 |
30 | # Ensure that login shells get the correct path if the user updated the PATH using ENV.
31 | rm -f /etc/profile.d/00-restore-env.sh
32 | echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
33 | chmod +x /etc/profile.d/00-restore-env.sh
34 |
35 | # If in automatic mode, determine if a user already exists, if not use vscode
36 | if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
37 | USERNAME=""
38 | POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
39 | for CURRENT_USER in ${POSSIBLE_USERS[@]}; do
40 | if id -u ${CURRENT_USER} > /dev/null 2>&1; then
41 | USERNAME=${CURRENT_USER}
42 | break
43 | fi
44 | done
45 | if [ "${USERNAME}" = "" ]; then
46 | USERNAME=vscode
47 | fi
48 | elif [ "${USERNAME}" = "none" ]; then
49 | USERNAME=root
50 | USER_UID=0
51 | USER_GID=0
52 | fi
53 |
54 | # Load markers to see which steps have already run
55 | if [ -f "${MARKER_FILE}" ]; then
56 | echo "Marker file found:"
57 | cat "${MARKER_FILE}"
58 | source "${MARKER_FILE}"
59 | fi
60 |
61 | # Ensure apt is in non-interactive to avoid prompts
62 | export DEBIAN_FRONTEND=noninteractive
63 |
# Run `apt-get update` only when the package list cache is absent or empty.
apt_get_update_if_needed()
{
    if [ -d "/var/lib/apt/lists" ] && [ "$(ls /var/lib/apt/lists/ | wc -l)" != "0" ]; then
        echo "Skipping apt-get update."
    else
        echo "Running apt-get update..."
        apt-get update
    fi
}
74 |
75 | # Run install apt-utils to avoid debconf warning then verify presence of other common developer tools and dependencies
76 | if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then
77 |
78 | package_list="apt-utils \
79 | openssh-client \
80 | gnupg2 \
81 | iproute2 \
82 | procps \
83 | lsof \
84 | htop \
85 | net-tools \
86 | psmisc \
87 | curl \
88 | wget \
89 | rsync \
90 | ca-certificates \
91 | unzip \
92 | zip \
93 | nano \
94 | vim-tiny \
95 | less \
96 | jq \
97 | lsb-release \
98 | apt-transport-https \
99 | dialog \
100 | libc6 \
101 | libgcc1 \
102 | libkrb5-3 \
103 | libgssapi-krb5-2 \
104 | libicu[0-9][0-9] \
105 | liblttng-ust0 \
106 | libstdc++6 \
107 | zlib1g \
108 | locales \
109 | sudo \
110 | ncdu \
111 | man-db \
112 | strace \
113 | manpages \
114 | manpages-dev \
115 | init-system-helpers"
116 |
117 | # Needed for adding manpages-posix and manpages-posix-dev which are non-free packages in Debian
118 | if [ "${ADD_NON_FREE_PACKAGES}" = "true" ]; then
119 | # Bring in variables from /etc/os-release like VERSION_CODENAME
120 | . /etc/os-release
121 | sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list
122 | sed -i -E "s/deb-src http:\/\/(deb|httredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list
123 | sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list
124 | sed -i -E "s/deb-src http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list
125 | sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list
126 | sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list
127 | sed -i "s/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list
128 | sed -i "s/deb-src http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list
129 | # Handle bullseye location for security https://www.debian.org/releases/bullseye/amd64/release-notes/ch-information.en.html
130 | sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list
131 | sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list
132 | echo "Running apt-get update..."
133 | apt-get update
134 | package_list="${package_list} manpages-posix manpages-posix-dev"
135 | else
136 | apt_get_update_if_needed
137 | fi
138 |
139 | # Install libssl1.1 if available
140 | if [[ ! -z $(apt-cache --names-only search ^libssl1.1$) ]]; then
141 | package_list="${package_list} libssl1.1"
142 | fi
143 |
144 | # Install appropriate version of libssl1.0.x if available
145 | libssl_package=$(dpkg-query -f '${db:Status-Abbrev}\t${binary:Package}\n' -W 'libssl1\.0\.?' 2>&1 || echo '')
146 | if [ "$(echo "$LIlibssl_packageBSSL" | grep -o 'libssl1\.0\.[0-9]:' | uniq | sort | wc -l)" -eq 0 ]; then
147 | if [[ ! -z $(apt-cache --names-only search ^libssl1.0.2$) ]]; then
148 | # Debian 9
149 | package_list="${package_list} libssl1.0.2"
150 | elif [[ ! -z $(apt-cache --names-only search ^libssl1.0.0$) ]]; then
151 | # Ubuntu 18.04, 16.04, earlier
152 | package_list="${package_list} libssl1.0.0"
153 | fi
154 | fi
155 |
156 | echo "Packages to verify are installed: ${package_list}"
157 | apt-get -y install --no-install-recommends ${package_list} 2> >( grep -v 'debconf: delaying package configuration, since apt-utils is not installed' >&2 )
158 |
159 | # Install git if not already installed (may be more recent than distro version)
160 | if ! type git > /dev/null 2>&1; then
161 | apt-get -y install --no-install-recommends git
162 | fi
163 |
164 | PACKAGES_ALREADY_INSTALLED="true"
165 | fi
166 |
167 | # Get to latest versions of all packages
168 | if [ "${UPGRADE_PACKAGES}" = "true" ]; then
169 | apt_get_update_if_needed
170 | apt-get -y upgrade --no-install-recommends
171 | apt-get autoremove -y
172 | fi
173 |
174 | # Ensure at least the en_US.UTF-8 UTF-8 locale is available.
175 | # Common need for both applications and things like the agnoster ZSH theme.
176 | if [ "${LOCALE_ALREADY_SET}" != "true" ] && ! grep -o -E '^\s*en_US.UTF-8\s+UTF-8' /etc/locale.gen > /dev/null; then
177 | echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
178 | locale-gen
179 | LOCALE_ALREADY_SET="true"
180 | fi
181 |
182 | # Create or update a non-root user to match UID/GID.
183 | if id -u ${USERNAME} > /dev/null 2>&1; then
184 | # User exists, update if needed
185 | if [ "${USER_GID}" != "automatic" ] && [ "$USER_GID" != "$(id -G $USERNAME)" ]; then
186 | groupmod --gid $USER_GID $USERNAME
187 | usermod --gid $USER_GID $USERNAME
188 | fi
189 | if [ "${USER_UID}" != "automatic" ] && [ "$USER_UID" != "$(id -u $USERNAME)" ]; then
190 | usermod --uid $USER_UID $USERNAME
191 | fi
192 | else
193 | # Create user
194 | if [ "${USER_GID}" = "automatic" ]; then
195 | groupadd $USERNAME
196 | else
197 | groupadd --gid $USER_GID $USERNAME
198 | fi
199 | if [ "${USER_UID}" = "automatic" ]; then
200 | useradd -s /bin/bash --gid $USERNAME -m $USERNAME
201 | else
202 | useradd -s /bin/bash --uid $USER_UID --gid $USERNAME -m $USERNAME
203 | fi
204 | fi
205 |
206 | # Add add sudo support for non-root user
207 | if [ "${USERNAME}" != "root" ] && [ "${EXISTING_NON_ROOT_USER}" != "${USERNAME}" ]; then
208 | echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME
209 | chmod 0440 /etc/sudoers.d/$USERNAME
210 | EXISTING_NON_ROOT_USER="${USERNAME}"
211 | fi
212 |
213 | # ** Shell customization section **
214 | if [ "${USERNAME}" = "root" ]; then
215 | user_rc_path="/root"
216 | else
217 | user_rc_path="/home/${USERNAME}"
218 | fi
219 |
220 | # Restore user .bashrc defaults from skeleton file if it doesn't exist or is empty
221 | if [ ! -f "${user_rc_path}/.bashrc" ] || [ ! -s "${user_rc_path}/.bashrc" ] ; then
222 | cp /etc/skel/.bashrc "${user_rc_path}/.bashrc"
223 | fi
224 |
225 | # Restore user .profile defaults from skeleton file if it doesn't exist or is empty
226 | if [ ! -f "${user_rc_path}/.profile" ] || [ ! -s "${user_rc_path}/.profile" ] ; then
227 | cp /etc/skel/.profile "${user_rc_path}/.profile"
228 | fi
229 |
230 | # .bashrc/.zshrc snippet
231 | rc_snippet="$(cat << 'EOF'
232 |
233 | if [ -z "${USER}" ]; then export USER=$(whoami); fi
234 | if [[ "${PATH}" != *"$HOME/.local/bin"* ]]; then export PATH="${PATH}:$HOME/.local/bin"; fi
235 |
236 | # Display optional first run image specific notice if configured and terminal is interactive
237 | if [ -t 1 ] && [[ "${TERM_PROGRAM}" = "vscode" || "${TERM_PROGRAM}" = "codespaces" ]] && [ ! -f "$HOME/.config/vscode-dev-containers/first-run-notice-already-displayed" ]; then
238 | if [ -f "/usr/local/etc/vscode-dev-containers/first-run-notice.txt" ]; then
239 | cat "/usr/local/etc/vscode-dev-containers/first-run-notice.txt"
240 | elif [ -f "/workspaces/.codespaces/shared/first-run-notice.txt" ]; then
241 | cat "/workspaces/.codespaces/shared/first-run-notice.txt"
242 | fi
243 | mkdir -p "$HOME/.config/vscode-dev-containers"
244 | # Mark first run notice as displayed after 10s to avoid problems with fast terminal refreshes hiding it
245 | ((sleep 10s; touch "$HOME/.config/vscode-dev-containers/first-run-notice-already-displayed") &)
246 | fi
247 |
248 | # Set the default git editor if not already set
249 | if [ -z "$(git config --get core.editor)" ] && [ -z "${GIT_EDITOR}" ]; then
250 | if [ "${TERM_PROGRAM}" = "vscode" ]; then
251 | if [[ -n $(command -v code-insiders) && -z $(command -v code) ]]; then
252 | export GIT_EDITOR="code-insiders --wait"
253 | else
254 | export GIT_EDITOR="code --wait"
255 | fi
256 | fi
257 | fi
258 |
259 | EOF
260 | )"
261 |
262 | # code shim, it fallbacks to code-insiders if code is not available
263 | cat << 'EOF' > /usr/local/bin/code
264 | #!/bin/sh
265 |
266 | get_in_path_except_current() {
267 | which -a "$1" | grep -A1 "$0" | grep -v "$0"
268 | }
269 |
270 | code="$(get_in_path_except_current code)"
271 |
272 | if [ -n "$code" ]; then
273 | exec "$code" "$@"
274 | elif [ "$(command -v code-insiders)" ]; then
275 | exec code-insiders "$@"
276 | else
277 | echo "code or code-insiders is not installed" >&2
278 | exit 127
279 | fi
280 | EOF
281 | chmod +x /usr/local/bin/code
282 |
# systemctl shim - tells people to use 'service' if systemd is not running.
# Bug fix: the exec path was "/bin/systemctl/systemctl", which fails with
# "Not a directory"; the real binary lives at /bin/systemctl.
cat << 'EOF' > /usr/local/bin/systemctl
#!/bin/sh
set -e
if [ -d "/run/systemd/system" ]; then
    exec /bin/systemctl "$@"
else
    echo '\n"systemd" is not running in this container due to its overhead.\nUse the "service" command to start services instead. e.g.: \n\nservice --status-all'
fi
EOF
chmod +x /usr/local/bin/systemctl
294 |
295 | # Codespaces bash and OMZ themes - partly inspired by https://github.com/ohmyzsh/ohmyzsh/blob/master/themes/robbyrussell.zsh-theme
296 | codespaces_bash="$(cat \
297 | <<'EOF'
298 |
299 | # Codespaces bash prompt theme
300 | __bash_prompt() {
301 | local userpart='`export XIT=$? \
302 | && [ ! -z "${GITHUB_USER}" ] && echo -n "\[\033[0;32m\]@${GITHUB_USER} " || echo -n "\[\033[0;32m\]\u " \
303 | && [ "$XIT" -ne "0" ] && echo -n "\[\033[1;31m\]➜" || echo -n "\[\033[0m\]➜"`'
304 | local gitbranch='`\
305 | export BRANCH=$(git symbolic-ref --short HEAD 2>/dev/null || git rev-parse --short HEAD 2>/dev/null); \
306 | if [ "${BRANCH}" != "" ]; then \
307 | echo -n "\[\033[0;36m\](\[\033[1;31m\]${BRANCH}" \
308 | && if git ls-files --error-unmatch -m --directory --no-empty-directory -o --exclude-standard ":/*" > /dev/null 2>&1; then \
309 | echo -n " \[\033[1;33m\]✗"; \
310 | fi \
311 | && echo -n "\[\033[0;36m\]) "; \
312 | fi`'
313 | local lightblue='\[\033[1;34m\]'
314 | local removecolor='\[\033[0m\]'
315 | PS1="${userpart} ${lightblue}\w ${gitbranch}${removecolor}\$ "
316 | unset -f __bash_prompt
317 | }
318 | __bash_prompt
319 |
320 | EOF
321 | )"
322 |
323 | codespaces_zsh="$(cat \
324 | <<'EOF'
325 | # Codespaces zsh prompt theme
326 | __zsh_prompt() {
327 | local prompt_username
328 | if [ ! -z "${GITHUB_USER}" ]; then
329 | prompt_username="@${GITHUB_USER}"
330 | else
331 | prompt_username="%n"
332 | fi
333 | PROMPT="%{$fg[green]%}${prompt_username} %(?:%{$reset_color%}➜ :%{$fg_bold[red]%}➜ )" # User/exit code arrow
334 | PROMPT+='%{$fg_bold[blue]%}%(5~|%-1~/…/%3~|%4~)%{$reset_color%} ' # cwd
335 | PROMPT+='$(git_prompt_info)%{$fg[white]%}$ %{$reset_color%}' # Git status
336 | unset -f __zsh_prompt
337 | }
338 | ZSH_THEME_GIT_PROMPT_PREFIX="%{$fg_bold[cyan]%}(%{$fg_bold[red]%}"
339 | ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%} "
340 | ZSH_THEME_GIT_PROMPT_DIRTY=" %{$fg_bold[yellow]%}✗%{$fg_bold[cyan]%})"
341 | ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg_bold[cyan]%})"
342 | __zsh_prompt
343 |
344 | EOF
345 | )"
346 |
347 | # Add notice that Oh My Bash! has been removed from images and how to provide information on how to install manually
348 | omb_readme="$(cat \
349 | <<'EOF'
350 | "Oh My Bash!" has been removed from this image in favor of a simple shell prompt. If you
351 | still wish to use it, remove "~/.oh-my-bash" and install it from: https://github.com/ohmybash/oh-my-bash
352 | You may also want to consider "Bash-it" as an alternative: https://github.com/bash-it/bash-it
353 | See here for infomation on adding it to your image or dotfiles: https://aka.ms/codespaces/omb-remove
354 | EOF
355 | )"
356 | omb_stub="$(cat \
357 | <<'EOF'
358 | #!/usr/bin/env bash
359 | if [ -t 1 ]; then
360 | cat $HOME/.oh-my-bash/README.md
361 | fi
362 | EOF
363 | )"
364 |
365 | # Add RC snippet and custom bash prompt
366 | if [ "${RC_SNIPPET_ALREADY_ADDED}" != "true" ]; then
367 | echo "${rc_snippet}" >> /etc/bash.bashrc
368 | echo "${codespaces_bash}" >> "${user_rc_path}/.bashrc"
369 | echo 'export PROMPT_DIRTRIM=4' >> "${user_rc_path}/.bashrc"
370 | if [ "${USERNAME}" != "root" ]; then
371 | echo "${codespaces_bash}" >> "/root/.bashrc"
372 | echo 'export PROMPT_DIRTRIM=4' >> "/root/.bashrc"
373 | fi
374 | chown ${USERNAME}:${USERNAME} "${user_rc_path}/.bashrc"
375 | RC_SNIPPET_ALREADY_ADDED="true"
376 | fi
377 |
378 | # Add stub for Oh My Bash!
379 | if [ ! -d "${user_rc_path}/.oh-my-bash}" ] && [ "${INSTALL_OH_MYS}" = "true" ]; then
380 | mkdir -p "${user_rc_path}/.oh-my-bash" "/root/.oh-my-bash"
381 | echo "${omb_readme}" >> "${user_rc_path}/.oh-my-bash/README.md"
382 | echo "${omb_stub}" >> "${user_rc_path}/.oh-my-bash/oh-my-bash.sh"
383 | chmod +x "${user_rc_path}/.oh-my-bash/oh-my-bash.sh"
384 | if [ "${USERNAME}" != "root" ]; then
385 | echo "${omb_readme}" >> "/root/.oh-my-bash/README.md"
386 | echo "${omb_stub}" >> "/root/.oh-my-bash/oh-my-bash.sh"
387 | chmod +x "/root/.oh-my-bash/oh-my-bash.sh"
388 | fi
389 | chown -R "${USERNAME}:${USERNAME}" "${user_rc_path}/.oh-my-bash"
390 | fi
391 |
392 | # Optionally install and configure zsh and Oh My Zsh!
393 | if [ "${INSTALL_ZSH}" = "true" ]; then
394 | if ! type zsh > /dev/null 2>&1; then
395 | apt_get_update_if_needed
396 | apt-get install -y zsh
397 | fi
398 | if [ "${ZSH_ALREADY_INSTALLED}" != "true" ]; then
399 | echo "${rc_snippet}" >> /etc/zsh/zshrc
400 | ZSH_ALREADY_INSTALLED="true"
401 | fi
402 |
403 | # Adapted, simplified inline Oh My Zsh! install steps that adds, defaults to a codespaces theme.
404 | # See https://github.com/ohmyzsh/ohmyzsh/blob/master/tools/install.sh for official script.
405 | oh_my_install_dir="${user_rc_path}/.oh-my-zsh"
406 | if [ ! -d "${oh_my_install_dir}" ] && [ "${INSTALL_OH_MYS}" = "true" ]; then
407 | template_path="${oh_my_install_dir}/templates/zshrc.zsh-template"
408 | user_rc_file="${user_rc_path}/.zshrc"
409 | umask g-w,o-w
410 | mkdir -p ${oh_my_install_dir}
411 | git clone --depth=1 \
412 | -c core.eol=lf \
413 | -c core.autocrlf=false \
414 | -c fsck.zeroPaddedFilemode=ignore \
415 | -c fetch.fsck.zeroPaddedFilemode=ignore \
416 | -c receive.fsck.zeroPaddedFilemode=ignore \
417 | "https://github.com/ohmyzsh/ohmyzsh" "${oh_my_install_dir}" 2>&1
418 | echo -e "$(cat "${template_path}")\nDISABLE_AUTO_UPDATE=true\nDISABLE_UPDATE_PROMPT=true" > ${user_rc_file}
419 | sed -i -e 's/ZSH_THEME=.*/ZSH_THEME="codespaces"/g' ${user_rc_file}
420 |
421 | mkdir -p ${oh_my_install_dir}/custom/themes
422 | echo "${codespaces_zsh}" > "${oh_my_install_dir}/custom/themes/codespaces.zsh-theme"
423 | # Shrink git while still enabling updates
424 | cd "${oh_my_install_dir}"
425 | git repack -a -d -f --depth=1 --window=1
426 | # Copy to non-root user if one is specified
427 | if [ "${USERNAME}" != "root" ]; then
428 | cp -rf "${user_rc_file}" "${oh_my_install_dir}" /root
429 | chown -R ${USERNAME}:${USERNAME} "${user_rc_path}"
430 | fi
431 | fi
432 | fi
433 |
# Persist image metadata info and install a devcontainer-info script if meta.env is found in the same directory
435 | meta_info_script="$(cat << 'EOF'
436 | #!/bin/sh
437 | . /usr/local/etc/vscode-dev-containers/meta.env
438 |
439 | # Minimal output
440 | if [ "$1" = "version" ] || [ "$1" = "image-version" ]; then
441 | echo "${VERSION}"
442 | exit 0
443 | elif [ "$1" = "release" ]; then
444 | echo "${GIT_REPOSITORY_RELEASE}"
445 | exit 0
446 | elif [ "$1" = "content" ] || [ "$1" = "content-url" ] || [ "$1" = "contents" ] || [ "$1" = "contents-url" ]; then
447 | echo "${CONTENTS_URL}"
448 | exit 0
449 | fi
450 |
451 | #Full output
452 | echo
453 | echo "Development container image information"
454 | echo
455 | if [ ! -z "${VERSION}" ]; then echo "- Image version: ${VERSION}"; fi
456 | if [ ! -z "${DEFINITION_ID}" ]; then echo "- Definition ID: ${DEFINITION_ID}"; fi
457 | if [ ! -z "${VARIANT}" ]; then echo "- Variant: ${VARIANT}"; fi
458 | if [ ! -z "${GIT_REPOSITORY}" ]; then echo "- Source code repository: ${GIT_REPOSITORY}"; fi
459 | if [ ! -z "${GIT_REPOSITORY_RELEASE}" ]; then echo "- Source code release/branch: ${GIT_REPOSITORY_RELEASE}"; fi
460 | if [ ! -z "${BUILD_TIMESTAMP}" ]; then echo "- Timestamp: ${BUILD_TIMESTAMP}"; fi
461 | if [ ! -z "${CONTENTS_URL}" ]; then echo && echo "More info: ${CONTENTS_URL}"; fi
462 | echo
463 | EOF
464 | )"
465 | if [ -f "${SCRIPT_DIR}/meta.env" ]; then
466 | mkdir -p /usr/local/etc/vscode-dev-containers/
467 | cp -f "${SCRIPT_DIR}/meta.env" /usr/local/etc/vscode-dev-containers/meta.env
468 | echo "${meta_info_script}" > /usr/local/bin/devcontainer-info
469 | chmod +x /usr/local/bin/devcontainer-info
470 | fi
471 |
472 | # Write marker file
473 | mkdir -p "$(dirname "${MARKER_FILE}")"
474 | echo -e "\
475 | PACKAGES_ALREADY_INSTALLED=${PACKAGES_ALREADY_INSTALLED}\n\
476 | LOCALE_ALREADY_SET=${LOCALE_ALREADY_SET}\n\
477 | EXISTING_NON_ROOT_USER=${EXISTING_NON_ROOT_USER}\n\
478 | RC_SNIPPET_ALREADY_ADDED=${RC_SNIPPET_ALREADY_ADDED}\n\
479 | ZSH_ALREADY_INSTALLED=${ZSH_ALREADY_INSTALLED}" > "${MARKER_FILE}"
480 |
481 | echo "Done!"
--------------------------------------------------------------------------------
/library-scripts/desktop-lite-debian.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #-------------------------------------------------------------------------------------------------------------
3 | # Copyright (c) Microsoft Corporation. All rights reserved.
4 | # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5 | #-------------------------------------------------------------------------------------------------------------
6 | #
7 | # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/desktop-lite.md
8 | # Maintainer: The VS Code and Codespaces Teams
9 | #
10 | # Syntax: ./desktop-lite-debian.sh [non-root user] [vnc password] [install no vnc flag]
11 |
12 | USERNAME=${1:-"automatic"}
13 | VNC_PASSWORD=${2:-"vscode"}
14 | INSTALL_NOVNC=${3:-"true"}
15 |
16 | NOVNC_VERSION=1.2.0
17 | WEBSOCKETIFY_VERSION=0.10.0
18 |
19 | package_list="
20 | tigervnc-standalone-server \
21 | tigervnc-common \
22 | fluxbox \
23 | dbus-x11 \
24 | x11-utils \
25 | x11-xserver-utils \
26 | xdg-utils \
27 | fbautostart \
28 | at-spi2-core \
29 | xterm \
30 | eterm \
31 | nautilus\
32 | mousepad \
33 | seahorse \
34 | gnome-icon-theme \
35 | gnome-keyring \
36 | libx11-dev \
37 | libxkbfile-dev \
38 | libsecret-1-dev \
39 | libgbm-dev \
40 | libnotify4 \
41 | libnss3 \
42 | libxss1 \
43 | libasound2 \
44 | xfonts-base \
45 | xfonts-terminus \
46 | fonts-noto \
47 | fonts-wqy-microhei \
48 | fonts-droid-fallback \
49 | htop \
50 | ncdu \
51 | curl \
52 | ca-certificates\
53 | unzip \
54 | nano \
55 | locales"
56 |
57 | set -e
58 |
59 | if [ "$(id -u)" -ne 0 ]; then
60 | echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
61 | exit 1
62 | fi
63 |
# Determine the appropriate non-root user
if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
    USERNAME=""
    # Candidates: common dev-container accounts, then whichever account has UID 1000.
    POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
    # Intentionally unquoted so an empty awk result contributes no candidate.
    for CURRENT_USER in ${POSSIBLE_USERS[@]}; do
        if id -u ${CURRENT_USER} > /dev/null 2>&1; then
            # First candidate that actually exists wins.
            USERNAME=${CURRENT_USER}
            break
        fi
    done
    if [ "${USERNAME}" = "" ]; then
        # No candidate exists; fall back to root.
        USERNAME=root
    fi
elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
    # Explicit "none" or a nonexistent user also falls back to root.
    USERNAME=root
fi
80 |
# Refresh the apt package index, but only when the cached lists are missing or empty.
apt_get_update_if_needed()
{
    if [ -d "/var/lib/apt/lists" ] && [ "$(ls /var/lib/apt/lists/ | wc -l)" != "0" ]; then
        echo "Skipping apt-get update."
    else
        echo "Running apt-get update..."
        apt-get update
    fi
}
91 |
# Install the given packages unless every one of them is already present.
check_packages() {
    dpkg -s "$@" > /dev/null 2>&1 && return 0
    apt_get_update_if_needed
    apt-get -y install --no-install-recommends "$@"
}
99 |
100 | # Ensure apt is in non-interactive to avoid prompts
101 | export DEBIAN_FRONTEND=noninteractive
102 |
103 | apt_get_update_if_needed
104 |
# On older Ubuntu, Tilix is in a PPA. On Debian stretch it's in backports.
106 | if [[ -z $(apt-cache --names-only search ^tilix$) ]]; then
107 | . /etc/os-release
108 | if [ "${ID}" = "ubuntu" ]; then
109 | apt-get install -y --no-install-recommends apt-transport-https software-properties-common
110 | add-apt-repository -y ppa:webupd8team/terminix
111 | elif [ "${VERSION_CODENAME}" = "stretch" ]; then
112 | echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/stretch-backports.list
113 | fi
114 | apt-get update
115 | if [[ -z $(apt-cache --names-only search ^tilix$) ]]; then
116 | echo "(!) WARNING: Tilix not available on ${ID} ${VERSION_CODENAME} architecture $(uname -m). Skipping."
117 | else
118 | package_list="${package_list} tilix"
119 | fi
120 | else
121 | package_list="${package_list} tilix"
122 | fi
123 |
124 | # Install X11, fluxbox and VS Code dependencies
125 | check_packages ${package_list}
126 |
127 | # Install Emoji font if available in distro - Available in Debian 10+, Ubuntu 18.04+
128 | if dpkg-query -W fonts-noto-color-emoji > /dev/null 2>&1 && ! dpkg -s fonts-noto-color-emoji > /dev/null 2>&1; then
129 | apt-get -y install --no-install-recommends fonts-noto-color-emoji
130 | fi
131 |
132 | # Check at least one locale exists
133 | if ! grep -o -E '^\s*en_US.UTF-8\s+UTF-8' /etc/locale.gen > /dev/null; then
134 | echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
135 | locale-gen
136 | fi
137 |
138 | # Install the Cascadia Code fonts - https://github.com/microsoft/cascadia-code
139 | if [ ! -d "/usr/share/fonts/truetype/cascadia" ]; then
140 | curl -sSL https://github.com/microsoft/cascadia-code/releases/download/v2008.25/CascadiaCode-2008.25.zip -o /tmp/cascadia-fonts.zip
141 | unzip /tmp/cascadia-fonts.zip -d /tmp/cascadia-fonts
142 | mkdir -p /usr/share/fonts/truetype/cascadia
143 | mv /tmp/cascadia-fonts/ttf/* /usr/share/fonts/truetype/cascadia/
144 | rm -rf /tmp/cascadia-fonts.zip /tmp/cascadia-fonts
145 | fi
146 |
147 | # Install noVNC
148 | if [ "${INSTALL_NOVNC}" = "true" ] && [ ! -d "/usr/local/novnc" ]; then
149 | mkdir -p /usr/local/novnc
150 | curl -sSL https://github.com/novnc/noVNC/archive/v${NOVNC_VERSION}.zip -o /tmp/novnc-install.zip
151 | unzip /tmp/novnc-install.zip -d /usr/local/novnc
152 | cp /usr/local/novnc/noVNC-${NOVNC_VERSION}/vnc.html /usr/local/novnc/noVNC-${NOVNC_VERSION}/index.html
153 | curl -sSL https://github.com/novnc/websockify/archive/v${WEBSOCKETIFY_VERSION}.zip -o /tmp/websockify-install.zip
154 | unzip /tmp/websockify-install.zip -d /usr/local/novnc
155 | ln -s /usr/local/novnc/websockify-${WEBSOCKETIFY_VERSION} /usr/local/novnc/noVNC-${NOVNC_VERSION}/utils/websockify
156 | rm -f /tmp/websockify-install.zip /tmp/novnc-install.zip
157 |
158 | # Install noVNC dependencies and use them.
159 | if ! dpkg -s python3-minimal python3-numpy > /dev/null 2>&1; then
160 | apt-get -y install --no-install-recommends python3-minimal python3-numpy
161 | fi
162 | sed -i -E 's/^python /python3 /' /usr/local/novnc/websockify-${WEBSOCKETIFY_VERSION}/run
163 | fi
164 |
165 | # Set up folders for scripts and init files
166 | mkdir -p /var/run/dbus /usr/local/etc/vscode-dev-containers/ /root/.fluxbox
167 |
168 | # Script to change resolution of desktop
169 | tee /usr/local/bin/set-resolution > /dev/null \
170 | << EOF
171 | #!/bin/bash
172 | RESOLUTION=\${1:-\${VNC_RESOLUTION:-1920x1080}}
173 | DPI=\${2:-\${VNC_DPI:-96}}
174 | IGNORE_ERROR=\${3:-"false"}
175 | if [ -z "\$1" ]; then
176 | echo -e "**Current Settings **\n"
177 | xrandr
178 | echo -n -e "\nEnter new resolution (WIDTHxHEIGHT, blank for \${RESOLUTION}, Ctrl+C to abort).\n> "
179 | read NEW_RES
180 | if [ "\${NEW_RES}" != "" ]; then
181 | RESOLUTION=\${NEW_RES}
182 | fi
183 | if ! echo "\${RESOLUTION}" | grep -E '[0-9]+x[0-9]+' > /dev/null; then
184 | echo -e "\nInvalid resolution format!\n"
185 | exit 1
186 | fi
187 | if [ -z "\$2" ]; then
188 | echo -n -e "\nEnter new DPI (blank for \${DPI}, Ctrl+C to abort).\n> "
189 | read NEW_DPI
190 | if [ "\${NEW_DPI}" != "" ]; then
191 | DPI=\${NEW_DPI}
192 | fi
193 | fi
194 | fi
195 |
196 | xrandr --fb \${RESOLUTION} --dpi \${DPI} > /dev/null 2>&1
197 |
198 | if [ \$? -ne 0 ] && [ "\${IGNORE_ERROR}" != "true" ]; then
199 | echo -e "\nFAILED TO SET RESOLUTION!\n"
200 | exit 1
201 | fi
202 |
203 | echo -e "\nSuccess!\n"
204 | EOF
205 |
# Container ENTRYPOINT script. Unquoted EOF: ${USERNAME} is expanded now,
# while escaped \$ forms survive into the generated script.
tee /usr/local/share/desktop-init.sh > /dev/null \
<< EOF
#!/bin/bash

USERNAME=${USERNAME}
LOG=/tmp/container-init.log

# Execute the command if not already running
startInBackgroundIfNotRunning()
{
    log "Starting \$1."
    echo -e "\n** \$(date) **" | sudoIf tee -a /tmp/\$1.log > /dev/null
    if ! pidof \$1 > /dev/null; then
        keepRunningInBackground "\$@"
        while ! pidof \$1 > /dev/null; do
            sleep 1
        done
        log "\$1 started."
    else
        echo "\$1 is already running." | sudoIf tee -a /tmp/\$1.log > /dev/null
        log "\$1 is already running."
    fi
}

# Keep command running in background
keepRunningInBackground()
{
    (\$2 bash -c "while :; do echo [\\\$(date)] Process started.; \$3; echo [\\\$(date)] Process exited!; sleep 5; done 2>&1" | sudoIf tee -a /tmp/\$1.log > /dev/null & echo "\$!" | sudoIf tee /tmp/\$1.pid > /dev/null)
}

# Use sudo to run as root when required
sudoIf()
{
    if [ "\$(id -u)" -ne 0 ]; then
        sudo "\$@"
    else
        "\$@"
    fi
}

# Use sudo to run as non-root user if not already running
sudoUserIf()
{
    if [ "\$(id -u)" -eq 0 ] && [ "\${USERNAME}" != "root" ]; then
        sudo -u \${USERNAME} "\$@"
    else
        "\$@"
    fi
}

# Log messages
log()
{
    echo -e "[\$(date)] \$@" | sudoIf tee -a \$LOG > /dev/null
}

log "** SCRIPT START **"

# Start dbus.
log 'Running "/etc/init.d/dbus start".'
if [ -f "/var/run/dbus/pid" ] && ! pidof dbus-daemon > /dev/null; then
    sudoIf rm -f /var/run/dbus/pid
fi
sudoIf /etc/init.d/dbus start 2>&1 | sudoIf tee -a /tmp/dbus-daemon-system.log > /dev/null
while ! pidof dbus-daemon > /dev/null; do
    sleep 1
done

# Startup tigervnc server and fluxbox
# Fix: use sudoIf (not plain sudo) so this also works when sudo is not
# installed and the script is already running as root.
sudoIf rm -rf /tmp/.X11-unix /tmp/.X*-lock
mkdir -p /tmp/.X11-unix
sudoIf chmod 1777 /tmp/.X11-unix
sudoIf chown root:\${USERNAME} /tmp/.X11-unix
# Append a default color depth when VNC_RESOLUTION is only WIDTHxHEIGHT.
if [ "\$(echo "\${VNC_RESOLUTION}" | tr -cd 'x' | wc -c)" = "1" ]; then VNC_RESOLUTION=\${VNC_RESOLUTION}x16; fi
screen_geometry="\${VNC_RESOLUTION%*x*}"
screen_depth="\${VNC_RESOLUTION##*x}"
startInBackgroundIfNotRunning "Xtigervnc" sudoUserIf "tigervncserver \${DISPLAY:-:1} -geometry \${screen_geometry} -depth \${screen_depth} -rfbport \${VNC_PORT:-5901} -dpi \${VNC_DPI:-96} -localhost -desktop fluxbox -fg -passwd /usr/local/etc/vscode-dev-containers/vnc-passwd"

# Spin up noVNC if installed and not running.
if [ -d "/usr/local/novnc" ] && [ "\$(ps -ef | grep /usr/local/novnc/noVNC*/utils/launch.sh | grep -v grep)" = "" ]; then
    keepRunningInBackground "noVNC" sudoIf "/usr/local/novnc/noVNC*/utils/launch.sh --listen \${NOVNC_PORT:-6080} --vnc localhost:\${VNC_PORT:-5901}"
    log "noVNC started."
else
    log "noVNC is already running or not installed."
fi

# Run whatever was passed in
log "Executing \"\$@\"."
exec "\$@"
log "** SCRIPT EXIT **"
EOF
298 |
299 | echo "${VNC_PASSWORD}" | vncpasswd -f > /usr/local/etc/vscode-dev-containers/vnc-passwd
300 | touch /root/.Xmodmap
301 | chmod +x /usr/local/share/desktop-init.sh /usr/local/bin/set-resolution
302 |
303 | tee /root/.fluxbox/apps > /dev/null \
304 | < /dev/null \
312 | < /dev/null \
326 | <
329 | [exec] (Text Editor) { mousepad } <>
330 | [exec] (Terminal) { tilix -w ~ -e $(readlink -f /proc/$$/exe) -il } <>
331 | [exec] (Web Browser) { x-www-browser --disable-dev-shm-usage } <>
332 | [submenu] (System) {}
333 | [exec] (Set Resolution) { tilix -t "Set Resolution" -e bash /usr/local/bin/set-resolution } <>
334 | [exec] (Edit Application Menu) { mousepad ~/.fluxbox/menu } <>
335 | [exec] (Passwords and Keys) { seahorse } <>
336 | [exec] (Top Processes) { tilix -t "Top" -e htop } <>
337 | [exec] (Disk Utilization) { tilix -t "Disk Utilization" -e ncdu / } <>
338 | [exec] (Editres) {editres} <>
339 | [exec] (Xfontsel) {xfontsel} <>
340 | [exec] (Xkill) {xkill} <>
341 | [exec] (Xrefresh) {xrefresh} <>
342 | [end]
343 | [config] (Configuration)
344 | [workspaces] (Workspaces)
345 | [end]
346 | EOF
347 |
348 | # Set up non-root user (if one exists)
349 | if [ "${USERNAME}" != "root" ]; then
350 | touch /home/${USERNAME}/.Xmodmap
351 | cp -R /root/.fluxbox /home/${USERNAME}
352 | chown -R ${USERNAME}:${USERNAME} /home/${USERNAME}/.Xmodmap /home/${USERNAME}/.fluxbox
353 | chown ${USERNAME}:root /usr/local/share/desktop-init.sh /usr/local/bin/set-resolution /usr/local/etc/vscode-dev-containers/vnc-passwd
354 | fi
355 |
356 | echo "Done!"
--------------------------------------------------------------------------------
/library-scripts/docker-in-docker-debian.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #-------------------------------------------------------------------------------------------------------------
3 | # Copyright (c) Microsoft Corporation. All rights reserved.
4 | # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5 | #-------------------------------------------------------------------------------------------------------------
6 | #
7 | # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
8 | # Maintainer: The VS Code and Codespaces Teams
9 | #
10 | # Syntax: ./docker-in-docker-debian.sh [enable non-root docker access flag] [non-root user] [use moby]
11 |
12 | ENABLE_NONROOT_DOCKER=${1:-"true"}
13 | USERNAME=${2:-"automatic"}
14 | USE_MOBY=${3:-"true"}
15 | MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
16 |
17 | set -e
18 |
19 | if [ "$(id -u)" -ne 0 ]; then
20 | echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
21 | exit 1
22 | fi
23 |
24 | # Determine the appropriate non-root user
25 | if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
26 | USERNAME=""
27 | POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
28 | for CURRENT_USER in ${POSSIBLE_USERS[@]}; do
29 | if id -u ${CURRENT_USER} > /dev/null 2>&1; then
30 | USERNAME=${CURRENT_USER}
31 | break
32 | fi
33 | done
34 | if [ "${USERNAME}" = "" ]; then
35 | USERNAME=root
36 | fi
37 | elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
38 | USERNAME=root
39 | fi
40 |
# Get central common setting
# Downloads the shared settings file (once per script run) and, when the named
# variable appears in it, overrides the caller's global variable of that name.
#   $1 - variable name to look up / override
#   $2 - "true" to use grep -z (NUL-delimited) so multi-line values match
get_common_setting() {
    if [ "${common_settings_file_loaded}" != "true" ]; then
        # Best-effort download: on failure we just keep the built-in default.
        curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping."
        common_settings_file_loaded=true
    fi
    if [ -f "/tmp/vsdc-settings.env" ]; then
        local multi_line=""
        if [ "$2" = "true" ]; then multi_line="-z"; fi
        # PCRE \K keeps only the value after NAME=" ; tr strips NULs left by grep -z.
        local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')"
        # declare -g assigns at global scope even though we are inside a function.
        if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi
    fi
    # Echo the final effective value (via indirect expansion) for logging.
    echo "$1=${!1}"
}
55 |
# Function to run apt-get update only when the package lists are missing or empty.
apt_get_update_if_needed()
{
    if [ -d "/var/lib/apt/lists" ] && [ "$(ls /var/lib/apt/lists/ | wc -l)" != "0" ]; then
        echo "Skipping apt-get update."
        return
    fi
    echo "Running apt-get update..."
    apt-get update
}
66 |
# Checks if packages are installed and installs them if not
check_packages() {
    if dpkg -s "$@" > /dev/null 2>&1; then
        return 0
    fi
    apt_get_update_if_needed
    apt-get -y install --no-install-recommends "$@"
}
74 |
75 | # Ensure apt is in non-interactive to avoid prompts
76 | export DEBIAN_FRONTEND=noninteractive
77 |
78 | # Install dependencies
79 | check_packages apt-transport-https curl ca-certificates lxc pigz iptables gnupg2
80 |
81 | # Swap to legacy iptables for compatibility
82 | if type iptables-legacy > /dev/null 2>&1; then
83 | update-alternatives --set iptables /usr/sbin/iptables-legacy
84 | update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
85 | fi
86 |
87 | # Install Docker / Moby CLI if not already installed
88 | if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
89 | echo "Docker / Moby CLI and Engine already installed."
90 | else
91 | # Source /etc/os-release to get OS info
92 | . /etc/os-release
93 | if [ "${USE_MOBY}" = "true" ]; then
94 | # Import key safely (new 'signed-by' method rather than deprecated apt-key approach) and install
95 | get_common_setting MICROSOFT_GPG_KEYS_URI
96 | curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
97 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
98 | apt-get update
99 | apt-get -y install --no-install-recommends moby-cli moby-buildx moby-compose moby-engine
100 | else
101 | # Import key safely (new 'signed-by' method rather than deprecated apt-key approach) and install
102 | curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
103 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
104 | apt-get update
105 | apt-get -y install --no-install-recommends docker-ce-cli docker-ce
106 | fi
107 | fi
108 |
109 | echo "Finished installing docker / moby"
110 |
111 | # Install Docker Compose if not already installed and is on a supported architecture
112 | if type docker-compose > /dev/null 2>&1; then
113 | echo "Docker Compose already installed."
114 | else
115 | TARGET_COMPOSE_ARCH="$(uname -m)"
116 | if [ "${TARGET_COMPOSE_ARCH}" = "amd64" ]; then
117 | TARGET_COMPOSE_ARCH="x86_64"
118 | fi
119 | if [ "${TARGET_COMPOSE_ARCH}" != "x86_64" ]; then
# Use pip to get a version that runs on this architecture
121 | if ! dpkg -s python3-minimal python3-pip libffi-dev python3-venv > /dev/null 2>&1; then
122 | apt_get_update_if_needed
123 | apt-get -y install python3-minimal python3-pip libffi-dev python3-venv
124 | fi
125 | export PIPX_HOME=/usr/local/pipx
126 | mkdir -p ${PIPX_HOME}
127 | export PIPX_BIN_DIR=/usr/local/bin
128 | export PYTHONUSERBASE=/tmp/pip-tmp
129 | export PIP_CACHE_DIR=/tmp/pip-tmp/cache
130 | pipx_bin=pipx
131 | if ! type pipx > /dev/null 2>&1; then
132 | pip3 install --disable-pip-version-check --no-warn-script-location --no-cache-dir --user pipx
133 | pipx_bin=/tmp/pip-tmp/bin/pipx
134 | fi
135 | ${pipx_bin} install --system-site-packages --pip-args '--no-cache-dir --force-reinstall' docker-compose
136 | rm -rf /tmp/pip-tmp
137 | else
138 | LATEST_COMPOSE_VERSION=$(basename "$(curl -fsSL -o /dev/null -w "%{url_effective}" https://github.com/docker/compose/releases/latest)")
139 | curl -fsSL "https://github.com/docker/compose/releases/download/${LATEST_COMPOSE_VERSION}/docker-compose-$(uname -s)-${TARGET_COMPOSE_ARCH}" -o /usr/local/bin/docker-compose
140 | chmod +x /usr/local/bin/docker-compose
141 | fi
142 | fi
143 |
144 | # If init file already exists, exit
145 | if [ -f "/usr/local/share/docker-init.sh" ]; then
146 | echo "/usr/local/share/docker-init.sh already exists, so exiting."
147 | exit 0
148 | fi
149 | echo "docker-init doesnt exist..."
150 |
151 | # Add user to the docker group
152 | if [ "${ENABLE_NONROOT_DOCKER}" = "true" ]; then
153 | if ! getent group docker > /dev/null 2>&1; then
154 | groupadd docker
155 | fi
156 |
157 | usermod -aG docker ${USERNAME}
158 | fi
159 |
# Generate the docker-init entrypoint. Quoted 'EOF': nothing is expanded here;
# the script below is written out verbatim.
tee /usr/local/share/docker-init.sh > /dev/null \
<< 'EOF'
#!/usr/bin/env bash
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------

# Run a command as root, using sudo only when not already root.
sudoIf()
{
    if [ "$(id -u)" -ne 0 ]; then
        sudo "$@"
    else
        "$@"
    fi
}

# explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
# ie: docker kill
sudoIf find /run /var/run -iname 'docker*.pid' -delete || :
sudoIf find /run /var/run -iname 'container*.pid' -delete || :

set -e

## Dind wrapper script from docker team
# Maintained: https://github.com/moby/moby/blob/master/hack/dind

export container=docker

if [ -d /sys/kernel/security ] && ! sudoIf mountpoint -q /sys/kernel/security; then
    sudoIf mount -t securityfs none /sys/kernel/security || {
        echo >&2 'Could not mount /sys/kernel/security.'
        echo >&2 'AppArmor detection and --privileged mode might break.'
    }
fi

# Mount /tmp (conditionally)
if ! sudoIf mountpoint -q /tmp; then
    sudoIf mount -t tmpfs none /tmp
fi

# cgroup v2: enable nesting
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
    # move the init process (PID 1) from the root group to the /init group,
    # otherwise writing subtree_control fails with EBUSY.
    sudoIf mkdir -p /sys/fs/cgroup/init
    # Fix: shell output redirections run as the invoking user, NOT under sudo,
    # so "sudoIf cmd > file" failed for non-root users. Pipe through
    # "sudoIf tee" so the privileged process performs the write.
    echo 1 | sudoIf tee /sys/fs/cgroup/init/cgroup.procs > /dev/null
    # enable controllers
    sudoIf sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
        | sudoIf tee /sys/fs/cgroup/cgroup.subtree_control > /dev/null
fi
## Dind wrapper over.

# Handle DNS
set +e
cat /etc/resolv.conf | grep -i 'internal.cloudapp.net'
if [ $? -eq 0 ]
then
    echo "Setting dockerd Azure DNS."
    CUSTOMDNS="--dns 168.63.129.16"
else
    echo "Not setting dockerd DNS manually."
    CUSTOMDNS=""
fi
set -e

# Start docker/moby engine
( sudoIf dockerd $CUSTOMDNS > /tmp/dockerd.log 2>&1 ) &

set +e

# Execute whatever commands were passed in (if any). This allows us
# to set this script to ENTRYPOINT while still executing the default CMD.
exec "$@"
EOF
235 |
236 | chmod +x /usr/local/share/docker-init.sh
237 | chown ${USERNAME}:root /usr/local/share/docker-init.sh
--------------------------------------------------------------------------------
/ps-database-scripts/add-operation-column-and-index.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Creates a schema branch that adds the "operation" column and a matching
# index to pixel_matrix, then opens a deploy request for it.

# Use the personal database when the user has one configured, otherwise
# fall back to the shared demo database after an explicit confirmation.
if [ -n "$MY_DB_URL" ]; then
    echo "MY_DB_URL set, using your personal database"
    export DATABASE_URL="$MY_DB_URL"
    DB_NAME="matrix-demos-${GITHUB_USER}"
else
    echo "MY_DB_URL not set, using default database - this will potentially interfere with demos of other people, are you sure to proceed?"
    read -p "Press enter to continue, or ctrl-c to abort"
    DB_NAME="matrix-demos"
fi


ORG_NAME="planetscale-demo"
BRANCH_NAME="add-operation-column-and-index"

. use-pscale-docker-image.sh
. wait-for-branch-readiness.sh

#pscale auth login
# Recreate the schema branch from scratch: drop any leftover, then create anew.
pscale branch delete "$DB_NAME" "$BRANCH_NAME" --force --org "$ORG_NAME"
pscale branch create "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"
if ! wait_for_branch_readiness 7 "$DB_NAME" "$BRANCH_NAME" "$ORG_NAME" 10; then
    echo "Branch $BRANCH_NAME is not ready"
    exit 1
fi
if ! echo "alter table pixel_matrix add column operation varchar(10) default NULL; create index environment_operation on pixel_matrix(environment, operation);" | pscale shell "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"; then
    echo "Schema change in $BRANCH_NAME could not be created"
    exit 1
fi
pscale deploy-request create "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"
--------------------------------------------------------------------------------
/ps-database-scripts/create-database.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Creates the per-user demo database with the pixel_matrix table, promotes the
# main branch to production, grants the service token access, and prints the
# connection URL the user should store as the MY_DB_URL Codespaces secret.
DB_NAME="matrix-demos-${GITHUB_USER}"
ORG_NAME="planetscale-demo"
BRANCH_NAME="main"
CREDS="creds-${GITHUB_USER}"

. wait-for-branch-readiness.sh

# At the moment, service tokens do not allow DB creations or prod branch promotions, hence not using the service token.
pscale auth login
unset PLANETSCALE_SERVICE_TOKEN

pscale database create "$DB_NAME" --org "$ORG_NAME"
pscale branch create "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"
wait_for_branch_readiness 7 "$DB_NAME" "$BRANCH_NAME" "$ORG_NAME" 10
# Fix: bail out when the branch never becomes ready instead of running SQL
# against it (the sibling scripts all check this result).
if [ $? -ne 0 ]; then
    echo "Branch $BRANCH_NAME of database $DB_NAME is not ready"
    exit 1
fi
echo "CREATE TABLE pixel_matrix (id bigint NOT NULL AUTO_INCREMENT, environment varchar(10) NOT NULL, cell varchar(10) NOT NULL, pixel_data longtext NOT NULL, PRIMARY KEY (id), KEY environment (environment), KEY cell (cell));" | pscale shell "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"
# check whether table creation was successful
if [ $? -ne 0 ]; then
    echo "Failed to create table in branch $BRANCH_NAME for database $DB_NAME"
    exit 1
fi

pscale branch promote "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"

# grant service token permission to use the database if service token is set
if [ -n "$PLANETSCALE_SERVICE_TOKEN_NAME" ]; then
    pscale service-token add-access "$PLANETSCALE_SERVICE_TOKEN_NAME" approve_deploy_request connect_branch create_branch create_comment create_deploy_request delete_branch read_branch read_deploy_request connect_production_branch --database "$DB_NAME" --org "$ORG_NAME"
fi

# Build a mysql:// connection URL from freshly created branch credentials.
MY_DB_URL=`pscale password create "$DB_NAME" "$BRANCH_NAME" "$CREDS" --org "$ORG_NAME" --format json | jq ". | \"mysql://\" + .username + \":\" + .password + \"@\" + .access_host_url + \"/${DB_NAME}\""`
# ${MY_DB_URL:1:${#MY_DB_URL}-2} strips the surrounding quotes that jq emits.
echo "Please set MY_DB_URL in your personal codespaces secrets to ${MY_DB_URL:1:${#MY_DB_URL}-2} and grant this repo access to it."
echo "If you do not like to restart this Codespace, you would have to run the following command in your terminal:"
echo "export MY_DB_URL=${MY_DB_URL}"
--------------------------------------------------------------------------------
/ps-database-scripts/merge-latest-open-deploy-request.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Finds the latest open deploy request for the (personal or shared) demo
# database, shows its diff, and deploys it after interactive confirmation.

#unset PLANETSCALE_SERVICE_TOKEN

if [ -z "$MY_DB_URL" ]; then
    echo "MY_DB_URL not set, using default database - this will potentially interfere with demos of other people, are you sure to proceed?"
    read -p "Press enter to continue, or ctrl-c to abort"
    DB_NAME="matrix-demos"
else
    echo "MY_DB_URL set, using your personal database"
    export DATABASE_URL="$MY_DB_URL"
    DB_NAME="matrix-demos-${GITHUB_USER}"
fi

ORG_NAME="planetscale-demo"

. use-pscale-docker-image.sh
. wait-for-deploy-request-merged.sh

raw_output=`pscale deploy-request list "$DB_NAME" --org "$ORG_NAME" --format json`
rc=$?
# Fix: capture the exit code immediately - the old message echoed "$?" after
# it had been reset by the [ ] test, so it always printed 0. Also name the
# actual subcommand ("deploy-request list", not "deploy-branch list").
if [ $rc -ne 0 ]; then
    echo "Error: pscale deploy-request list returned non-zero exit code $rc: $raw_output"
    exit 1
fi
# Pick the number of the first deploy request whose state is "open".
output=`echo $raw_output | jq "[.[] | select(.state == \"open\") ] | .[0].number "`

# test whether the output is a number
if [[ $output =~ ^[0-9]+$ ]]; then
    echo "Going to deploy latest open deployment request $output with the following changes: "
    pscale deploy-request diff "$DB_NAME" "$output" --org "$ORG_NAME"
    echo "Do you want to deploy this request? [y/n]"
    read answer
    if [ "$answer" == "y" ]; then
        pscale deploy-request deploy "$DB_NAME" "$output" --org "$ORG_NAME"
        # if command returns non-zero exit code then error
        if [ $? -ne 0 ]; then
            echo "Error: pscale deploy-request deploy returned non-zero exit code"
            exit 4
        fi
        # Poll (up to 9 retries, 60s apart) until the request reports merged.
        wait_for_deploy_request_merged 9 "$DB_NAME" "$output" "$ORG_NAME" 60
        if [ $? -ne 0 ]; then
            echo "Error: wait-for-deploy-request-merged returned non-zero exit code"
            exit 5
        fi
    else
        echo "Aborting."
        exit 1
    fi
else
    echo "No open deployment request found: $raw_output"
    exit 3
fi
--------------------------------------------------------------------------------
/ps-database-scripts/remove-operation-column-and-index.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Creates a schema branch that drops the "operation" column and its index
# from pixel_matrix, then opens a deploy request for it.

# Prefer the user's personal database; the shared demo database needs an
# explicit confirmation because it may interfere with other demos.
if [ -n "$MY_DB_URL" ]; then
    echo "MY_DB_URL set, using your personal database"
    export DATABASE_URL="$MY_DB_URL"
    DB_NAME="matrix-demos-${GITHUB_USER}"
else
    echo "MY_DB_URL not set, using default database - this will potentially interfere with demos of other people, are you sure to proceed?"
    read -p "Press enter to continue, or ctrl-c to abort"
    DB_NAME="matrix-demos"
fi

ORG_NAME="planetscale-demo"
BRANCH_NAME="remove-operation-column-and-index"

. use-pscale-docker-image.sh
. wait-for-branch-readiness.sh

#pscale auth login
#pscale database create $DB_NAME --org $ORG_NAME
# Start from a clean branch: force-delete any previous one, then recreate it.
pscale branch delete "$DB_NAME" "$BRANCH_NAME" --force --org "$ORG_NAME"
pscale branch create "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"
if ! wait_for_branch_readiness 7 "$DB_NAME" "$BRANCH_NAME" "$ORG_NAME" 10; then
    echo "Branch $BRANCH_NAME is not ready"
    exit 1
fi
if ! echo "alter table pixel_matrix drop column operation; drop index environment_operation on pixel_matrix;" | pscale shell "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"; then
    echo "Schema change in $BRANCH_NAME could not be created"
    exit 1
fi
pscale deploy-request create "$DB_NAME" "$BRANCH_NAME" --org "$ORG_NAME"
--------------------------------------------------------------------------------
/ps-database-scripts/use-pscale-docker-image.sh:
--------------------------------------------------------------------------------
echo Using pscale CLI from latest docker image ...
mkdir -p "$HOME/.config/planetscale"

# Wrapper that runs the pscale CLI from its official Docker image so no local
# installation is needed. Service-token credentials are passed through from
# the environment; the CLI config dir is mounted so `pscale auth` state
# survives between invocations.
function pscale {
    local tty="-t"
    local non_interactive=""
    # if first arg equals shell, we have to turn off pseudo-tty and set PSCALE_ALLOW_NONINTERACTIVE_SHELL=true
    if [ "$1" = "shell" ]; then
        tty=""
        non_interactive="-e PSCALE_ALLOW_NONINTERACTIVE_SHELL=true"
    fi

    # Fix: $non_interactive was previously computed but never used (the env
    # var was hard-coded for every command); now it is only set for `shell`.
    # $tty and $non_interactive are intentionally unquoted so that empty
    # values vanish instead of becoming empty arguments; "$@" is quoted so
    # arguments containing spaces survive intact.
    docker run -e PLANETSCALE_SERVICE_TOKEN="$PLANETSCALE_SERVICE_TOKEN" -e PLANETSCALE_SERVICE_TOKEN_NAME="$PLANETSCALE_SERVICE_TOKEN_NAME" -e HOME=/tmp -v "$HOME/.config/planetscale":/tmp/.config/planetscale $non_interactive --user $(id -u):$(id -g) --rm -i $tty -p 3306:3306/tcp planetscale/pscale:latest "$@"
}
--------------------------------------------------------------------------------
/ps-database-scripts/wait-for-branch-readiness.sh:
--------------------------------------------------------------------------------
function wait_for_branch_readiness {
    # Polls `pscale branch list` until the given branch reports ready=true,
    # backing off exponentially between polls.
    #
    # $1 retries     - maximum number of polls before giving up
    # $2 db          - database name
    # $3 branch      - branch to wait for
    # $4 org         - PlanetScale organization
    # $5 max_timeout - optional cap on backoff seconds (default 60)
    #
    # Returns 0 when ready, 1 on CLI error, 2 when retries are exhausted,
    # 3 when the branch reports an unexpected status.
    local retries=$1
    local db=$2
    local branch=$3
    local org=$4

    # use default cap when the fifth parameter is not supplied
    local max_timeout=${5:-60}

    local count=0
    local wait=1

    echo "Checking if branch $branch is ready for use..."
    while true; do
        # Declare separately from the assignment: `local x=$(cmd)` makes $?
        # the (always zero) status of `local`, which silently hid pscale
        # failures in the original version.
        local raw_output
        raw_output=`pscale branch list "$db" --org "$org" --format json`
        local rc=$?
        if [ $rc -ne 0 ]; then
            echo "Error: pscale branch list returned non-zero exit code $rc: $raw_output"
            return 1
        fi
        local output
        output=`echo $raw_output | jq ".[] | select(.name == \"$branch\") | .ready"`
        # test whether output is false, if so, increase wait timeout exponentially
        if [ "$output" == "false" ]; then
            # increase wait variable exponentially but only if it is less than max_timeout
            if [ $((wait * 2)) -le $max_timeout ]; then
                wait=$((wait * 2))
            else
                wait=$max_timeout
            fi

            count=$((count+1))
            if [ $count -ge $retries ]; then
                echo "Branch $branch is not ready after $retries retries. Exiting..."
                return 2
            fi
            echo "Branch $branch is not ready yet. Retrying in $wait seconds..."
            sleep $wait
        elif [ "$output" == "true" ]; then
            echo "Branch $branch is ready for use."
            return 0
        else
            echo "Branch $branch in unknown status: $raw_output"
            return 3
        fi
    done
}
--------------------------------------------------------------------------------
/ps-database-scripts/wait-for-deploy-request-merged.sh:
--------------------------------------------------------------------------------
function wait_for_deploy_request_merged {
    # Polls `pscale deploy-request list` until deploy request $number reaches
    # deployment state "complete", backing off exponentially between polls.
    #
    # $1 retries     - maximum number of polls before giving up
    # $2 db          - database name
    # $3 number      - deploy request number
    # $4 org         - PlanetScale organization
    # $5 max_timeout - optional cap on backoff seconds (default 600)
    #
    # Returns 0 on merge, 1 on CLI error, 2 when retries are exhausted,
    # 3 on an unexpected deployment state.
    local retries=$1
    local db=$2
    local number=$3
    local org=$4

    # use default cap when the fifth parameter is not supplied
    local max_timeout=${5:-600}

    local count=0
    local wait=1

    echo "Checking if deploy request $number is ready for use..."
    while true; do
        # Declare separately from the assignment: `local x=$(cmd)` makes $?
        # the (always zero) status of `local`, which silently hid pscale
        # failures in the original version.
        local raw_output
        raw_output=`pscale deploy-request list "$db" --org "$org" --format json`
        local rc=$?
        if [ $rc -ne 0 ]; then
            echo "Error: pscale deploy-request list returned non-zero exit code $rc: $raw_output"
            return 1
        fi
        local output
        output=`echo $raw_output | jq ".[] | select(.number == $number) | .deployment.state"`
        # jq emits JSON strings, hence the escaped quotes in the comparisons
        if [ "$output" = "\"pending\"" ] || [ "$output" = "\"in_progress\"" ]; then
            # increase wait variable exponentially but only if it is less than max_timeout
            if [ $((wait * 2)) -le $max_timeout ]; then
                wait=$((wait * 2))
            else
                wait=$max_timeout
            fi

            count=$((count+1))
            if [ $count -ge $retries ]; then
                echo "Deploy request $number is not ready after $retries retries. Exiting..."
                return 2
            fi
            echo "Deploy-request $number is not merged yet. Current status:"
            echo "show vitess_migrations\G" | pscale shell "$db" main --org "$org"
            echo "Retrying in $wait seconds..."
            sleep $wait
        elif [ "$output" = "\"complete\"" ]; then
            echo "Deploy-request $number has been merged successfully."
            return 0
        else
            echo "Deploy-request $number with unknown status: $output"
            return 3
        fi
    done
}
--------------------------------------------------------------------------------
/redis-scripts/expose-redis.yml:
--------------------------------------------------------------------------------
# twemproxy deployment fronting the Redis master so it can be exposed
# outside the cluster through the LoadBalancer service below.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: twemproxy
spec:
  selector:
    matchLabels:
      app: twemproxy
  replicas: 1
  template:
    metadata:
      labels:
        app: twemproxy
    spec:
      containers:
      - name: twemproxy
        image: jonico/twemproxy:2.0
        env:
        # backend in host:port:weight form, pointing at the in-cluster master
        - name: REDIS_SERVERS
          value: redis-master.redis.svc.cluster.local:6379:1
        - name: SERVER_CONNECTIONS
          value: "100"
        # NOTE(review): presumably milliseconds (twemproxy convention) - confirm
        - name: TIMEOUT
          value: "10000"
        # Redis auth password, taken from the `redispassword` secret
        - name: REDIS_PASSWORD
          valueFrom:
            secretKeyRef:
              name: redispassword
              key: redis_pwd
        ports:
        - containerPort: 6380
---
# LoadBalancer service: external port 6379 forwards to twemproxy's 6380.
apiVersion: v1
kind: Service
metadata:
  name: twemproxy
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6380
  selector:
    app: twemproxy
  sessionAffinity: None
  type: LoadBalancer
47 |
--------------------------------------------------------------------------------
/redis-scripts/redis.md:
--------------------------------------------------------------------------------
1 | =======================================================================
2 | = redis has been installed =
3 | =======================================================================
4 |
5 | # Redis can be accessed via port 6379 on the following DNS names from within your cluster:
6 |
7 | # redis-master.redis.svc.cluster.local for read/write operations
8 | # redis-slave.redis.svc.cluster.local for read-only operations
9 |
10 |
11 | # To get your password run:
12 |
13 | export REDIS_PASSWORD=$(kubectl get secret --namespace redis redis -o jsonpath="{.data.redis-password}" | base64 --decode)
14 |
15 | # To connect to your Redis server:
16 |
17 | # 1. Run a Redis pod that you can use as a client:
18 |
19 | kubectl run --namespace redis redis-client --rm --tty -i --restart='Never' \
20 | --env REDIS_PASSWORD=$REDIS_PASSWORD \
21 | --image docker.io/bitnami/redis:5.0.7-debian-10-r48 -- bash
22 |
23 | # 2. Connect using the Redis CLI:
24 | redis-cli -h redis-master -a $REDIS_PASSWORD
25 | redis-cli -h redis-slave -a $REDIS_PASSWORD
26 |
27 | # To connect to your database from outside the cluster execute the following commands:
28 |
29 | kubectl port-forward --namespace redis svc/redis-master 6379:6379 &
30 | redis-cli -h 127.0.0.1 -p 6379 -a $REDIS_PASSWORD
31 |
--------------------------------------------------------------------------------
/render-matrix-cell.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import argparse
3 | import time
4 | import os
5 | from urllib.request import urlopen
6 | from urllib.parse import urlparse
7 | from PIL import Image
8 | import pymysql.cursors
9 |
10 |
class VisualizeMatrixBuild(object):
    """Streams one job's slice of an image into the pixel_matrix table.

    Each matrix-build job is responsible for a sub-rectangle (cell) of the
    full display; pixels are inserted row by row (or as one whole cell) so a
    consumer can render the build progress on an LED matrix.
    """

    def __init__(self, *args, **kwargs):
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("--max-job-x", help="max-job-x", default=8, type=int)
        self.parser.add_argument("--max-job-y", help="max job y", default=4, type=int)
        self.parser.add_argument("--max-x", help="max x pixels", default=32, type=int)
        self.parser.add_argument("--max-y", help="max y pixels", default=16, type=int)
        self.parser.add_argument("--job-x", help="job x", default=1, type=int)
        self.parser.add_argument("--job-y", help="job y", default=1, type=int)
        self.parser.add_argument("--environment", help="environment", default="barfoo", type=str)
        self.parser.add_argument("--image-file", help="image file location", default="images/static_image.jpg", type=str)
        self.parser.add_argument("--operation", help="operation to use", default=None, type=str)
        self.parser.add_argument("--duration", help="pixel render time in milliseconds", default="3000", type=int)
        self.parser.add_argument("--repetitions", help="times to switch between color and gray", default="0", type=int)
        self.parser.add_argument("--repetition-delay", help="time to wait between repetitions in ms", default="60000", type=int)
        self.parser.add_argument("--connections", help="number of db connections", default="1", type=int)
        self.args = self.parser.parse_args()

    def run(self):
        """Loads the image, connects to the DB behind DATABASE_URL and inserts
        this job's pixels, alternating color/grayscale across repetitions."""
        maxX = self.args.max_x
        maxY = self.args.max_y

        # size and origin of the sub-rectangle this job renders
        pixelsX = int(maxX / self.args.max_job_x)
        pixelsY = int(maxY / self.args.max_job_y)

        offsetX = (self.args.job_x - 1) * pixelsX
        offsetY = (self.args.job_y - 1) * pixelsY

        numberPixels = pixelsX * pixelsY

        numberConnections = self.args.connections

        operation = self.args.operation

        environment = self.args.environment
        duration = self.args.duration
        repetitions_delay = self.args.repetition_delay

        repetitions = self.args.repetitions

        # spread the total render duration evenly over all pixels of the cell
        sleepBetweenPixels = duration / numberPixels

        image_file = self.args.image_file
        if image_file.startswith("http"):
            image = Image.open(urlopen(image_file))
        else:
            image = Image.open(image_file)

        width, height = image.size

        # NOTE(review): resizes only when BOTH dimensions differ from the
        # target; `or` may have been intended here - confirm.
        if width != maxX and height != maxY:
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
            image.thumbnail((maxX, maxY), Image.LANCZOS)

        url = urlparse(os.environ.get('DATABASE_URL'))

        # one connection + cursor per requested parallel DB connection
        connections = {}
        cursors = {}
        for i in range(numberConnections):
            connections[i] = pymysql.connect(host=url.hostname,
                                             user=url.username,
                                             password=url.password,
                                             db=url.path[1:],
                                             ssl={'ca': 'certs.pem'})
            cursors[i] = connections[i].cursor()
            cursors[i].execute("SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED")

        rgb_im = image.convert('RGB')
        width, height = rgb_im.size

        # without --operation the table has no operation column (basic schema);
        # `is None` instead of `== None` is the idiomatic identity test
        basicSchema = operation is None
        # with a single connection the whole cell is written in one insert
        updateWholeCell = numberConnections == 1

        if basicSchema:
            add_pixels = ("INSERT INTO pixel_matrix "
                          "(environment, cell, pixel_data ) "
                          "VALUES (%s, %s, %s)" )
        else:
            add_pixels = ("INSERT INTO pixel_matrix "
                          "(environment, cell, pixel_data, operation ) "
                          "VALUES (%s, %s, %s, %s)" )

        for i in range(repetitions):
            if i != 0:
                time.sleep(repetitions_delay/1000)

            values = ""
            for y in range(pixelsY):
                # rows are distributed round-robin over the connections
                cursor = cursors[y % numberConnections]
                connection = connections[y % numberConnections]
                for x in range(pixelsX):
                    realX = x + offsetX
                    realY = y + offsetY
                    r, g, b = rgb_im.getpixel((realX % width, realY % height))
                    # standard luma weights for the grayscale value
                    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
                    if (i % 2 == 1):
                        # every other repetition renders in grayscale
                        r, g, b = gray, gray, gray
                    value = ("%d,%d,%d,%d,%d") % (realX, realY, r, g, b)
                    values += value
                    values += "\n"

                if (not updateWholeCell):
                    # one insert per row, keyed by job coordinates and row index
                    hashKey = ("%d/%d/%d") % (self.args.job_x, self.args.job_y, y)
                    if basicSchema:
                        cursor.execute(add_pixels, (environment, hashKey, values))
                    else:
                        cursor.execute(add_pixels, (environment, hashKey, values, operation))
                    connection.commit()
                    values = ""

                time.sleep(sleepBetweenPixels*pixelsX/1000)

            if updateWholeCell:
                # single insert for the whole cell; cursor/connection are the
                # loop variables left over from the last row (one connection)
                hashKey = ("job/%d/%d") % (self.args.job_x, self.args.job_y)
                if basicSchema:
                    cursor.execute(add_pixels, (environment, hashKey, values))
                else:
                    cursor.execute(add_pixels, (environment, hashKey, values, operation))
                connection.commit()

        for i in range(numberConnections):
            # close cursors before their connections; the original closed the
            # connection first, leaving the cursor to be closed on a dead link
            cursors[i].close()
            connections[i].close()
134 |
# Main function
if __name__ == "__main__":
    # Parse CLI arguments and stream this job's image pixels into the database.
    stream_pixels = VisualizeMatrixBuild()
    stream_pixels.run()
139 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | redis
2 | PySimpleGUI
3 | pillow
4 | PyMySQL
--------------------------------------------------------------------------------
/samplebase.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import time
3 | import sys
4 | import os
5 |
6 | sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/..'))
7 | from rgbmatrix import RGBMatrix, RGBMatrixOptions
8 |
9 |
class SampleBase(object):
    """Base class for LED-matrix samples.

    Registers all the --led-* command line options, translates them into an
    RGBMatrixOptions object and drives the subclass's run() loop until CTRL-C.
    """

    def __init__(self, *args, **kwargs):
        self.parser = argparse.ArgumentParser()

        self.parser.add_argument("-r", "--led-rows", action="store", help="Display rows. 16 for 16x32, 32 for 32x32. Default: 32", default=32, type=int)
        self.parser.add_argument("--led-cols", action="store", help="Panel columns. Typically 32 or 64. (Default: 32)", default=32, type=int)
        self.parser.add_argument("-c", "--led-chain", action="store", help="Daisy-chained boards. Default: 1.", default=1, type=int)
        self.parser.add_argument("-P", "--led-parallel", action="store", help="For Plus-models or RPi2: parallel chains. 1..3. Default: 1", default=1, type=int)
        self.parser.add_argument("-p", "--led-pwm-bits", action="store", help="Bits used for PWM. Something between 1..11. Default: 11", default=11, type=int)
        self.parser.add_argument("-b", "--led-brightness", action="store", help="Sets brightness level. Default: 100. Range: 1..100", default=100, type=int)
        self.parser.add_argument("-m", "--led-gpio-mapping", help="Hardware Mapping: regular, adafruit-hat, adafruit-hat-pwm" , choices=['regular', 'adafruit-hat', 'adafruit-hat-pwm'], type=str)
        self.parser.add_argument("--led-scan-mode", action="store", help="Progressive or interlaced scan. 0 Progressive, 1 Interlaced (default)", default=1, choices=range(2), type=int)
        self.parser.add_argument("--led-pwm-lsb-nanoseconds", action="store", help="Base time-unit for the on-time in the lowest significant bit in nanoseconds. Default: 130", default=130, type=int)
        self.parser.add_argument("--led-show-refresh", action="store_true", help="Shows the current refresh rate of the LED panel")
        self.parser.add_argument("--led-slowdown-gpio", action="store", help="Slow down writing to GPIO. Range: 0..4. Default: 1", default=1, type=int)
        self.parser.add_argument("--led-no-hardware-pulse", action="store", help="Don't use hardware pin-pulse generation")
        self.parser.add_argument("--led-rgb-sequence", action="store", help="Switch if your matrix has led colors swapped. Default: RGB", default="RGB", type=str)
        self.parser.add_argument("--led-pixel-mapper", action="store", help="Apply pixel mappers. e.g \"Rotate:90\"", default="", type=str)
        self.parser.add_argument("--led-row-addr-type", action="store", help="0 = default; 1=AB-addressed panels; 2=row direct; 3=ABC-addressed panels; 4 = ABC Shift + DE direct", default=0, type=int, choices=[0,1,2,3,4])
        self.parser.add_argument("--led-multiplexing", action="store", help="Multiplexing type: 0=direct; 1=strip; 2=checker; 3=spiral; 4=ZStripe; 5=ZnMirrorZStripe; 6=coreman; 7=Kaler2Scan; 8=ZStripeUneven... (Default: 0)", default=0, type=int)

    def usleep(self, value):
        """Sleeps for `value` microseconds."""
        time.sleep(value / 1000000.0)

    def run(self):
        # Placeholder; subclasses override this with their rendering loop.
        print("Running")

    def process(self):
        """Parses CLI arguments, builds the matrix and invokes run().

        Returns True when run() completes; exits the process on CTRL-C.
        """
        self.args = self.parser.parse_args()

        options = RGBMatrixOptions()

        # identity comparison is the idiomatic way to test for None
        if self.args.led_gpio_mapping is not None:
            options.hardware_mapping = self.args.led_gpio_mapping
        options.rows = self.args.led_rows
        options.cols = self.args.led_cols
        options.chain_length = self.args.led_chain
        options.parallel = self.args.led_parallel
        options.row_address_type = self.args.led_row_addr_type
        options.multiplexing = self.args.led_multiplexing
        options.pwm_bits = self.args.led_pwm_bits
        options.brightness = self.args.led_brightness
        options.pwm_lsb_nanoseconds = self.args.led_pwm_lsb_nanoseconds
        options.led_rgb_sequence = self.args.led_rgb_sequence
        options.pixel_mapper_config = self.args.led_pixel_mapper
        #options.drop_privileges=False

        if self.args.led_show_refresh:
            options.show_refresh_rate = 1

        if self.args.led_slowdown_gpio is not None:
            options.gpio_slowdown = self.args.led_slowdown_gpio
        # NOTE(review): --led-no-hardware-pulse is declared with action="store",
        # so it expects a value rather than acting as a flag; store_true may
        # have been intended - confirm against callers before changing.
        if self.args.led_no_hardware_pulse:
            options.disable_hardware_pulsing = True

        self.matrix = RGBMatrix(options = options)

        try:
            # Start loop
            print("Press CTRL-C to stop sample")
            self.run()
        except KeyboardInterrupt:
            print("Exiting\n")
            sys.exit(0)

        return True
76 |
--------------------------------------------------------------------------------
/show-node-allocation-aws.sh:
--------------------------------------------------------------------------------
1 | python3 show-node-allocation-gui.py --length=8 --height=8 --max-x=48 --max-y=80 --window-x=768 --window-y=1200 -n github-actions-runner-octodemo ip-10-0-1-124.us-east-2.compute.internal ip-10-0-1-81.us-east-2.compute.internal ip-10-0-3-141.us-east-2.compute.internal
2 |
--------------------------------------------------------------------------------
/show-node-allocation-blinkt.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import time
3 | import argparse
4 | import blinkt
5 | import subprocess
6 |
class Pod:
    """Bookkeeping record for one Kubernetes pod shown on the display."""

    def __init__(self, name, status, node, position, shortName):
        # Store the caller-supplied values verbatim; callers use position=-1
        # for a pod that has not been assigned a display slot yet.
        self.shortName = shortName
        self.position = position
        self.node = node
        self.status = status
        self.name = name
14 |
15 |
16 | class PodStatusLed():
    def __init__(self, *args, **kwargs):
        # CLI options: display height defaults to the number of physical
        # Blinkt! pixels; one or more positional node names select which
        # Kubernetes nodes to watch.
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("--max-y", help="max y pixels", default=blinkt.NUM_PIXELS, type=int)
        self.parser.add_argument("-n", "--namespace", help="Kubernetes namespace", default="github-actions-runner-link")
        self.parser.add_argument("nodes", action='store', nargs='+', default=["node64-2"])

        self.args = self.parser.parse_args()
24 |
25 | def find_first_unused_position (positionSet):
26 | for i in range (1000):
27 | if (not i in positionSet):
28 | return i
29 | return -1
30 |
31 | def status_color(status):
32 | return {
33 | 'Running': [0, 255, 0],
34 | 'CrashLoopBackOff': [255, 0, 0],
35 | 'CreateContainerError': [255, 0, 0],
36 | 'InvalidImageName': [255, 0, 0],
37 | 'ImagePullBackOff': [255, 0, 0],
38 | 'Terminating': [165,42,42],
39 | 'Completed': [0, 0, 255],
40 | 'Pending': [255, 255, 255],
41 | 'ContainerCreating': [255, 255, 0],
42 | 'Terminated': [0, 0, 0],
43 | 'Ready': [128, 128, 128],
44 | 'NotReady': [255, 0, 0]
45 | }.get(status, [255,182,193])
46 |
47 |
48 | def run(self):
49 | nodes = {}
50 | nodeStatus = {}
51 | nodesByPosition = {}
52 | positionsAlreadyTaken = {}
53 | positionMax = self.args.max_y
54 |
55 | numberNodes=len(self.args.nodes)
56 | namespace = self.args.namespace
57 |
58 |
59 | for node in self.args.nodes:
60 | nodes[node] = {}
61 | nodeStatus[node] = "NotReady"
62 | nodesByPosition[node] = []
63 | positionsAlreadyTaken[node] = set()
64 |
65 | while True:
66 |
67 | podsSeenThisRound = set()
68 | podsToBeInsertedThisRound = {}
69 | for node in self.args.nodes:
70 | podsToBeInsertedThisRound[node]= []
71 |
72 | output = subprocess.getoutput("kubectl get nodes --no-headers")
73 | for row in output.split("\n"):
74 | values = row.split();
75 | if (not values):
76 | continue
77 | # read in node status
78 | nodeStatus[values[0]]=values[1]
79 |
80 | output = subprocess.getoutput("kubectl get pods --namespace %s --no-headers -o wide" % namespace)
81 | for row in output.split("\n"):
82 | values = row.split();
83 | if (not values):
84 | continue
85 |
86 | podStatus = values[2]
87 | nodeName = values[6]
88 | podShortName = values[0]
89 | podName = podShortName + "-" + nodeName
90 |
91 | if (nodeName not in nodes.keys()):
92 | continue
93 |
94 | podsSeenThisRound.add(podName)
95 |
96 | pod = nodes[nodeName].get(podName)
97 | if (not pod):
98 | # we have to schedule the position after this loop
99 | podsToBeInsertedThisRound[nodeName].append(Pod(podName, podStatus, nodeName, -1, podShortName))
100 | else:
101 | # we only change the status, and maybe node position is already set
102 | pod.status=podStatus
103 |
104 |
105 | for node, pods in podsToBeInsertedThisRound.items():
106 | performedDefrag = False
107 | for pod in pods:
108 | position = PodStatusLed.find_first_unused_position(positionsAlreadyTaken[pod.node])
109 | if position >= positionMax:
110 | if not performedDefrag:
111 | # idea: turn defrag logic into a function
112 | for podName, existingPod in nodes[pod.node].items():
113 | if (not podName in podsSeenThisRound):
114 | # mark position for potential override, don't do it yet
115 | positionsAlreadyTaken[existingPod.node].remove(existingPod.position)
116 | performedDefrag = True
117 | position = PodStatusLed.find_first_unused_position(positionsAlreadyTaken[pod.node])
118 |
119 | # if defrag was already performed this round or we have not been lucky
120 | if position >= positionMax:
121 | print("Display too small, skipping node %s until we can allocate a position." % pod.name)
122 | continue
123 |
124 | pod.position = position
125 | positionsAlreadyTaken[pod.node].add(position)
126 | nodes[pod.node][pod.name] = pod
127 | if (position= positionMax:
230 | if not performedDefrag:
231 | # idea: turn defrag logic into a function
232 | for podName, existingPod in nodes[pod.node].items():
233 | if (not podName in podsSeenThisRound):
234 | # mark position for potential override, don't do it yet
235 | positionsAlreadyTaken[existingPod.node].remove(existingPod.position)
236 | performedDefrag = True
237 | position = PodStatusLed.find_first_unused_position(positionsAlreadyTaken[pod.node])
238 |
239 | # if defrag was already performed this round or we have not been lucky
240 | if position >= positionMax:
241 | print("Display too small, skipping node %s until we can allocate a position." % pod.name)
242 | continue
243 |
244 | pod.position = position
245 | positionsAlreadyTaken[pod.node].add(position)
246 | nodes[pod.node][pod.name] = pod
247 | if (position= positionMax:
126 | if not performedDefrag:
127 | # idea: turn defrag logic into a function
128 | for podName, existingPod in nodes[pod.node].items():
129 | if (not podName in podsSeenThisRound):
130 | # mark position for potential override, don't do it yet
131 | positionsAlreadyTaken[existingPod.node].remove(existingPod.position)
132 | performedDefrag = True
133 | position = PodStatusLed.find_first_unused_position(positionsAlreadyTaken[pod.node])
134 |
135 | # if defrag was already performed this round or we have not been lucky
136 | if position >= positionMax:
137 | print("LED Matrix too small, skipping node %s until we can allocate a position." % pod.name)
138 | continue
139 |
140 | pod.position = position
141 | positionsAlreadyTaken[pod.node].add(position)
142 | nodes[pod.node][pod.name] = pod
143 | if (position