├── .gitattributes ├── .githooks └── pre-commit ├── .github └── workflows │ ├── deploy.yaml │ ├── dev_env.yaml │ ├── e2etest.yaml │ ├── prod.yaml │ ├── public_image.yml │ ├── public_image_dev.yml │ └── test.yaml ├── .gitignore ├── .vscode └── launch.json ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── apiserver ├── apiserver.go ├── apiserver_test.go └── config.go ├── auth ├── auth.go ├── auth_test.go ├── azure_token.go └── config.go ├── cmd ├── dereference │ └── main.go ├── transpile │ └── main.go └── validate │ └── main.go ├── config.yml ├── dev ├── Dockerfile.cluster ├── Dockerfile.mongo ├── Dockerfile.server ├── Dockerfile.server_builder ├── Makefile ├── argo-cluster-install │ ├── base │ │ ├── clusterroles.yaml │ │ ├── configmaps.yaml │ │ ├── deployments.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── secrets.yaml │ │ └── services.yaml │ ├── kustomization.yaml │ ├── sandbox-project-a │ │ ├── configmaps.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── roles.yaml │ │ └── secrets.yaml │ └── sandbox-project-b │ │ ├── configmaps.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── secrets.yaml ├── cluster_runner.sh ├── docker-compose-e2e.yaml ├── docker-compose.yaml ├── example_data │ └── dump │ │ ├── admin │ │ ├── system.version.bson │ │ └── system.version.metadata.json │ │ ├── config │ │ ├── external_validation_keys.bson │ │ ├── external_validation_keys.metadata.json │ │ ├── tenantMigrationDonors.bson │ │ ├── tenantMigrationDonors.metadata.json │ │ ├── tenantMigrationRecipients.bson │ │ └── tenantMigrationRecipients.metadata.json │ │ └── test │ │ ├── Components.bson │ │ ├── Components.metadata.json │ │ ├── Jobs.bson │ │ ├── Jobs.metadata.json │ │ ├── Workflows.bson │ │ └── Workflows.metadata.json ├── flowify_server_runner.sh ├── kind.yaml └── kind_cluster_config_export.sh ├── docker-compose-tests.yaml ├── e2etest ├── Makefile ├── artifact_test.go ├── component_test.go ├── default-roles.yaml ├── e2e_test.go ├── secret_test.go ├── test.sh ├── workspace_cm_test.yaml └── workspace_test.go ├── go.mod ├── go.sum ├── main.go ├── models ├── examples │ ├── brick-parameter-component.json │ ├── bricksexample.json │ ├── graph-input-volumes.json │ ├── graph-throughput-volumes.json │ ├── hello-world-workflow.json │ ├── if-else-statement.json │ ├── if-statement.json │ ├── job-example.json │ ├── job-map-example.json │ ├── job-mounts.json │ ├── job-submap-example.json │ ├── minimal-any-component.json │ ├── minimal-any-workflow.json │ ├── minimal-brick-component.json │ ├── minimal-conditional-component.json │ ├── minimal-graph-component.json │ ├── minimal-map-component.json │ ├── multi-level-secrets.json │ ├── myexample.json │ ├── single-node-graph-component.json │ ├── two-node-graph-component-with-cref.json │ └── two-node-graph-component.json ├── job.go ├── models.go ├── models_test.go ├── spec │ ├── any.schema.json │ ├── arg.schema.json │ ├── brick.schema.json │ ├── component.schema.json │ ├── componentpostrequest.schema.json │ ├── conditional.schema.json │ ├── cref.schema.json │ ├── crefversion.schema.json │ ├── data.schema.json │ ├── dataarray.schema.json │ ├── edge.schema.json │ ├── expression.schema.json │ ├── flowify.json │ ├── flowify.rapidoc.html │ ├── flowify.redoc.html │ ├── flowify.swagger.html │ ├── graph.schema.json │ ├── job.schema.json │ ├── jobpostrequest.schema.json │ ├── jobstatus.schema.json │ ├── map.schema.json │ ├── mapping.schema.json │ ├── metadata.schema.json │ ├── metadatalist.schema.json │ ├── metadataworkspace.schema.json │ ├── 
metadataworkspacelist.schema.json │ ├── node.schema.json │ ├── pageinfo.schema.json │ ├── port.schema.json │ ├── res.schema.json │ ├── secret.schema.json │ ├── userinfo.schema.json │ ├── value.schema.json │ ├── version.schema.json │ ├── volume.schema.json │ ├── volumelist.schema.json │ ├── workflow.schema.json │ ├── workflowpostrequest.schema.json │ └── workspace.schema.json └── validate.go ├── pkg ├── secret │ ├── config.go │ ├── mock.go │ ├── secret.go │ └── secret_test.go └── workspace │ ├── mock.go │ ├── workspace.go │ └── workspace_test.go ├── rest ├── components.go ├── handlers_test.go ├── jobs.go ├── rest.go ├── secrets.go ├── secrets_test.go ├── userinfo.go ├── volumes.go ├── volumes_test.go ├── workflows.go └── workspaces.go ├── sandbox ├── Makefile ├── get-flowify-workflow.sh ├── get-workflow-template.sh ├── get-workflow.sh ├── lint-workflow.sh ├── list-flowify-workflow-versions.sh ├── list-flowify-workflows.sh ├── list-workflow-templates.sh ├── list-workflows.sh ├── list-workspaces.sh ├── post-flowify-workflow.sh ├── postman.ipynb ├── reset.sh ├── sandbox-config.yaml ├── secrets.yaml ├── start.sh ├── stop.sh ├── submit-flowify-workflow.sh └── userinfo.sh ├── storage ├── local.go ├── mongo.go ├── mongovolumestorage.go ├── parsequery.go ├── parsequery_test.go ├── references.go ├── references_test.go ├── storage.go ├── storage_test.go └── volumestorage_test.go ├── transpiler ├── argo.go ├── helpers.go ├── transpiler.go └── transpiler_test.go └── user └── user.go /.gitattributes: -------------------------------------------------------------------------------- 1 | *.pdf filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /.githooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # https://medium.com/@radlinskii/writing-the-pre-commit-git-hook-for-go-files-810f8d5f1c6f 3 | 4 | # Handle missing files 5 | # https://gist.github.com/radlinskii/0ba6ec694b1e590d8457c98a358f335f 6 | STAGED_GO_FILES=$(git diff --cached --name-status --diff-filter=d -- '*.go' | awk '{ print $2 }') 7 | 8 | for FILE in $STAGED_GO_FILES 9 | do 10 | go fmt $FILE 11 | git add $FILE 12 | done 13 | 14 | exit 0 15 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy 2 | on: 3 | workflow_dispatch: {} 4 | push: 5 | paths-ignore: 6 | - ".github/**" 7 | - "dev/**" 8 | tags: ['*'] 9 | branches: 10 | - main 11 | env: 12 | PROJECT: flowify 13 | 14 | jobs: 15 | build: 16 | name: Push image 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v2 21 | with: 22 | fetch-depth: 0 23 | - name: Login to the dev container registry 24 | run: | 25 | echo "${{ secrets.DOCKER_PASSWORD }}" \ 26 | | docker login --username "${{ secrets.DOCKER_USERNAME }}" \ 27 | --password-stdin \ 28 | auroradevacr.azurecr.io 29 | - name: Build dev image 30 | uses: docker/build-push-action@v2 31 | with: 32 | labels: | 33 | com.equinor.aurora.project=${{ env.PROJECT }} 34 | org.opencontainers.image.created=${{ env.DATE }} 35 | org.opencontainers.image.revision=${{ github.sha }} 36 | tags: auroradevacr.azurecr.io/flowify/flowify-workflows-server:${{ github.sha }} 37 | build-args: FLOWIFY_GIT_SHA=${{ github.sha }} 38 | push: true 39 | - name: Logout from the container registry 40 | run: | 41 | docker logout 42 | - name: Login to the prod 
container registry 43 | run: | 44 | echo "${{ secrets.DOCKER_PASSWORD }}" \ 45 | | docker login --username "${{ secrets.DOCKER_USERNAME }}" \ 46 | --password-stdin \ 47 | auroraprodacr.azurecr.io 48 | - name: Build prod image 49 | uses: docker/build-push-action@v2 50 | with: 51 | labels: | 52 | com.equinor.aurora.project=${{ env.PROJECT }} 53 | org.opencontainers.image.created=${{ env.DATE }} 54 | org.opencontainers.image.revision=${{ github.sha }} 55 | tags: auroraprodacr.azurecr.io/flowify/flowify-workflows-server:${{ github.sha }} 56 | build-args: FLOWIFY_GIT_SHA=${{ github.sha }} 57 | push: true 58 | - name: Logout from the container registry 59 | run: | 60 | docker logout 61 | deploy: 62 | name: Update deployment 63 | runs-on: ubuntu-latest 64 | needs: build 65 | env: 66 | EMAIL: ${{ github.event.head_commit.author.email }} 67 | NAME: ${{ github.event.head_commit.author.name }} 68 | steps: 69 | - name: Checkout infra 70 | uses: actions/checkout@v2 71 | with: 72 | ref: main 73 | repository: equinor/flowify-infrastructure 74 | ssh-key: ${{ secrets.FLOWIFY_INFRA_DEPLOY_KEY }} 75 | - name: Update infra 76 | run: | 77 | SHA_SHORT=$(echo ${{ github.sha }} | cut -c1-8) 78 | SHA_LONG=${{ github.sha }} 79 | git config --global user.email "${EMAIL}" 80 | git config --global user.name "GitHub Actions (${NAME})" 81 | sed -i "s/imageTag:.*/imageTag: $SHA_LONG/g" kube/server/values-dev.yaml 82 | git add kube/server/values-dev.yaml 83 | git commit --message "GHA: Update development imageTag" || true 84 | git push 85 | -------------------------------------------------------------------------------- /.github/workflows/dev_env.yaml: -------------------------------------------------------------------------------- 1 | name: build dev environment images 2 | on: 3 | workflow_dispatch: {} 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - 'dev/**' 9 | permissions: 10 | id-token: write 11 | contents: read 12 | packages: write 13 | jobs: 14 | build: 15 | runs-on: ubuntu-20.04 16 | 17 | steps: 18 | - name: Checkout branch 19 | uses: actions/checkout@v1 20 | 21 | - name: Set image tag and short sha 22 | run: | 23 | echo "SHORT_SHA=$(git rev-parse --short HEAD)" >> $GITHUB_ENV 24 | 25 | - name: GitHub Container Registry login 26 | uses: docker/login-action@v1 27 | with: 28 | registry: ghcr.io 29 | username: ${{ github.actor }} 30 | password: ${{ secrets.GITHUB_TOKEN }} 31 | 32 | - name: Initialize BuildX 33 | uses: docker/setup-buildx-action@v1 34 | 35 | - name: Build and push kind cluster image 36 | uses: docker/build-push-action@v2 37 | with: 38 | context: . 39 | file: ./dev/Dockerfile.cluster 40 | push: true 41 | tags: | 42 | ghcr.io/equinor/flowify-dev-cluster:${{ env.SHORT_SHA}} 43 | ghcr.io/equinor/flowify-dev-cluster:latest 44 | 45 | - name: Build and push dev env builder image 46 | uses: docker/build-push-action@v2 47 | with: 48 | context: . 49 | file: ./dev/Dockerfile.server_builder 50 | push: true 51 | tags: | 52 | ghcr.io/equinor/flowify-dev-builder:${{ env.SHORT_SHA}} 53 | ghcr.io/equinor/flowify-dev-builder:latest 54 | 55 | - name: Build and push mongo image 56 | uses: docker/build-push-action@v2 57 | with: 58 | context: . 
59 | file: ./dev/Dockerfile.mongo 60 | push: true 61 | tags: | 62 | ghcr.io/equinor/flowify-mongo:${{ env.SHORT_SHA}} 63 | ghcr.io/equinor/flowify-mongo:latest 64 | -------------------------------------------------------------------------------- /.github/workflows/e2etest.yaml: -------------------------------------------------------------------------------- 1 | name: End-to-end test 2 | on: 3 | workflow_dispatch: {} 4 | push: 5 | paths-ignore: 6 | - ".github/**" 7 | - "dev/**" 8 | jobs: 9 | tests: 10 | name: Run end-to-end tests 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | - name: Run try to build e2e-container 16 | run: make docker_e2e_build flowify_git_sha=${{ github.sha }} 17 | - name: Run tests and generate report 18 | run: make docker_e2e_test flowify_git_sha=${{ github.sha }} 19 | - name: Archive code test results 20 | uses: actions/upload-artifact@v2 21 | if: always() 22 | with: 23 | name: report 24 | path: testoutputs/e2ereport.xml 25 | - name: Publish end-to-end test results 26 | uses: EnricoMi/publish-unit-test-result-action@v2 27 | if: always() 28 | with: 29 | files: testoutputs/e2ereport.xml 30 | check_name: End-to-end test results 31 | 32 | -------------------------------------------------------------------------------- /.github/workflows/prod.yaml: -------------------------------------------------------------------------------- 1 | name: Prod 2 | on: 3 | workflow_dispatch: {} 4 | 5 | jobs: 6 | deploy: 7 | name: Update deployment 8 | runs-on: ubuntu-latest 9 | env: 10 | EMAIL: ${{ github.event.head_commit.author.email }} 11 | NAME: ${{ github.event.head_commit.author.name }} 12 | steps: 13 | - name: Checkout infra 14 | uses: actions/checkout@v2 15 | with: 16 | ref: main 17 | repository: equinor/flowify-infrastructure 18 | ssh-key: ${{ secrets.FLOWIFY_INFRA_DEPLOY_KEY }} 19 | - name: Update infra 20 | run: | 21 | SHA_SHORT=$(echo ${{ github.sha }} | cut -c1-8) 22 | SHA_LONG=${{ github.sha }} 23 | git config --global user.email "${EMAIL}" 24 | git config --global user.name "GitHub Actions (${NAME})" 25 | sed -i "s/imageTag:.*/imageTag: $SHA_LONG/g" kube/server/values-prod.yaml 26 | git add kube/server/values-prod.yaml 27 | git commit --message "GHA: Update production imageTag" || true 28 | git push 29 | -------------------------------------------------------------------------------- /.github/workflows/public_image.yml: -------------------------------------------------------------------------------- 1 | name: Build public docker images to ghcr 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | permissions: 8 | id-token: write 9 | contents: read 10 | packages: write 11 | jobs: 12 | build: 13 | runs-on: ubuntu-20.04 14 | 15 | steps: 16 | - name: Checkout branch 17 | uses: actions/checkout@v1 18 | 19 | - name: GitHub Container Registry login 20 | uses: docker/login-action@v1 21 | with: 22 | registry: ghcr.io 23 | username: ${{ github.actor }} 24 | password: ${{ secrets.GITHUB_TOKEN }} 25 | 26 | - name: Initialize BuildX 27 | uses: docker/setup-buildx-action@v1 28 | 29 | - name: Build and push docker image (for k8s deploy) 30 | uses: docker/build-push-action@v2 31 | with: 32 | context: . 33 | push: true 34 | tags: | 35 | ghcr.io/equinor/flowify-workflows-server:${{ github.event.release.tag_name }} 36 | ghcr.io/equinor/flowify-workflows-server:latest 37 | 38 | - name: Build and push docker image (for local run) 39 | uses: docker/build-push-action@v2 40 | with: 41 | context: . 
42 | file: ./dev/Dockerfile.server 43 | push: true 44 | tags: | 45 | ghcr.io/equinor/flowify-workflows-server-local:${{ github.event.release.tag_name }} 46 | ghcr.io/equinor/flowify-workflows-server-local:latest -------------------------------------------------------------------------------- /.github/workflows/public_image_dev.yml: -------------------------------------------------------------------------------- 1 | name: Build public dev docker images to ghcr 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | paths-ignore: 7 | - '.github/**' 8 | 9 | permissions: 10 | id-token: write 11 | contents: read 12 | packages: write 13 | jobs: 14 | build: 15 | runs-on: ubuntu-20.04 16 | 17 | steps: 18 | - name: Checkout branch 19 | uses: actions/checkout@v1 20 | 21 | - name: Set image tag and short sha 22 | run: | 23 | echo "DOCKER_IMG_VERSION=$(cat ./aim/version.txt)" >> $GITHUB_ENV 24 | echo "SHORT_SHA=$(git rev-parse --short HEAD)" >> $GITHUB_ENV 25 | 26 | - name: GitHub Container Registry login 27 | uses: docker/login-action@v1 28 | with: 29 | registry: ghcr.io 30 | username: ${{ github.actor }} 31 | password: ${{ secrets.GITHUB_TOKEN }} 32 | 33 | - name: Initialize BuildX 34 | uses: docker/setup-buildx-action@v1 35 | 36 | - name: Build and push docker image (for k8s deploy) 37 | uses: docker/build-push-action@v2 38 | with: 39 | context: . 40 | push: true 41 | tags: | 42 | ghcr.io/equinor/flowify-workflows-server:sha-${{ env.SHORT_SHA}} 43 | ghcr.io/equinor/flowify-workflows-server:dev 44 | 45 | - name: Build and push docker image (for local run) 46 | uses: docker/build-push-action@v2 47 | with: 48 | context: . 49 | file: ./dev/Dockerfile.server 50 | push: true 51 | tags: | 52 | ghcr.io/equinor/flowify-workflows-server-local:sha-${{ env.SHORT_SHA}} 53 | ghcr.io/equinor/flowify-workflows-server-local:dev -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Unit tests 2 | on: 3 | workflow_dispatch: {} 4 | push: 5 | paths-ignore: 6 | - ".github/**" 7 | - "dev/**" 8 | jobs: 9 | tests: 10 | name: Run all unit tests 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | - name: Run try to build container 16 | run: docker build --build-arg FLOWIFY_GIT_SHA=${{ github.sha }} . 
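      # 'make docker_unittest' (see Makefile) runs the suite via docker-compose-tests.yaml and writes a junit report plus lcov coverage into ./testoutputs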
17 | - name: Run tests and generate report 18 | run: make docker_unittest 19 | - name: Upload coverage 20 | uses: romeovs/lcov-reporter-action@v0.2.21 21 | if: always() 22 | with: 23 | github-token: ${{ secrets.GITHUB_TOKEN }} 24 | lcov-file: testoutputs/coverage.lcov 25 | - name: Archive coverage 26 | uses: actions/upload-artifact@v2 27 | if: always() 28 | with: 29 | name: coverage 30 | path: testoutputs/coverage.lcov 31 | - name: Archive code test results 32 | uses: actions/upload-artifact@v2 33 | if: always() 34 | with: 35 | name: report 36 | path: testoutputs/report.xml 37 | - name: Publish Unit Test Results 38 | uses: EnricoMi/publish-unit-test-result-action@v1 39 | if: always() 40 | with: 41 | files: testoutputs/report.xml 42 | 43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | 3 | ### Go ### 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | 20 | ### Go Patch ### 21 | /vendor/ 22 | /Godeps/ 23 | build/ 24 | 25 | ### Project files from codegen 26 | *.pb.go 27 | *.pb.gw.go 28 | *.swagger.json 29 | 30 | 31 | ### Jupyter ### 32 | .ipynb_checkpoints -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Launch Package", 6 | "type": "go", 7 | "request": "launch", 8 | "mode": "debug", 9 | "program": "${fileDirname}" 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.18-alpine as base 2 | LABEL description="Flowify build test environment" 3 | LABEL org.opencontainers.image.source="https://github.com/equinor/flowify-workflows-server" 4 | 5 | RUN apk add git make binutils gcc musl-dev 6 | 7 | FROM base as builder 8 | RUN mkdir -p $GOPATH/src/github.com/equinor/ 9 | WORKDIR $GOPATH/src/github.com/equinor/flowify-workflows-server 10 | # We should tighten this up 11 | COPY . . 12 | 13 | ARG FLOWIFY_GIT_SHA 14 | RUN make strip=1 flowify_git_sha=${FLOWIFY_GIT_SHA} 15 | 16 | FROM builder as tester 17 | RUN go install github.com/jstemmer/go-junit-report@v0.9.1 18 | RUN go install github.com/jandelgado/gcov2lcov@v1.0.5 19 | #RUN apk add nodejs 20 | 21 | COPY --from=builder /go/src/github.com/equinor/flowify-workflows-server/build ./ 22 | CMD ["./flowify-workflows-server"] 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | 3 | # Make sure we inject a sha into the binary, if available 4 | ifndef flowify_git_sha 5 | flowify_git_sha=$(shell git rev-parse --short HEAD) 6 | $(info Set flowify_git_sha=$(flowify_git_sha) from git rev-parse /) 7 | else 8 | $(info Set flowify_git_sha=$(flowify_git_sha) from arg /) 9 | endif 10 | 11 | SRCS := $(shell find . -name "*.go" -not -path "./vendor/*" -not -path "./test/*" ! 
-name '*_test.go' -not -path "./mock/*") 12 | 13 | ifdef strip 14 | STRIP=strip 15 | else 16 | STRIP=true 17 | endif 18 | 19 | all: server 20 | 21 | server: build/flowify-workflows-server 22 | 23 | build/flowify-workflows-server: $(SRCS) 24 | CGO_ENABLED=0 go build -v -o $@ -ldflags "-X 'github.com/equinor/flowify-workflows-server/apiserver.CommitSHA=$(flowify_git_sha)' -X 'github.com/equinor/flowify-workflows-server/apiserver.BuildTime=$(shell date -Is)'" 25 | $(STRIP) $@ 26 | 27 | init: 28 | git config core.hooksPath .githooks 29 | 30 | clean: 31 | @go clean 32 | @rm -rf build 33 | @rm -rf docs/*.json 34 | @rm -rf docs/*.yaml 35 | 36 | TEST_OUTPUT_DIR = ./testoutputs 37 | 38 | # exclude slow e2e tests depending on running server infrastructure 39 | # define the UNITTEST_COVERAGE variable to output coverage 40 | unittest: 41 | ifdef UNITTEST_COVERAGE 42 | mkdir -p $(TEST_OUTPUT_DIR) 43 | rm -f pipe1 44 | mkfifo pipe1 45 | (tee $(TEST_OUTPUT_DIR)/unittest.log | go-junit-report > $(TEST_OUTPUT_DIR)/report.xml) < pipe1 & 46 | go test $(UNITTEST_FLAGS) `go list ./... | grep -v e2etest` -covermode=count -coverprofile=coverage.out -ldflags "-X 'github.com/equinor/flowify-workflows-server/apiserver.CommitSHA=$(flowify_git_sha)' -X 'github.com/equinor/flowify-workflows-server/apiserver.BuildTime=$(shell date -Is)'" 2>&1 -v > pipe1 47 | gcov2lcov -infile=coverage.out -outfile=$(TEST_OUTPUT_DIR)/coverage.lcov 48 | else 49 | go test $(UNITTEST_FLAGS) `go list ./... | grep -v e2etest` 50 | endif 51 | 52 | e2etest: server 53 | $(MAKE) -C e2etest all flowify_git_sha=$(flowify_git_sha) 54 | 55 | test: unittest e2etest 56 | 57 | # the docker tests run the unittests and e2etest in a dockerized environment 58 | 59 | docker_unittest: 60 | FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f docker-compose-tests.yaml build 61 | FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f docker-compose-tests.yaml up --exit-code-from app 62 | 63 | 64 | docker_e2e_build: 65 | # build base services 66 | docker-compose -f dev/docker-compose.yaml build 67 | # build composed testrunner image 68 | FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f dev/docker-compose.yaml -f dev/docker-compose-e2e.yaml build flowify-e2e-runner 69 | 70 | 71 | docker_e2e_test: docker_e2e_build 72 | # explicit 'up' means we stop (but don't remove) containers afterwards 73 | FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f dev/docker-compose.yaml -f dev/docker-compose-e2e.yaml up --timeout 5 --exit-code-from flowify-e2e-runner cluster mongo flowify-e2e-runner 74 | 75 | docker_e2e_test_run: docker_e2e_build 76 | # explicit 'run' means we don't stop other containers afterwards 77 | FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f dev/docker-compose.yaml -f dev/docker-compose-e2e.yaml run --rm flowify-e2e-runner 78 | 79 | 80 | .PHONY: all server init clean unittest e2etest test docker_unittest docker_e2e_build docker_e2e_test docker_e2e_test_run 81 | -------------------------------------------------------------------------------- /apiserver/apiserver_test.go: -------------------------------------------------------------------------------- 1 | package apiserver 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "testing" 9 | 10 | "github.com/equinor/flowify-workflows-server/auth" 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | "k8s.io/client-go/kubernetes/fake" 14 | ) 15 | 16 | const ( 17 | test_server_port = 1234 18 | mongo_test_host = "localhost" 19 | mongo_test_port = 27017 20 | test_db_name = "test" 21 | test_namespace = 
"testing-namespace" 22 | n_items = 5 23 | ext_mongo_hostname_env = "FLOWIFY_MONGO_ADDRESS" 24 | ext_mongo_port_env = "FLOWIFY_MONGO_PORT" 25 | ) 26 | 27 | type testCase struct { 28 | Name string 29 | URL string 30 | StatusCode int 31 | Body string 32 | } 33 | 34 | func Test_ApiServer(t *testing.T) { 35 | server, err := NewFlowifyServer( 36 | fake.NewSimpleClientset(), 37 | "not-used", /* config namespace for k8s */ 38 | nil, /* wfclient cs_workflow.Interface */ 39 | nil, /* storage */ 40 | nil, /* volumeStorage */ 41 | 1234, 42 | auth.AzureTokenAuthenticator{}, 43 | ) 44 | require.NoError(t, err) 45 | 46 | /* 47 | spin up a apiserver server with some functionality not connected 48 | */ 49 | 50 | ready := make(chan bool, 1) 51 | go server.Run(context.TODO(), &ready) 52 | 53 | require.True(t, <-ready, "make sure the server started before we continue") 54 | 55 | testcases := []testCase{ 56 | {Name: "z-page/live", URL: "livez", StatusCode: http.StatusOK, Body: "alive"}, 57 | {Name: "z-page/ready", URL: "readyz", StatusCode: http.StatusOK, Body: "ready"}, 58 | {Name: "z-page/version", URL: "versionz", StatusCode: http.StatusOK, Body: CommitSHA}, 59 | } 60 | 61 | for _, test := range testcases { 62 | t.Run(test.Name, func(t *testing.T) { 63 | endpoint := fmt.Sprintf("http://localhost:%d/%s", test_server_port, test.URL) 64 | resp, err := http.Get(endpoint) 65 | require.NoError(t, err) 66 | require.NotNil(t, resp) 67 | 68 | assert.Equal(t, test.StatusCode, resp.StatusCode) 69 | payload, err := io.ReadAll(resp.Body) 70 | assert.NoError(t, err) 71 | assert.Equal(t, test.Body, string(payload)) 72 | }) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /apiserver/config.go: -------------------------------------------------------------------------------- 1 | package apiserver 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "reflect" 8 | "strconv" 9 | "strings" 10 | 11 | "github.com/equinor/flowify-workflows-server/auth" 12 | "github.com/equinor/flowify-workflows-server/storage" 13 | "github.com/mitchellh/mapstructure" 14 | "github.com/pkg/errors" 15 | log "github.com/sirupsen/logrus" 16 | "github.com/spf13/viper" 17 | "gopkg.in/yaml.v3" 18 | ) 19 | 20 | type KubernetesKonfig struct { 21 | KubeConfigPath string `mapstructure:"kubeconfigpath"` 22 | Namespace string `mapstructure:"namespace"` 23 | } 24 | 25 | type LogConfig struct { 26 | LogLevel string `mapstructure:"loglevel"` 27 | } 28 | 29 | type ServerConfig struct { 30 | Port int `mapstructure:"port"` 31 | } 32 | 33 | type Config struct { 34 | DbConfig storage.DbConfig `mapstructure:"db"` 35 | KubernetesKonfig KubernetesKonfig `mapstructure:"kubernetes"` 36 | AuthConfig auth.AuthConfig `mapstructure:"auth"` 37 | 38 | LogConfig LogConfig `mapstructure:"logging"` 39 | ServerConfig ServerConfig `mapstructure:"server"` 40 | } 41 | 42 | func (cfg Config) String() string { 43 | bytes, err := yaml.Marshal(cfg) 44 | if err != nil { 45 | log.Error("Could not stringify config", err) 46 | return "" 47 | } 48 | return string(bytes) 49 | } 50 | 51 | func (cfg Config) Dump(path string) error { 52 | str := cfg.String() 53 | switch path { 54 | case "-": 55 | // stdout 56 | fmt.Println(str) 57 | default: 58 | err := os.WriteFile(path, []byte(str), 0666) 59 | if err != nil { 60 | log.Error("Could write config to file ", path) 61 | return err 62 | } 63 | } 64 | return nil 65 | } 66 | 67 | func viperConfig() { 68 | viper.SetConfigType("yaml") 69 | viper.AutomaticEnv() // let env override config if available 70 | 71 | // 
to let environment variables address nested config keys 72 | viper.SetEnvKeyReplacer(strings.NewReplacer(`.`, `_`)) 73 | 74 | // prefix all envs for uniqueness 75 | viper.SetEnvPrefix("FLOWIFY") 76 | } 77 | 78 | func viperDecodeHook() viper.DecoderConfigOption { 79 | return viper.DecodeHook( 80 | mapstructure.ComposeDecodeHookFunc( 81 | // Try to silently convert string to int 82 | // The port env var may be set as a string instead of the required int 83 | func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { 84 | if f.Kind() != reflect.String { 85 | return data, nil 86 | } 87 | if t.Kind() != reflect.Interface { 88 | return data, nil 89 | } 90 | v, err := strconv.Atoi(data.(string)) 91 | //fmt.Printf("Converting (%v, %v) %v => %d. (%v)\n", f, t, data, v, err) 92 | if err != nil { 93 | return data, nil 94 | } 95 | return v, nil 96 | }, 97 | ), 98 | ) 99 | } 100 | 101 | func LoadConfigFromReader(stream io.Reader) (Config, error) { 102 | viperConfig() 103 | config := Config{} 104 | if err := viper.ReadConfig(stream); err != nil { 105 | return Config{}, errors.Wrap(err, "Cannot load config from reader") 106 | } 107 | 108 | err := viper.Unmarshal(&config, viperDecodeHook()) 109 | if err != nil { 110 | return Config{}, errors.Wrap(err, "Cannot load config from reader") 111 | } 112 | 113 | return config, nil 114 | 115 | } 116 | 117 | func LoadConfigFromPath(path string) (Config, error) { 118 | viper.AddConfigPath(path) 119 | viperConfig() 120 | 121 | err := viper.ReadInConfig() 122 | if err != nil { 123 | return Config{}, errors.Wrap(err, "Cannot read config from path") 124 | } 125 | 126 | config := Config{} 127 | err = viper.Unmarshal(&config, viperDecodeHook()) 128 | if err != nil { 129 | return Config{}, errors.Wrap(err, "Cannot read config from path") 130 | } 131 | return config, nil 132 | } 133 | -------------------------------------------------------------------------------- /auth/auth.go: -------------------------------------------------------------------------------- 1 | package auth 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | 7 | "github.com/equinor/flowify-workflows-server/pkg/workspace" 8 | "github.com/equinor/flowify-workflows-server/user" 9 | "github.com/pkg/errors" 10 | ) 11 | 12 | // an authclient either gives an error or an authenticated user 13 | type AuthenticationClient interface { 14 | Authenticate(r *http.Request) (user.User, error) 15 | } 16 | 17 | // the mock authenticator can be used for testing 18 | type MockAuthenticator struct { 19 | User user.MockUser 20 | } 21 | 22 | func (m MockAuthenticator) Authenticate(r *http.Request) (user.User, error) { 23 | return m.User, nil 24 | } 25 | 26 | type ContextKey = int 27 | 28 | const ( 29 | AuthorizationKey ContextKey = iota 30 | ) 31 | 32 | type Authorization struct { 33 | Action string 34 | Authorized bool 35 | } 36 | 37 | func GetAuthorization(ctx context.Context) *Authorization { 38 | val := ctx.Value(AuthorizationKey) 39 | 40 | if val == nil { 41 | return nil 42 | } else { 43 | return val.(*Authorization) 44 | } 45 | } 46 | 47 | type AuthorizationClient interface { 48 | Authorize(subject Subject, action Action, user user.User, object any) (bool, error) 49 | // AuthorizePath(user user.User, ) 50 | } 51 | 52 | type RoleAuthorizer struct { 53 | // map subject -> action -> required permission 54 | Workspaces workspace.WorkspaceClient 55 | } 56 | 57 | type Action string 58 | 59 | const ( 60 | Read Action = "read" 61 | Write Action = "write" 62 | Delete Action = "delete" 63 | List Action = "list" 64 | ) 65 | 66 | type 
Subject string 67 | 68 | const ( 69 | Secrets Subject = "secrets" 70 | Volumes Subject = "volumes" 71 | ) 72 | 73 | type AccessLevel struct { 74 | User bool 75 | Admin bool 76 | } 77 | 78 | func (ra RoleAuthorizer) GetWorkspacePermissions(wsp string, usr user.User) (AccessLevel, error) { 79 | wss := ra.Workspaces.ListWorkspaces() 80 | 81 | for _, ws := range wss { 82 | var al AccessLevel 83 | if ws.Name == wsp { 84 | al.User = ws.UserHasAccess(usr) 85 | al.Admin = ws.UserHasAdminAccess(usr) 86 | return al, nil 87 | } 88 | } 89 | 90 | return AccessLevel{}, nil 91 | } 92 | 93 | func (ra RoleAuthorizer) GetSecretPermissions(usr user.User, data any) (map[Action]bool, error) { 94 | p := make(map[Action]bool) 95 | 96 | workspace, ok := data.(string) 97 | if !ok { 98 | return map[Action]bool{}, errors.Errorf("could not decode the workspace variable") 99 | } 100 | 101 | al, err := ra.GetWorkspacePermissions(workspace, usr) 102 | if err != nil { 103 | return map[Action]bool{}, errors.Wrap(err, "could not get secret permissions") 104 | } 105 | 106 | // this is where access levels map to actions. 107 | p[Read] = al.User || al.Admin 108 | p[List] = al.User || al.Admin 109 | p[Write] = al.Admin 110 | p[Delete] = al.Admin 111 | 112 | return p, nil 113 | } 114 | 115 | func (ra RoleAuthorizer) GetVolumePermissions(usr user.User, data any) (map[Action]bool, error) { 116 | p := make(map[Action]bool) 117 | 118 | workspace, ok := data.(string) 119 | if !ok { 120 | return map[Action]bool{}, errors.Errorf("could not decode the workspace variable") 121 | } 122 | 123 | al, err := ra.GetWorkspacePermissions(workspace, usr) 124 | if err != nil { 125 | return map[Action]bool{}, errors.Wrap(err, "could not get volume permissions") 126 | } 127 | 128 | // this is where access levels map to actions. 
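// As with secrets above: plain workspace users may read and list volumes, while write and delete require workspace admin.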
129 | p[Read] = al.User || al.Admin 130 | p[List] = al.Admin || al.User 131 | p[Write] = al.Admin 132 | p[Delete] = al.Admin 133 | 134 | return p, nil 135 | } 136 | 137 | func (ra RoleAuthorizer) GetPermissions(subject Subject, action Action, usr user.User, data any) (bool, error) { 138 | switch subject { 139 | case Secrets: 140 | perms, err := ra.GetSecretPermissions(usr, data) 141 | if err != nil { 142 | return false, err 143 | } 144 | if p, ok := perms[action]; ok { 145 | return p, nil 146 | } 147 | return false, errors.Errorf("Rule %s:%s not found", subject, action) 148 | case Volumes: 149 | perms, err := ra.GetVolumePermissions(usr, data) 150 | if err != nil { 151 | return false, err 152 | } 153 | if p, ok := perms[action]; ok { 154 | return p, nil 155 | } 156 | return false, errors.Errorf("Rule %s:%s not found", subject, action) 157 | default: 158 | return false, errors.Errorf("no such subject '%s'", subject) 159 | } 160 | } 161 | 162 | func (ra RoleAuthorizer) Authorize(subject Subject, action Action, user user.User, object any) (bool, error) { 163 | p, err := ra.GetPermissions(subject, action, user, object) 164 | if err != nil { 165 | return false, errors.Wrapf(err, "could not authorize request for %s:%s", subject, action) 166 | } 167 | 168 | return p, nil 169 | } 170 | -------------------------------------------------------------------------------- /auth/azure_token.go: -------------------------------------------------------------------------------- 1 | package auth 2 | 3 | import ( 4 | "crypto/subtle" 5 | "fmt" 6 | "net/http" 7 | "strings" 8 | "time" 9 | 10 | "github.com/equinor/flowify-workflows-server/user" 11 | "github.com/pkg/errors" 12 | "github.com/sirupsen/logrus" 13 | 14 | "github.com/golang-jwt/jwt/v4" 15 | ) 16 | 17 | // implements user.User 18 | type AzureTokenUser struct { 19 | Name string `json:"name"` 20 | Email string `json:"email"` 21 | Oid string `json:"oid"` 22 | Roles []user.Role `json:"roles"` 23 | jwt.RegisteredClaims 24 | 25 | expectedAudience string 26 | expectedIssuer string 27 | } 28 | 29 | func NewAzureTokenUser(audience string, issuer string) AzureTokenUser { 30 | // empty user 31 | user := AzureTokenUser{} 32 | 33 | // for validation 34 | user.expectedAudience = audience 35 | user.expectedIssuer = issuer 36 | 37 | return user 38 | } 39 | 40 | // The time to use when validating token life-time, 41 | // defaults to time.Now which is UTC, https://tools.ietf.org/html/rfc7519#section-4.1.4 42 | // can be temporarily overridden when testing 43 | var TimeFunc = time.Now 44 | 45 | // the same as the jwt KeyFunc 46 | type AzureKeyFunc = func(claim *jwt.Token) (interface{}, error) 47 | 48 | type AzureTokenAuthenticatorOptions struct { 49 | // Disable verification of the signature of the tokens, (claims are still validated) 50 | DisableVerification bool 51 | } 52 | 53 | type AzureTokenAuthenticator struct { 54 | KeyFunc AzureKeyFunc 55 | // the intended audience to be verified with the token `aud` claim 56 | Audience string 57 | // the issuer id to be verified with the token `iss` claim 58 | Issuer string 59 | 60 | // Use only in safe environments 61 | Options AzureTokenAuthenticatorOptions 62 | } 63 | 64 | func NewAzureTokenAuthenticator(KeyFunc AzureKeyFunc, 65 | Audience string, 66 | Issuer string, 67 | Options AzureTokenAuthenticatorOptions) AuthenticationClient { 68 | 69 | return AzureTokenAuthenticator{KeyFunc: KeyFunc, 70 | Audience: Audience, Issuer: Issuer, 71 | Options: Options} 72 | } 73 | 74 | func (a AzureTokenAuthenticator) Authenticate(r 
*http.Request) (user.User, error) { 75 | authStr := r.Header.Get("Authorization") 76 | 77 | // an Authorization header is required 78 | if authStr == "" { 79 | return AzureTokenUser{}, fmt.Errorf("no Authorization header given") 80 | } 81 | 82 | parts := strings.SplitN(authStr, " ", 2) 83 | 84 | if len(parts) < 2 || !strings.EqualFold(parts[0], "bearer") { 85 | return AzureTokenUser{}, fmt.Errorf("bad Authorization header") 86 | } 87 | 88 | user := NewAzureTokenUser(a.Audience, a.Issuer) 89 | err := user.Parse(parts[1], a.KeyFunc, a.Options.DisableVerification) 90 | if err != nil { 91 | return AzureTokenUser{}, errors.Wrap(err, "authentication error") 92 | } 93 | return user, nil 94 | } 95 | 96 | func (t AzureTokenUser) GetUid() string { return t.Oid } 97 | func (t AzureTokenUser) GetName() string { return t.Name } 98 | func (t AzureTokenUser) GetEmail() string { return t.Email } 99 | func (t AzureTokenUser) GetRoles() []user.Role { return t.Roles } 100 | func (t *AzureTokenUser) Parse(tokenString string, keyFunc AzureKeyFunc, disableVerification bool) error { 101 | if disableVerification { 102 | logrus.Warn("jwt token verification is DISABLED") 103 | if _, _, err := jwt.NewParser().ParseUnverified(tokenString, t); err != nil { 104 | return err 105 | } 106 | 107 | // parse unverified doesn't call validation, do it explicitly 108 | return t.Valid() 109 | } 110 | 111 | _, err := jwt.ParseWithClaims(tokenString, t, keyFunc) 112 | return err 113 | } 114 | 115 | // called from the jwt-parser code to ensure the token is valid wrt its claims (lifetime, audience, issuer); 116 | // also called explicitly from the no-verification path of Parse 117 | func (t AzureTokenUser) Valid() error { 118 | now := TimeFunc() 119 | 120 | requireSet := true 121 | // The claims below are optional by default, but we require them to be present and valid 122 | 123 | if !t.VerifyExpiresAt(now, requireSet) { 124 | if t.ExpiresAt != nil { 125 | logrus.Warnf("token expired: 'now' > 'exp', %s < %s", now.UTC().Format(time.RFC3339), t.ExpiresAt.UTC().Format(time.RFC3339)) 126 | } else { 127 | logrus.Warn("token missing 'exp' claim") 128 | } 129 | return fmt.Errorf("token expired") 130 | } 131 | 132 | if !t.VerifyIssuedAt(now, requireSet) { 133 | if t.IssuedAt != nil { 134 | logrus.Warnf("token used before issued: 'now' < 'iat', %s < %s", now.UTC().Format(time.RFC3339), t.IssuedAt.UTC().Format(time.RFC3339)) 135 | } else { 136 | logrus.Warn("token missing 'iat' claim") 137 | } 138 | 139 | return fmt.Errorf("token not valid") 140 | } 141 | 142 | if !t.VerifyNotBefore(now, requireSet) { 143 | if t.NotBefore != nil { 144 | logrus.Warnf("token used before valid: 'now' < 'nbf' %s < %s", now.UTC().Format(time.RFC3339), t.NotBefore.UTC().Format(time.RFC3339)) 145 | } else { 146 | logrus.Warn("token missing 'nbf' claim") 147 | } 148 | return fmt.Errorf("token not yet valid") 149 | } 150 | 151 | if !t.VerifyAudience(t.expectedAudience, requireSet) { 152 | if t.Audience != nil { 153 | logrus.Warnf("token bad aud claim (%s), expected %s", t.Audience, t.expectedAudience) 154 | } else { 155 | logrus.Warn("token missing 'aud' claim") 156 | } 157 | 158 | return fmt.Errorf("invalid token `aud`") 159 | } 160 | 161 | // don't mistake comparison semantics, 1 is *match* 162 | if subtle.ConstantTimeCompare([]byte(t.Issuer), []byte(t.expectedIssuer)) != 1 { 163 | logrus.Warnf("token bad iss claim (%s), expected: %s", t.Issuer, t.expectedIssuer) 164 | return fmt.Errorf("invalid token `iss`") 165 | } 166 | 167 | return nil 168 | } 169 | 170 | /* 171 | func readAll(url string) ([]byte, error) { 172 | r, err := 
http.Get(url) 173 | if err != nil { 174 | return []byte{}, err 175 | } 176 | if r.StatusCode != http.StatusOK { 177 | return []byte{}, fmt.Errorf("could not get azure validation info") 178 | } 179 | 180 | buf := new(bytes.Buffer) 181 | if err := func() error { // scope for defer and err 182 | _, err := buf.ReadFrom(r.Body) 183 | defer r.Body.Close() 184 | return err 185 | }(); err != nil { 186 | return []byte{}, err 187 | } 188 | return buf.Bytes(), nil 189 | } 190 | */ 191 | -------------------------------------------------------------------------------- /auth/config.go: -------------------------------------------------------------------------------- 1 | package auth 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/MicahParks/keyfunc" 8 | "github.com/equinor/flowify-workflows-server/user" 9 | "github.com/mitchellh/mapstructure" 10 | "github.com/pkg/errors" 11 | log "github.com/sirupsen/logrus" 12 | ) 13 | 14 | type AuthConfig struct { 15 | Handler string `mapstructure:"handler"` 16 | // the config is polymorphic based on the handler string 17 | Config map[string]interface{} `mapstructure:"config"` 18 | } 19 | 20 | type AzureConfig struct { 21 | Issuer string 22 | Audience string 23 | KeysUrl string 24 | } 25 | 26 | func NewAuthClientFromConfig(config AuthConfig) (AuthenticationClient, error) { 27 | 28 | switch config.Handler { 29 | case "azure-oauth2-openid-token": 30 | { 31 | var azData AzureConfig 32 | err := mapstructure.Decode(config.Config, &azData) 33 | if err != nil { 34 | return nil, errors.Wrapf(err, "could not decode AuthConfig: %v", config.Config) 35 | } 36 | 37 | opts := AzureTokenAuthenticatorOptions{} 38 | var jwks AzureKeyFunc 39 | if azData.KeysUrl == "DISABLE_JWT_SIGNATURE_VERIFICATION" { 40 | log.Warn("running the authenticator without signature verification is UNSAFE") 41 | opts.DisableVerification = true 42 | } else { 43 | // Create the JWKS from the resource at the given URL. 
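// For Azure AD the keys URL typically looks like https://login.microsoftonline.com/{tenant}/discovery/v2.0/keys; the keysurl value in config.yml (overridable via FLOWIFY_AUTH_CONFIG_KEYSURL) is expected to point at such a JWKS document.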
44 | JWKS, err := keyfunc.Get(azData.KeysUrl, keyfunc.Options{ 45 | // best practices for azure key roll-over: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-signing-key-rollover 46 | RefreshInterval: time.Hour * 24, 47 | RefreshRateLimit: time.Minute * 5, 48 | // when encountering a "new" key id, allow immediate refresh (rate limited) 49 | RefreshUnknownKID: true, 50 | // make sure errors make it into the log 51 | RefreshErrorHandler: func(err error) { log.Error("jwks refresh error:", err) }, 52 | }) 53 | if err != nil { 54 | return nil, errors.Wrap(err, "failed to get the JWKS") 55 | } 56 | jwks = JWKS.Keyfunc 57 | } 58 | return AzureTokenAuthenticator{Issuer: azData.Issuer, Audience: azData.Audience, KeyFunc: jwks, Options: opts}, nil 59 | } 60 | 61 | case "disabled-auth": 62 | { 63 | var muser user.MockUser 64 | err := mapstructure.Decode(config.Config, &muser) 65 | if err != nil { 66 | return nil, errors.Wrapf(err, "could not decode AuthConfig: %v", config.Config) 67 | } 68 | log.Warn("flowify using no authentication and static dummy-authorization: User = ", muser) 69 | 70 | return MockAuthenticator{ 71 | User: muser, 72 | }, nil 73 | } 74 | default: 75 | { 76 | return nil, fmt.Errorf("auth handler (%s) not supported", config.Handler) 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /cmd/dereference/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "context" 7 | "encoding/json" 8 | "flag" 9 | "fmt" 10 | "io" 11 | "os" 12 | "strings" 13 | 14 | "github.com/equinor/flowify-workflows-server/models" 15 | "github.com/equinor/flowify-workflows-server/storage" 16 | "github.com/google/uuid" 17 | log "github.com/sirupsen/logrus" 18 | "github.com/spf13/viper" 19 | ) 20 | 21 | func myUsage() { 22 | fmt.Printf("Usage: %s [OPTIONS] cmpRef\n", os.Args[0]) 23 | flag.PrintDefaults() 24 | } 25 | 26 | // read a reference or an inline component 27 | func parseInput(doc []byte) (interface{}, error) { 28 | { 29 | // try a plain reference 30 | cref, err := uuid.ParseBytes(bytes.TrimSpace(doc)) 31 | if err == nil { 32 | return models.ComponentReference(cref), nil 33 | } 34 | log.Info("Not a plain uuid") 35 | } 36 | 37 | { 38 | // try component 39 | var cmp models.Component 40 | err := json.Unmarshal(doc, &cmp) 41 | if err == nil { 42 | return cmp, nil 43 | } 44 | log.Info("Not a component") 45 | } 46 | 47 | return models.ComponentReference{}, fmt.Errorf("could not parse '%s'", doc) 48 | } 49 | 50 | func LoadDbConfig(path string) (config storage.DbConfig, err error) { 51 | viper.AddConfigPath(path) 52 | viper.SetConfigName("config") 53 | viper.SetConfigType("yaml") 54 | viper.AutomaticEnv() // let env override config if available 55 | 56 | // to let environment variables address nested config keys 57 | viper.SetEnvKeyReplacer(strings.NewReplacer(`.`, `_`)) 58 | 59 | // prefix all envs for uniqueness 60 | viper.SetEnvPrefix("FLOWIFY") 61 | 62 | err = viper.ReadInConfig() 63 | if err != nil { 64 | return 65 | } 66 | 67 | err = viper.Unmarshal(&config) 68 | return 69 | } 70 | 71 | func isFlagPassed(name string) bool { 72 | found := false 73 | flag.Visit(func(f *flag.Flag) { 74 | if f.Name == name { 75 | found = true 76 | } 77 | }) 78 | return found 79 | } 80 | 81 | func main() { 82 | log.SetLevel(log.InfoLevel) 83 | 84 | // read config, possibly overridden by ENV VARS 85 | cfg, err := LoadDbConfig(".") 86 | if err != nil { log.Fatal("cannot load db config: ", err) } 87 | fileName := 
flag.String("file", "", "Read from file instead of cmd line arg, '-' for stdin") 88 | dbName := flag.String("db", "Flowify", "Set the name of the database to use") 89 | if isFlagPassed("db") { 90 | cfg.DbName = *dbName 91 | } 92 | 93 | flag.Parse() 94 | flag.Usage = myUsage 95 | 96 | // 1. read from arg (typically uid) 97 | // 2. read from file (if selected), - means stdin 98 | if (flag.NArg() == 1) == (*fileName != "") { 99 | flag.Usage() 100 | return 101 | } 102 | 103 | var bytes []byte 104 | 105 | if flag.NArg() == 1 { 106 | // 1. read from arg 107 | bytes = []byte(flag.Arg(0)) 108 | } else if *fileName != "" { 109 | // 2. read from file 110 | 111 | var err error // nil error 112 | if *fileName == "-" { 113 | bytes, err = io.ReadAll(bufio.NewReader(os.Stdin)) 114 | } else { 115 | bytes, err = os.ReadFile(*fileName) 116 | } 117 | if err != nil { 118 | panic(err) 119 | } 120 | } else { 121 | panic("unexpected") 122 | } 123 | 124 | any, err := parseInput(bytes) 125 | if err != nil { 126 | panic(err) 127 | } 128 | 129 | var component models.Component 130 | cstorage, err := storage.NewMongoStorageClientFromConfig(cfg, nil) 131 | if err != nil { 132 | panic(err) 133 | } 134 | 135 | switch concrete := any.(type) { 136 | case models.ComponentReference: 137 | // retrieve 138 | c, err := cstorage.GetComponent(context.TODO(), concrete) 139 | if err != nil { 140 | fmt.Println("oops!") 141 | panic(err) 142 | } 143 | component = c 144 | case models.Component: 145 | component = concrete 146 | default: 147 | panic("unexpected") 148 | } 149 | 150 | cmpResolved, err := storage.DereferenceComponent(context.TODO(), cstorage, component) 151 | if err != nil { 152 | panic(err) 153 | } 154 | 155 | outBytes, _ := json.MarshalIndent(cmpResolved, "", " ") 156 | fmt.Print(string(outBytes), "\n") 157 | } 158 | -------------------------------------------------------------------------------- /cmd/transpile/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "flag" 6 | "fmt" 7 | "os" 8 | 9 | "sigs.k8s.io/yaml" 10 | 11 | // "github.com/google/uuid" 12 | 13 | "github.com/equinor/flowify-workflows-server/models" 14 | "github.com/equinor/flowify-workflows-server/transpiler" 15 | log "github.com/sirupsen/logrus" 16 | ) 17 | 18 | func myUsage() { 19 | fmt.Printf("Usage: %s [OPTIONS] filename\n", os.Args[0]) 20 | flag.PrintDefaults() 21 | } 22 | 23 | func main() { 24 | log.SetLevel(log.InfoLevel) 25 | 26 | schemaFilePtr := flag.String("schema", "", "a file path") 27 | flag.Parse() 28 | flag.Usage = myUsage 29 | if flag.NArg() > 1 { 30 | flag.Usage() 31 | return 32 | } 33 | 34 | rawbytes, err := os.ReadFile(flag.Arg(0)) 35 | if err != nil { 36 | log.Fatal(err.Error()) 37 | return 38 | } 39 | 40 | schema := models.FindSchema(*schemaFilePtr) 41 | log.Infof("schema from: %s", *schemaFilePtr) 42 | 43 | if schema != nil { 44 | log.Infof("schema: %s", *schemaFilePtr) 45 | var v interface{} 46 | if err := json.Unmarshal(rawbytes, &v); err != nil { 47 | log.Fatalf(err.Error()) 48 | } 49 | 50 | err := schema.Validate(v) 51 | if err != nil { 52 | b := fmt.Sprintf("%#v\n", err) 53 | log.Fatal(string(b)) 54 | } 55 | log.Info("schema validates") 56 | 57 | } 58 | var job models.Job 59 | var workflow models.Workflow 60 | var component models.Component 61 | 62 | err = json.Unmarshal(rawbytes, &job) 63 | if err == nil && job.Type == models.ComponentType("job") { 64 | log.Info("Job in the input file.") 65 | } else { 66 | err = 
json.Unmarshal(rawbytes, &workflow) 67 | if err == nil && workflow.Type == "workflow" { 68 | log.Info("Workflow in the input file.") 69 | job = models.Job{Metadata: models.Metadata{Description: "Empty job from workflow"}, Type: "job", InputValues: nil, Workflow: workflow} 70 | } else { 71 | err = json.Unmarshal(rawbytes, &component) 72 | if err == nil { 73 | log.Info("Component in the input file.") 74 | workflow = models.Workflow{Metadata: models.Metadata{}, Component: component, Workspace: ""} 75 | job = models.Job{Metadata: models.Metadata{Description: "Empty job from component"}, Type: "job", InputValues: nil, Workflow: workflow} 76 | } else { 77 | log.Fatal("Can't convert file content to Job/Workflow/Component object.") 78 | } 79 | } 80 | 81 | ajob, err := transpiler.GetArgoWorkflow(job) 82 | if err != nil { 83 | log.Fatal(err.Error()) 84 | } 85 | 86 | outBytes, _ := yaml.Marshal(ajob) 87 | fmt.Print(string(outBytes)) 88 | } 89 | 90 | -------------------------------------------------------------------------------- /cmd/validate/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "flag" 6 | "fmt" 7 | "os" 8 | 9 | "github.com/equinor/flowify-workflows-server/models" 10 | log "github.com/sirupsen/logrus" 11 | ) 12 | 13 | func myUsage() { 14 | fmt.Printf("Usage: %s [OPTIONS] filename\n", os.Args[0]) 15 | flag.PrintDefaults() 16 | } 17 | 18 | func main() { 19 | log.SetLevel(log.InfoLevel) 20 | 21 | schemaFilePtr := flag.String("schema", "", "a file path") 22 | flag.Usage = myUsage 23 | flag.Parse() 24 | if flag.NArg() > 1 { 25 | flag.Usage() 26 | return 27 | } 28 | 29 | rawbytes, err := os.ReadFile(flag.Arg(0)) 30 | if err != nil { 31 | log.Fatal(err.Error()) 32 | return 33 | } 34 | log.SetLevel(log.DebugLevel) 35 | schema := models.FindSchema(*schemaFilePtr) 36 | if schema != nil { 37 | var v interface{} 38 | if err := json.Unmarshal(rawbytes, &v); err != nil { 39 | log.Fatal("Cannot unmarshal JSON: ", err.Error()) 40 | } 41 | 42 | err := schema.Validate(v) 43 | if err != nil { 44 | log.Fatalf("Validation error: %#v", err) 45 | } 46 | log.Info("schema validates") 47 | } else { 48 | log.Info("schema not validated") 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /config.yml: -------------------------------------------------------------------------------- 1 | db: 2 | # select which db to use 3 | select: mongo 4 | # the flowify document database 5 | dbname: test 6 | # mongo: 7 | config: 8 | # Mongo fields 9 | # (FLOWIFY_)DB_CONFIG_ADDRESS=... 10 | # url to database 11 | address: localhost 12 | # port where mongo is listening 13 | port: 27017 14 | 15 | # Cosmos fields 16 | # export (FLOWIFY_)DB_CONFIG_CREDENTIALS=... 
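    # nested keys map to env vars by replacing '.' with '_' and adding the FLOWIFY_ prefix (see viperConfig in apiserver/config.go), e.g. db.config.port -> FLOWIFY_DB_CONFIG_PORT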
17 | credentials: SET_FROM_ENV 18 | 19 | kubernetes: 20 | # how to locate the kubernetes server 21 | kubeconfigpath: SET_FROM_ENV 22 | # the namespace containing the flowify configuration and setup 23 | namespace: flowify 24 | 25 | auth: 26 | handler: azure-oauth2-openid-token 27 | config: 28 | issuer: sandbox 29 | audience: flowify 30 | # keysurl: http://localhost:32023/jwkeys/ 31 | keysurl: SET_FROM_ENV 32 | 33 | #auth: 34 | # handler: disabled-auth 35 | # config: 36 | # uid: "0" 37 | # name: Auth Disabled 38 | # email: auth@disabled.com 39 | # roles: 40 | # - tester 41 | # - dummy 42 | 43 | logging: 44 | loglevel: info 45 | 46 | server: 47 | port: 8842 48 | 49 | -------------------------------------------------------------------------------- /dev/Dockerfile.cluster: -------------------------------------------------------------------------------- 1 | FROM alpine:latest as base 2 | LABEL description="Flowify cluster environment" 3 | RUN apk add --no-cache \ 4 | bash \ 5 | bash-completion \ 6 | curl \ 7 | docker \ 8 | openssl \ 9 | vim 10 | 11 | FROM base as buildbase 12 | WORKDIR /root 13 | # Install kubectl 14 | RUN curl -LO https://dl.k8s.io/release/v1.25.0/bin/linux/amd64/kubectl && \ 15 | chmod +x ./kubectl && \ 16 | mv ./kubectl /usr/local/bin/kubectl 17 | # Install Kubernetes in Docker (kind) 18 | RUN curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.15.0/kind-linux-amd64 && \ 19 | chmod +x ./kind && \ 20 | mv ./kind /usr/local/bin/kind 21 | # Install argo 22 | RUN curl -LO https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/argo-linux-amd64.gz && \ 23 | gunzip argo-linux-amd64.gz && \ 24 | chmod +x argo-linux-amd64 && \ 25 | mv ./argo-linux-amd64 /usr/local/bin/argo 26 | RUN echo 'source <(kubectl completion bash)' >>~/.bashrc 27 | RUN echo 'source <(argo completion bash)' >>~/.bashrc 28 | COPY dev/cluster_runner.sh . 29 | COPY dev/kind.yaml . 30 | COPY dev/argo-cluster-install/ ./argo-cluster-install 31 | RUN chmod +x ./cluster_runner.sh 32 | 33 | ENTRYPOINT ["/bin/bash", "cluster_runner.sh"] 34 | -------------------------------------------------------------------------------- /dev/Dockerfile.mongo: -------------------------------------------------------------------------------- 1 | FROM mongo:5.0 2 | 3 | COPY ./dev/example_data/ . -------------------------------------------------------------------------------- /dev/Dockerfile.server: -------------------------------------------------------------------------------- 1 | FROM golang:1.18-alpine as base 2 | LABEL description="Flowify dev environment" 3 | RUN apk add --no-cache \ 4 | bash \ 5 | binutils \ 6 | curl \ 7 | docker \ 8 | gcc \ 9 | git \ 10 | jq \ 11 | make \ 12 | musl-dev \ 13 | openssl \ 14 | shadow 15 | 16 | FROM base as buildbase 17 | WORKDIR /root 18 | RUN curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.15.0/kind-linux-amd64 && \ 19 | chmod +x ./kind && \ 20 | mv ./kind /usr/local/bin/kind 21 | RUN mkdir -p $GOPATH/src/github.com/equinor/ 22 | WORKDIR $GOPATH/src/github.com/equinor/flowify-workflows-server 23 | COPY dev/flowify_server_runner.sh . 24 | RUN chmod +x ./flowify_server_runner.sh 25 | COPY dev/kind_cluster_config_export.sh . 26 | RUN chmod +x ./kind_cluster_config_export.sh 27 | COPY go.mod . 28 | COPY go.sum . 
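# go.mod and go.sum are copied before the sources so the module download below stays in the Docker layer cache across source-only changes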
29 | RUN go mod download 30 | ENV PATH="${PATH}:/root" 31 | 32 | FROM buildbase as devbase 33 | RUN go install github.com/jstemmer/go-junit-report@latest 34 | RUN go install github.com/jandelgado/gcov2lcov@latest 35 | 36 | FROM devbase as devserver 37 | COPY apiserver ./apiserver 38 | COPY auth ./auth 39 | COPY cmd ./cmd 40 | COPY models ./models 41 | COPY pkg ./pkg 42 | COPY rest ./rest 43 | COPY storage ./storage 44 | COPY transpiler ./transpiler 45 | COPY user ./user 46 | COPY config.yml . 47 | COPY main.go . 48 | COPY Makefile . 49 | COPY e2etest ./e2etest 50 | 51 | 52 | ARG FLOWIFY_GIT_SHA 53 | RUN ["/bin/bash", "-c", "make server strip=1 flowify_git_sha=${FLOWIFY_GIT_SHA}"] 54 | 55 | CMD ["./flowify_server_runner.sh"] -------------------------------------------------------------------------------- /dev/Dockerfile.server_builder: -------------------------------------------------------------------------------- 1 | FROM golang:1.18-alpine as base 2 | LABEL description="Flowify dev environment builder" 3 | RUN apk add --no-cache \ 4 | bash \ 5 | binutils \ 6 | curl \ 7 | docker \ 8 | gcc \ 9 | git \ 10 | jq \ 11 | make \ 12 | musl-dev \ 13 | openssl \ 14 | shadow 15 | 16 | FROM base as buildbase 17 | WORKDIR /root 18 | RUN curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.15.0/kind-linux-amd64 && \ 19 | chmod +x ./kind && \ 20 | mv ./kind /usr/local/bin/kind 21 | RUN mkdir -p $GOPATH/src/github.com/equinor/ 22 | WORKDIR $GOPATH/src/github.com/equinor/flowify-workflows-server 23 | COPY dev/flowify_server_runner.sh . 24 | RUN chmod +x ./flowify_server_runner.sh 25 | -------------------------------------------------------------------------------- /dev/Makefile: -------------------------------------------------------------------------------- 1 | all: docker 2 | 3 | docker: 4 | # build services 5 | docker-compose -f docker-compose.yaml build 6 | # build e2e-test-runner 7 | docker-compose -f docker-compose.yaml -f docker-compose-e2e.yaml build flowify-e2e-runner 8 | 9 | .PHONY: docker 10 | -------------------------------------------------------------------------------- /dev/argo-cluster-install/base/clusterroles.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | annotations: 6 | workflows.argoproj.io/description: | 7 | Minimum recommended permissions to use artifact GC. 8 | name: artifactgc 9 | rules: 10 | - apiGroups: 11 | - argoproj.io 12 | resources: 13 | - workflowartifactgctasks 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - argoproj.io 19 | resources: 20 | - workflowartifactgctasks/status 21 | verbs: 22 | - patch 23 | --- 24 | apiVersion: rbac.authorization.k8s.io/v1 25 | kind: ClusterRole 26 | metadata: 27 | annotations: 28 | workflows.argoproj.io/description: | 29 | Recommended minimum permissions for the `emissary` executor. 
30 | name: executor 31 | rules: 32 | - apiGroups: 33 | - argoproj.io 34 | resources: 35 | - workflowtaskresults 36 | verbs: 37 | - create 38 | - patch 39 | --- 40 | apiVersion: rbac.authorization.k8s.io/v1 41 | kind: ClusterRoleBinding 42 | metadata: 43 | name: artifactgc-default 44 | roleRef: 45 | apiGroup: rbac.authorization.k8s.io 46 | kind: ClusterRole 47 | name: artifactgc 48 | subjects: 49 | - kind: ServiceAccount 50 | name: default 51 | namespace: argo 52 | --- 53 | apiVersion: rbac.authorization.k8s.io/v1 54 | kind: ClusterRoleBinding 55 | metadata: 56 | name: executor-default 57 | roleRef: 58 | apiGroup: rbac.authorization.k8s.io 59 | kind: ClusterRole 60 | name: executor 61 | subjects: 62 | - kind: ServiceAccount 63 | name: default 64 | namespace: argo 65 | --- -------------------------------------------------------------------------------- /dev/argo-cluster-install/base/configmaps.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: "workspace-config" 7 | app.kubernetes.io/part-of: "flowify" 8 | name: "sandbox-project-a" 9 | namespace: "argo" 10 | data: 11 | roles: "[[\"sandbox-developer\"]]" 12 | --- 13 | apiVersion: v1 14 | kind: ConfigMap 15 | metadata: 16 | labels: 17 | app.kubernetes.io/component: "workspace-config" 18 | app.kubernetes.io/part-of: "flowify" 19 | name: "sandbox-project-b" 20 | namespace: "argo" 21 | data: 22 | roles: "[[\"sandbox\"]]" 23 | 24 | --- 25 | apiVersion: v1 26 | kind: ConfigMap 27 | metadata: 28 | labels: 29 | app.kubernetes.io/part-of: "flowify" 30 | name: "role-descriptions" 31 | namespace: "argo" 32 | data: 33 | "sandbox-developer": "Need to play in the sandbox" 34 | "sandbox-admin": "Required for God-mode" 35 | --- 36 | apiVersion: v1 37 | kind: ConfigMap 38 | metadata: 39 | annotations: 40 | workflows.argoproj.io/default-artifact-repository: default-v1 41 | name: artifact-repositories 42 | namespace: argo 43 | data: 44 | default-v1: | 45 | archiveLogs: true 46 | s3: 47 | bucket: my-bucket 48 | endpoint: minio.argo.svc.cluster.local:9000 49 | insecure: true 50 | accessKeySecret: 51 | name: my-minio-cred 52 | key: accesskey 53 | secretKeySecret: 54 | name: my-minio-cred 55 | key: secretkey 56 | empty: "" 57 | my-key: | 58 | archiveLogs: true 59 | s3: 60 | bucket: my-bucket 61 | endpoint: minio.argo.svc.cluster.local:9000 62 | insecure: true 63 | accessKeySecret: 64 | name: my-minio-cred 65 | key: accesskey 66 | secretKeySecret: 67 | name: my-minio-cred 68 | key: secretkey 69 | --- -------------------------------------------------------------------------------- /dev/argo-cluster-install/base/deployments.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: minio 7 | name: minio 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: minio 12 | template: 13 | metadata: 14 | labels: 15 | app: minio 16 | spec: 17 | containers: 18 | - command: 19 | - minio 20 | - server 21 | - --console-address 22 | - :9001 23 | - /data 24 | env: 25 | - name: MINIO_ACCESS_KEY 26 | value: admin 27 | - name: MINIO_SECRET_KEY 28 | value: password 29 | image: minio/minio 30 | lifecycle: 31 | postStart: 32 | exec: 33 | command: 34 | - mkdir 35 | - -p 36 | - /data/my-bucket 37 | livenessProbe: 38 | httpGet: 39 | path: /minio/health/live 40 | port: 9000 41 | initialDelaySeconds: 5 42 | periodSeconds: 10 43 | name: main 
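# the two container ports below: 9000 serves the S3 API, 9001 the web dashboard (set via --console-address above)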
44 | ports: 45 | - containerPort: 9000 46 | name: api 47 | - containerPort: 9001 48 | name: dashboard 49 | readinessProbe: 50 | httpGet: 51 | path: /minio/health/ready 52 | port: 9000 53 | initialDelaySeconds: 5 54 | periodSeconds: 10 55 | --- 56 | -------------------------------------------------------------------------------- /dev/argo-cluster-install/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: argo 5 | resources: 6 | - namespace.yaml 7 | - configmaps.yaml 8 | - secrets.yaml 9 | - https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/install.yaml 10 | - clusterroles.yaml 11 | - deployments.yaml 12 | - services.yaml -------------------------------------------------------------------------------- /dev/argo-cluster-install/base/namespace.yaml: -------------------------------------------------------------------------------- 1 | # Argo server, workflow, configMap, secrets namespace 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | labels: 6 | app.kubernetes.io/part-of: "flowify" 7 | name: "argo" -------------------------------------------------------------------------------- /dev/argo-cluster-install/base/secrets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | labels: 6 | app: minio 7 | name: my-minio-cred 8 | stringData: 9 | accesskey: admin 10 | secretkey: password 11 | type: Opaque 12 | --- 13 | -------------------------------------------------------------------------------- /dev/argo-cluster-install/base/services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | labels: 6 | app: minio 7 | name: minio 8 | spec: 9 | ports: 10 | - name: api 11 | port: 9000 12 | protocol: TCP 13 | targetPort: 9000 14 | - name: dashboard 15 | port: 9001 16 | protocol: TCP 17 | targetPort: 9001 18 | selector: 19 | app: minio 20 | --- 21 | -------------------------------------------------------------------------------- /dev/argo-cluster-install/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - base 6 | - sandbox-project-a -------------------------------------------------------------------------------- /dev/argo-cluster-install/sandbox-project-a/configmaps.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | annotations: 6 | workflows.argoproj.io/default-artifact-repository: default-v1 7 | name: artifact-repositories 8 | data: 9 | default-v1: | 10 | archiveLogs: true 11 | s3: 12 | bucket: my-bucket 13 | endpoint: minio.argo.svc.cluster.local:9000 14 | insecure: true 15 | accessKeySecret: 16 | name: my-minio-cred 17 | key: accesskey 18 | secretKeySecret: 19 | name: my-minio-cred 20 | key: secretkey 21 | empty: "" 22 | my-key: | 23 | archiveLogs: true 24 | s3: 25 | bucket: my-bucket 26 | endpoint: minio.argo.svc.cluster.local:9000 27 | insecure: true 28 | accessKeySecret: 29 | name: my-minio-cred 30 | key: accesskey 31 | secretKeySecret: 32 | name: my-minio-cred 33 | key: secretkey 34 | --- -------------------------------------------------------------------------------- 
/dev/argo-cluster-install/sandbox-project-a/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: sandbox-project-a 5 | resources: 6 | - namespace.yaml 7 | - configmaps.yaml 8 | - roles.yaml 9 | - secrets.yaml -------------------------------------------------------------------------------- /dev/argo-cluster-install/sandbox-project-a/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | labels: 6 | app.kubernetes.io/part-of: "flowify" 7 | name: "sandbox-project-a" 8 | --- -------------------------------------------------------------------------------- /dev/argo-cluster-install/sandbox-project-a/roles.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Role for sandbox-project-a 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: Role 5 | metadata: 6 | name: workflow-role 7 | namespace: sandbox-project-a 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - pods 13 | verbs: 14 | - get 15 | - watch 16 | - patch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - pods/log 21 | verbs: 22 | - get 23 | - watch 24 | - apiGroups: 25 | - "" 26 | resources: 27 | - pods/exec 28 | verbs: 29 | - create 30 | - apiGroups: 31 | - "" 32 | resources: 33 | - configmaps 34 | verbs: 35 | - create 36 | - get 37 | - update 38 | - apiGroups: 39 | - argoproj.io 40 | resources: 41 | - workflows 42 | verbs: 43 | - create 44 | - get 45 | - apiGroups: 46 | - argoproj.io 47 | resources: 48 | - workflowtasksets 49 | - workflowtasksets/finalizers 50 | verbs: 51 | - list 52 | - watch 53 | - get 54 | - update 55 | - patch 56 | --- 57 | # Role binding for sandbox-project-a 58 | apiVersion: rbac.authorization.k8s.io/v1 59 | kind: RoleBinding 60 | metadata: 61 | name: workflow-project-a-binding 62 | namespace: sandbox-project-a 63 | roleRef: 64 | apiGroup: rbac.authorization.k8s.io 65 | kind: Role 66 | name: workflow-role 67 | subjects: 68 | - kind: ServiceAccount 69 | name: default 70 | namespace: sandbox-project-a 71 | --- -------------------------------------------------------------------------------- /dev/argo-cluster-install/sandbox-project-a/secrets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | labels: 6 | app: minio 7 | name: my-minio-cred 8 | stringData: 9 | accesskey: admin 10 | secretkey: password 11 | type: Opaque 12 | --- 13 | -------------------------------------------------------------------------------- /dev/argo-cluster-install/sandbox-project-b/configmaps.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | annotations: 6 | workflows.argoproj.io/default-artifact-repository: default-v1 7 | name: artifact-repositories 8 | data: 9 | default-v1: | 10 | archiveLogs: true 11 | s3: 12 | bucket: my-bucket 13 | endpoint: minio.argo.svc.cluster.local:9000 14 | insecure: true 15 | accessKeySecret: 16 | name: my-minio-cred 17 | key: accesskey 18 | secretKeySecret: 19 | name: my-minio-cred 20 | key: secretkey 21 | empty: "" 22 | my-key: | 23 | archiveLogs: true 24 | s3: 25 | bucket: my-bucket 26 | endpoint: minio.argo.svc.cluster.local:9000 27 | insecure: true 28 | accessKeySecret: 29 | name: my-minio-cred 30 | key: accesskey 
31 | secretKeySecret: 32 | name: my-minio-cred 33 | key: secretkey 34 | --- -------------------------------------------------------------------------------- /dev/argo-cluster-install/sandbox-project-b/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: sandbox-project-b 5 | resources: 6 | - namespace.yaml 7 | - configmaps.yaml 8 | - secrets.yaml -------------------------------------------------------------------------------- /dev/argo-cluster-install/sandbox-project-b/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | labels: 6 | app.kubernetes.io/part-of: "flowify" 7 | name: "sandbox-project-b" 8 | --- 9 | -------------------------------------------------------------------------------- /dev/argo-cluster-install/sandbox-project-b/secrets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | labels: 6 | app: minio 7 | name: my-minio-cred 8 | stringData: 9 | accesskey: admin 10 | secretkey: password 11 | type: Opaque 12 | --- 13 | -------------------------------------------------------------------------------- /dev/cluster_runner.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | RED='\033[0;31m' 4 | GREEN='\033[0;32m' 5 | BLUE='\033[0;34m' 6 | PURPLE='\033[0;35m' 7 | CYAN='\033[0;36m' 8 | WHITE='\033[0;37m' 9 | NOCOLOR='\033[0m' # No Color 10 | 11 | bash -c 'kind export --name cluster kubeconfig 2>/dev/null' 12 | cluster_exist=$? 13 | 14 | if [ $cluster_exist -eq 0 ] 15 | then 16 | echo -e ${GREEN} 17 | echo ===================================================================== 18 | echo Kind cluster exists, getting kubeconfig from cluster 19 | echo ===================================================================== 20 | echo -e ${NOCOLOR} 21 | else 22 | echo -e ${BLUE} 23 | echo ===================================================================== 24 | echo Bringing up a cluster 25 | echo ===================================================================== 26 | echo -e ${NOCOLOR} 27 | bash -c '/usr/local/bin/kind create cluster --name cluster --config /root/kind.yaml' 28 | fi 29 | 30 | # Set a trap for SIGTERM signal 31 | if ! [[ "$KEEP_KIND_CLUSTER_ALIVE" = true ]] 32 | then 33 | trap "docker rm -f cluster-control-plane" SIGTERM 34 | fi 35 | 36 | echo -e ${GREEN} 37 | echo ===================================================================== 38 | echo Modifying Kubernetes config to point to Kind master node 39 | echo ===================================================================== 40 | echo -e ${NOCOLOR} 41 | sed -i "s/^ server:.*/ server: https:\/\/$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/" $HOME/.kube/config 42 | 43 | if [ $cluster_exist -ne 0 ] 44 | then 45 | echo -e ${BLUE} 46 | echo ===================================================================== 47 | echo Deploying argo 48 | echo ===================================================================== 49 | echo -e ${NOCOLOR} 50 | kubectl apply -k /root/argo-cluster-install 51 | 52 | echo -e ${PURPLE} 53 | echo ===================================================================== 54 | echo "Waiting for deployment..."
55 | echo ===================================================================== 56 | echo -e ${NOCOLOR} 57 | kubectl rollout status deployments -n argo 58 | fi 59 | 60 | while true 61 | do 62 | sleep 1 63 | done 64 | -------------------------------------------------------------------------------- /dev/docker-compose-e2e.yaml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | # This test runner needs to be merged with the docker-compose defining the cluster and mongo services 3 | # Usage: docker-compose -f docker-compose.yaml -f docker-compose-e2e.yaml ... 4 | services: 5 | flowify-e2e-runner: 6 | container_name: flowify_e2e-runner 7 | build: 8 | context: ../ 9 | dockerfile: dev/Dockerfile.server 10 | args: 11 | - FLOWIFY_GIT_SHA=${FLOWIFY_GIT_SHA} 12 | volumes: 13 | - /var/run/docker.sock:/var/run/docker.sock 14 | - ../testoutputs:/go/src/github.com/equinor/flowify-workflows-server/testoutputs 15 | depends_on: 16 | cluster: 17 | condition: service_healthy 18 | mongo: 19 | condition: service_healthy 20 | environment: 21 | - KUBERNETES_SERVICE_HOST=cluster-control-plane 22 | - KUBERNETES_SERVICE_PORT=6443 23 | - FLOWIFY_DB_SELECT=mongo 24 | - FLOWIFY_DB_CONFIG_ADDRESS=mongo_server 25 | - FLOWIFY_DB_CONFIG_PORT=27017 26 | - FLOWIFY_SERVER_PORT=8842 27 | - FLOWIFY_KUBERNETES_NAMESPACE=argo 28 | - FLOWIFY_KUBERNETES_KUBECONFIGPATH=/root/.kube/config 29 | - FLOWIFY_AUTH_HANDLER=azure-oauth2-openid-token 30 | - FLOWIFY_AUTH_CONFIG_KEYSURL=DISABLE_JWT_SIGNATURE_VERIFICATION 31 | command: bash -c "./kind_cluster_config_export.sh; make e2etest flowify_git_sha=$FLOWIFY_GIT_SHA" 32 | -------------------------------------------------------------------------------- /dev/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | services: 3 | mongo: 4 | # one node mongoDB replica set for local development 5 | container_name: mongo_server 6 | image: mongo:5.0 7 | restart: unless-stopped 8 | ports: 9 | - "27017:27017" 10 | # volumes: 11 | # - ./database-rs:/data/db 12 | healthcheck: 13 | test: test $$(echo "rs.initiate().ok || rs.status().ok" | mongo --quiet) -eq 1 14 | interval: 10s 15 | command: ["--replSet", "rs0", "--bind_ip_all"] 16 | 17 | cluster: 18 | container_name: kind_cluster 19 | # build: 20 | # context: ../ 21 | # dockerfile: dev/Dockerfile.cluster 22 | # image: dev_cluster 23 | image: ghcr.io/equinor/flowify-dev-cluster:latest 24 | volumes: 25 | - /var/run/docker.sock:/var/run/docker.sock 26 | depends_on: 27 | - mongo 28 | environment: 29 | - KUBERNETES_SERVICE_HOST=cluster-control-plane 30 | - KUBERNETES_SERVICE_PORT=6443 31 | - KEEP_KIND_CLUSTER_ALIVE=false 32 | healthcheck: 33 | test: kubectl rollout status deployments -n argo --timeout=1s || exit 1 34 | interval: 5s 35 | retries: 25 36 | start_period: 1s 37 | timeout: 120s 38 | 39 | server: 40 | container_name: flowify_server 41 | build: 42 | context: ../ 43 | dockerfile: dev/Dockerfile.server 44 | image: dev_server 45 | ports: 46 | - "8842:8842" 47 | volumes: 48 | - /var/run/docker.sock:/var/run/docker.sock 49 | depends_on: 50 | cluster: 51 | condition: service_healthy 52 | environment: 53 | - KUBERNETES_SERVICE_HOST=cluster-control-plane 54 | - KUBERNETES_SERVICE_PORT=6443 55 | - FLOWIFY_DB_SELECT=mongo 56 | - FLOWIFY_DB_CONFIG_ADDRESS=mongo_server 57 | - FLOWIFY_DB_CONFIG_PORT=27017 58 | - FLOWIFY_SERVER_PORT=8842 59 | - FLOWIFY_KUBERNETES_NAMESPACE=argo 60 | - FLOWIFY_KUBERNETES_KUBECONFIGPATH=/root/.kube/config 61 | 
- KUBECONFIG=/root/.kube/config 62 | - FLOWIFY_AUTH_HANDLER=azure-oauth2-openid-token 63 | - FLOWIFY_AUTH_CONFIG_ISSUER=sandbox 64 | - FLOWIFY_AUTH_CONFIG_AUDIENCE=flowify 65 | - FLOWIFY_AUTH_CONFIG_KEYSURL=DISABLE_JWT_SIGNATURE_VERIFICATION 66 | healthcheck: 67 | test: curl -sL 127.0.0.1:8842 -o /dev/null || exit 1 68 | interval: 5s 69 | retries: 5 70 | start_period: 1s 71 | timeout: 30s 72 | 73 | networks: 74 | default: 75 | name: kind 76 | external: false 77 | driver: bridge 78 | -------------------------------------------------------------------------------- /dev/example_data/dump/admin/system.version.bson: -------------------------------------------------------------------------------- 1 | ; _id featureCompatibilityVersion version 5.0 -------------------------------------------------------------------------------- /dev/example_data/dump/admin/system.version.metadata.json: -------------------------------------------------------------------------------- 1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"}],"uuid":"031d66b3c1dc4832a0d66f16d7ad0bad","collectionName":"system.version","type":"collection"} -------------------------------------------------------------------------------- /dev/example_data/dump/config/external_validation_keys.bson: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/equinor/flowify-workflows-server/5b6efc03407e5cf7cc068ec2d21cdd6bbde960dd/dev/example_data/dump/config/external_validation_keys.bson -------------------------------------------------------------------------------- /dev/example_data/dump/config/external_validation_keys.metadata.json: -------------------------------------------------------------------------------- 1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"},{"v":{"$numberInt":"2"},"key":{"ttlExpiresAt":{"$numberInt":"1"}},"name":"ExternalKeysTTLIndex","expireAfterSeconds":{"$numberInt":"0"}}],"uuid":"7c0a6005e0f44ed5965e0a320522a822","collectionName":"external_validation_keys","type":"collection"} -------------------------------------------------------------------------------- /dev/example_data/dump/config/tenantMigrationDonors.bson: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/equinor/flowify-workflows-server/5b6efc03407e5cf7cc068ec2d21cdd6bbde960dd/dev/example_data/dump/config/tenantMigrationDonors.bson -------------------------------------------------------------------------------- /dev/example_data/dump/config/tenantMigrationDonors.metadata.json: -------------------------------------------------------------------------------- 1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"},{"v":{"$numberInt":"2"},"key":{"expireAt":{"$numberInt":"1"}},"name":"TenantMigrationDonorTTLIndex","expireAfterSeconds":{"$numberInt":"0"}}],"uuid":"ad328b28de834e40900968113dea9e2d","collectionName":"tenantMigrationDonors","type":"collection"} -------------------------------------------------------------------------------- /dev/example_data/dump/config/tenantMigrationRecipients.bson: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/equinor/flowify-workflows-server/5b6efc03407e5cf7cc068ec2d21cdd6bbde960dd/dev/example_data/dump/config/tenantMigrationRecipients.bson -------------------------------------------------------------------------------- 
/dev/example_data/dump/config/tenantMigrationRecipients.metadata.json: -------------------------------------------------------------------------------- 1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"},{"v":{"$numberInt":"2"},"key":{"expireAt":{"$numberInt":"1"}},"name":"TenantMigrationRecipientTTLIndex","expireAfterSeconds":{"$numberInt":"0"}}],"uuid":"a4ddd395332a4262828dd57ce1116636","collectionName":"tenantMigrationRecipients","type":"collection"} -------------------------------------------------------------------------------- /dev/example_data/dump/test/Components.bson: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/equinor/flowify-workflows-server/5b6efc03407e5cf7cc068ec2d21cdd6bbde960dd/dev/example_data/dump/test/Components.bson -------------------------------------------------------------------------------- /dev/example_data/dump/test/Components.metadata.json: -------------------------------------------------------------------------------- 1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"}],"uuid":"98689d39dbc04881b98cdb43e6696f73","collectionName":"Components","type":"collection"} -------------------------------------------------------------------------------- /dev/example_data/dump/test/Jobs.bson: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/equinor/flowify-workflows-server/5b6efc03407e5cf7cc068ec2d21cdd6bbde960dd/dev/example_data/dump/test/Jobs.bson -------------------------------------------------------------------------------- /dev/example_data/dump/test/Jobs.metadata.json: -------------------------------------------------------------------------------- 1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"}],"uuid":"e244652e3a7b4cddabe930f4d5bc87ff","collectionName":"Jobs","type":"collection"} -------------------------------------------------------------------------------- /dev/example_data/dump/test/Workflows.bson: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/equinor/flowify-workflows-server/5b6efc03407e5cf7cc068ec2d21cdd6bbde960dd/dev/example_data/dump/test/Workflows.bson -------------------------------------------------------------------------------- /dev/example_data/dump/test/Workflows.metadata.json: -------------------------------------------------------------------------------- 1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"}],"uuid":"7b4157a1f2a64ea098a001f68b3fb279","collectionName":"Workflows","type":"collection"} -------------------------------------------------------------------------------- /dev/flowify_server_runner.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | RED='\033[0;31m' 4 | GREEN='\033[0;32m' 5 | BLUE='\033[0;34m' 6 | PURPLE='\033[0;35m' 7 | CYAN='\033[0;36m' 8 | WHITE='\033[0;37m' 9 | NOCOLOR='\033[0m' # No Color 10 | 11 | bash kind_cluster_config_export.sh 12 | cluster_exists=$? 
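# capture the export script's exit status: 0 means a kind cluster was found and its kubeconfig exported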
13 | 14 | if [ "$cluster_exists" -ne 0 ] 15 | then 16 | echo -e ${RED} 17 | echo ===================================================================== 18 | echo Cluster does not exist, cannot continue 19 | echo ===================================================================== 20 | echo -e ${NOCOLOR} 21 | exit $cluster_exists 22 | fi 23 | 24 | echo -e ${BLUE} 25 | echo ===================================================================== 26 | echo Deploying flowify server 27 | echo ===================================================================== 28 | echo -e ${NOCOLOR} 29 | 30 | bash -c '$GOPATH/src/github.com/equinor/flowify-workflows-server/build/flowify-workflows-server' 31 | -------------------------------------------------------------------------------- /dev/kind.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | # - role: worker -------------------------------------------------------------------------------- /dev/kind_cluster_config_export.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | RED='\033[0;31m' 4 | GREEN='\033[0;32m' 5 | BLUE='\033[0;34m' 6 | PURPLE='\033[0;35m' 7 | CYAN='\033[0;36m' 8 | WHITE='\033[0;37m' 9 | NOCOLOR='\033[0m' # No Color 10 | 11 | bash -c 'kind export --name cluster kubeconfig 2>/dev/null' 12 | cluster_exist=$? 13 | 14 | if [ "$cluster_exist" -eq 0 ] 15 | then 16 | echo -e ${GREEN} 17 | echo ===================================================================== 18 | echo Kind cluster exists, getting kubeconfig from cluster 19 | echo Modifying Kubernetes config to point to Kind master node 20 | echo ===================================================================== 21 | echo -e ${NOCOLOR} 22 | sed -i "s/^ server:.*/ server: https:\/\/$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/" $HOME/.kube/config 23 | else 24 | echo -e ${RED} 25 | echo ===================================================================== 26 | echo Kind cluster doesn\'t exist, server cannot be run 27 | echo ===================================================================== 28 | echo -e ${NOCOLOR} 29 | exit 1 30 | fi 31 | -------------------------------------------------------------------------------- /docker-compose-tests.yaml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | services: 3 | mongo: 4 | # one node mongoDB replica set for local development 5 | container_name: mongodb 6 | image: mongo:5 7 | healthcheck: 8 | test: test $$(echo "rs.initiate().ok || rs.status().ok" | mongo --quiet) -eq 1 9 | interval: 10s 10 | command: ["--replSet", "rs0", "--bind_ip_all"] 11 | app: 12 | build: 13 | context: .
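# build the 'tester' stage of the multi-stage Dockerfile at the repository root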
14 | target: tester 15 | args: 16 | - FLOWIFY_GIT_SHA=${FLOWIFY_GIT_SHA} 17 | environment: 18 | FLOWIFY_DB_CONFIG_ADDRESS: mongo 19 | FLOWIFY_DB_CONFIG_PORT: 27017 20 | depends_on: 21 | - mongo 22 | volumes: 23 | - ./testoutputs:/go/src/github.com/equinor/flowify-workflows-server/testoutputs 24 | command: make UNITTEST_COVERAGE=1 unittest flowify_git_sha=${FLOWIFY_GIT_SHA} 25 | -------------------------------------------------------------------------------- /e2etest/Makefile: -------------------------------------------------------------------------------- 1 | all: e2etest 2 | 3 | # Make sure we inject a sha into the test binaries, if available 4 | ifndef flowify_git_sha 5 | flowify_git_sha=$(shell git rev-parse --short HEAD) 6 | $(info Set flowify_git_sha=$(flowify_git_sha) from git rev-parse /e2etest) 7 | else 8 | $(info Set flowify_git_sha=$(flowify_git_sha) from arg /e2etest) 9 | endif 10 | 11 | TEST_OUTPUT_DIR = ../testoutputs 12 | 13 | e2etest: 14 | mkdir -p $(TEST_OUTPUT_DIR) 15 | (go test -v . -ldflags "-X 'github.com/equinor/flowify-workflows-server/apiserver.CommitSHA=$(flowify_git_sha)' -X 'github.com/equinor/flowify-workflows-server/apiserver.BuildTime=$(shell date -Is)'" | tee $(TEST_OUTPUT_DIR)/e2erun.log) || true 16 | cat $(TEST_OUTPUT_DIR)/e2erun.log | go-junit-report > $(TEST_OUTPUT_DIR)/e2ereport.xml 17 | 18 | .PHONY: e2etest -------------------------------------------------------------------------------- /e2etest/artifact_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "net/http" 5 | ) 6 | 7 | func (s *e2eTestSuite) Test_getArtifact() { 8 | s.T().Skip("Artifact test known to fail") 9 | requestor := make_requestor(s.client) 10 | 11 | defer func() { 12 | resp, err := requestor("http://localhost:8842/api/v1/workflows/test/workflow1", http.MethodDelete, "") 13 | 14 | s.NoError(err) 15 | s.Equal(http.StatusOK, resp.StatusCode, "Expected and known to fail") 16 | }() 17 | /* 18 | // Push a workflow 19 | resp, err := requestor("http://localhost:8842/api/v1/workflows/test", http.MethodPost, mockdata.WorkflowWithOutputArtifact) 20 | s.NoError(err) 21 | 22 | s.Equal(http.StatusOK, resp.StatusCode) 23 | 24 | if err != nil { 25 | s.T().Fatalf("Error reaching the flowify server: %v", err) 26 | } 27 | 28 | s.Equal(http.StatusOK, resp.StatusCode) 29 | 30 | // Give container time to spin up and do stuff. Should be changed for a 31 | // wait condition at some point. 
32 | time.Sleep(10 * time.Second) 33 | resp, err = requestor("http://localhost:8842/artifacts/test/artifact-passing/artifact-passing/hello-art", http.MethodGet, "") 34 | s.NoError(err) 35 | */ 36 | } 37 | -------------------------------------------------------------------------------- /e2etest/component_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | // flowifypkg "github.com/equinor/flowify-workflows-server/pkg/apiclient/interceptor" 7 | // "github.com/equinor/flowify-workflows-server/workflowserver" 8 | ) 9 | 10 | func body2string(body io.ReadCloser) []byte { 11 | buf := new(bytes.Buffer) 12 | buf.ReadFrom(body) 13 | 14 | return buf.Bytes() 15 | } 16 | 17 | func wrap(workflowstring string) string { 18 | return "{ \"template\":" + workflowstring + "}" 19 | } 20 | 21 | func (s *e2eTestSuite) Test_components() { 22 | /* 23 | 24 | requestor := make_requestor(s.client) 25 | 26 | ccstore := workflowserver.NewFlowifyWorkflowStorageClient(storageclient.NewMongoClient()) 27 | 28 | // Clear DB collection before returning the client handler 29 | ccstore.Clear() 30 | 31 | // Change names to match e2e tests jwt token 32 | wf1 := mockdata.WorkflowTemplate1 33 | wf2 := mockdata.WorkflowTemplate2 34 | 35 | resp, err := requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodPost, wrap(wf1)) 36 | s.NoError(err) 37 | s.Equal(http.StatusOK, http.StatusOK) 38 | 39 | resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodPost, wrap(wf1)) 40 | s.NoError(err) 41 | s.Equal(http.StatusOK, http.StatusOK) 42 | 43 | resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodPost, wrap(wf1)) 44 | s.NoError(err) 45 | s.Equal(http.StatusOK, http.StatusOK) 46 | 47 | resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodPost, wrap(wf2)) 48 | s.NoError(err) 49 | s.Equal(http.StatusOK, http.StatusOK) 50 | 51 | resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodGet, "") 52 | s.Equal(http.StatusOK, resp.StatusCode) 53 | 54 | var l1 workflowserver.WorkflowList 55 | json.Unmarshal(body2string(resp.Body), &l1) 56 | s.Len(l1.Items, 2) 57 | 58 | var wft v1alpha1.WorkflowTemplate 59 | json.Unmarshal(l1.Items[0].Content, &wft) 60 | 61 | resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/"+wft.ObjectMeta.Name+"/versions?workspace=test", http.MethodGet, "") 62 | s.Equal(http.StatusOK, resp.StatusCode) 63 | 64 | var l2 workflowserver.VersionList 65 | json.Unmarshal(body2string(resp.Body), &l2) 66 | 67 | s.Len(l2.Versions, 3) 68 | 69 | Names := make([]string, 3) 70 | Versions := make([]string, 3) 71 | 72 | for i, item := range l2.Versions { 73 | Names[i] = item.WrittenBy 74 | Versions[i] = item.Version 75 | } 76 | 77 | s.Len(l2.Versions, 3) 78 | 79 | s.ElementsMatch([]string{"0", "1", "2"}, Versions) 80 | s.ElementsMatch([]string{"test@test.com", "test@test.com", "test@test.com"}, Names) // injected from the used test auth token 81 | 82 | resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/workflowtemplate1?workspace=test", http.MethodGet, "") 83 | s.NoError(err) 84 | s.Equal(http.StatusOK, resp.StatusCode) 85 | s.True(json.Valid(body2string(resp.Body))) 86 | 87 | _, err = requestor("http://localhost:8842/api/v1/flowify-workflows/workflowtemplate1?version=1&workspace=test", http.MethodGet, "") 
88 | s.NoError(err) 89 | s.Equal(http.StatusOK, resp.StatusCode) 90 | 91 | // Submit non-existing version 92 | req := flowifypkg.WorkflowSubmitRequest{Namespace: "test", ResourceKind: "WorkflowTemplate", ResourceName: "workflowtemplate2", Version: "1"} 93 | payload, err := json.Marshal(req) 94 | s.NoError(err) 95 | 96 | resp, err = requestor("http://localhost:8842/api/v1/workflows/test/submit", http.MethodPost, string(payload)) 97 | s.NoError(err) 98 | s.Equal(http.StatusNotFound, resp.StatusCode) 99 | 100 | // Submit existing version, with explicit version 101 | req = flowifypkg.WorkflowSubmitRequest{Namespace: "test", ResourceKind: "WorkflowTemplate", ResourceName: "workflowtemplate2", Version: "0"} 102 | payload, err = json.Marshal(req) 103 | s.NoError(err) 104 | 105 | resp, err = requestor("http://localhost:8842/api/v1/workflows/test/submit", http.MethodPost, string(payload)) 106 | s.NoError(err) 107 | s.Equal(http.StatusOK, resp.StatusCode) 108 | 109 | // submit with implicit 'last' version 110 | req = flowifypkg.WorkflowSubmitRequest{Namespace: "test", ResourceKind: "WorkflowTemplate", ResourceName: "workflowtemplate2"} 111 | payload, err = json.Marshal(req) 112 | s.NoError(err) 113 | 114 | resp, err = requestor("http://localhost:8842/api/v1/workflows/test/submit", http.MethodPost, string(payload)) 115 | s.NoError(err) 116 | s.Equal(http.StatusOK, resp.StatusCode) 117 | 118 | var wf v1alpha1.Workflow 119 | err = json.Unmarshal(body2string(resp.Body), &wf) 120 | s.NoError(err) 121 | 122 | name := wf.ObjectMeta.Name 123 | 124 | // Fetch workflow, and verify name is the same as the submitted workflow object 125 | resp, err = requestor("http://localhost:8842/api/v1/workflows/test/"+name, http.MethodGet, "") 126 | s.Equal(http.StatusOK, resp.StatusCode) 127 | s.NoError(err) 128 | 129 | err = json.Unmarshal(body2string(resp.Body), &wf) 130 | s.NoError(err) 131 | 132 | s.Equal(name, wf.ObjectMeta.Name) 133 | 134 | // Check that the workflowtemplate was reaped 135 | resp, err = requestor("http://localhost:8842/api/v1/workflow-templates/test", http.MethodGet, "") 136 | s.NoError(err) 137 | s.Equal(http.StatusOK, resp.StatusCode) 138 | 139 | var wftList v1alpha1.WorkflowTemplateList 140 | err = json.Unmarshal(body2string(resp.Body), &wftList) 141 | 142 | s.Len(wftList.Items, 0) 143 | s.NoError(err) 144 | 145 | // Remove the workflow 146 | resp, err = requestor("http://localhost:8842/api/v1/workflows/test/"+name, http.MethodDelete, "") 147 | s.NoError(err) 148 | s.Equal(http.StatusOK, resp.StatusCode) 149 | */ 150 | } 151 | -------------------------------------------------------------------------------- /e2etest/default-roles.yaml: -------------------------------------------------------------------------------- 1 | # Deploy the access permissions for the `default` service account 2 | 3 | --- 4 | 5 | apiVersion: rbac.authorization.k8s.io/v1 6 | kind: RoleBinding 7 | metadata: 8 | name: default-rb 9 | namespace: test 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: Role 13 | name: default-role 14 | subjects: 15 | - kind: ServiceAccount 16 | name: default 17 | namespace: test 18 | 19 | --- 20 | 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: Role 23 | metadata: 24 | name: default-role 25 | namespace: test 26 | rules: 27 | - apiGroups: ["argoproj.io"] 28 | resources: ["workflows"] 29 | verbs: ["get", "create"] 30 | - apiGroups: [""] 31 | resources: ["pods"] 32 | verbs: ["create"] 33 | -------------------------------------------------------------------------------- 
/e2etest/secret_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "net/http" 7 | 8 | wf "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" 9 | ) 10 | 11 | func ignore[T any](T) {} 12 | func (s *e2eTestSuite) Test_SecretHandling_live_system() { 13 | requestor := make_requestor(s.client) 14 | ignore(requestor) 15 | // Push some secrets 16 | 17 | type SecretField struct { 18 | Key string `json:"key"` 19 | Value string `json:"value"` 20 | } 21 | 22 | type SecretFieldList struct { 23 | Items []SecretField `json:"items"` 24 | } 25 | 26 | payload_obj1 := SecretFieldList{ 27 | Items: []SecretField{ 28 | {Key: "key1", Value: "val1"}, 29 | {Key: "key2", Value: "val2"}, 30 | {Key: "fake_key1", Value: "dummyload"}}, 31 | } 32 | 33 | ignore(payload_obj1) 34 | /* 35 | 36 | // There is no check on write, so this is legit. But the token cannot read it back again... 37 | 38 | payload_obj2 := secret.SecretFieldList{ 39 | Items: []secret.SecretField{secret.SecretField{"key1", "val1"}, secret.SecretField{"key2", "val2"}, secret.SecretField{"fake_key1", "valX"}}} 40 | 41 | workspaces := []string{"test", "test-no-access", "not-existing-workspace"} 42 | statuses := []int{http.StatusCreated, http.StatusForbidden, http.StatusNotFound} 43 | 44 | for i, obj := range []secret.SecretFieldList{payload_obj1, payload_obj2, payload_obj2} { 45 | payload_json, err := json.Marshal(obj) 46 | s.NoError(err) 47 | resp, err := requestor("http://localhost:8842/api/v1/secrets/"+workspaces[i], http.MethodPost, string(payload_json)) 48 | s.NoError(err) 49 | s.Equal(statuses[i], resp.StatusCode) 50 | } 51 | 52 | // Read back available fields 53 | resp, err := requestor("http://localhost:8842/api/v1/secrets/test", http.MethodGet, "") 54 | 55 | s.NoError(err) 56 | s.Equal(http.StatusOK, resp.StatusCode) 57 | 58 | var list secret.SecretKeyList 59 | marshalResponse(resp, &list) 60 | 61 | s.ElementsMatch(list.Keys, []string{"key1", "key2", "fake_key1"}) 62 | 63 | // Run a workflow with valid secret access 64 | resp, err = requestor("http://localhost:8842/api/v1/workflows/test", http.MethodPost, mockdata.WorkflowWithSecret) 65 | 66 | s.NoError(err) 67 | s.Equal(http.StatusOK, resp.StatusCode) 68 | 69 | checkLogMessage(s, "wfwithsecret", "dummyload") 70 | 71 | resp, err = requestor("http://localhost:8842/api/v1/workflows/test/wfwithsecret", http.MethodDelete, "") 72 | s.NoError(err) 73 | s.Equal(http.StatusOK, resp.StatusCode) 74 | */ 75 | } 76 | 77 | func checkLogMessage(s *e2eTestSuite, wfName, expectedMessage string) { 78 | requestor := make_requestor(s.client) 79 | resp, err := requestor("http://localhost:8842/api/v1/workflows/test/"+wfName+"/log?logOptions.container=main&logOptions.follow=true", http.MethodGet, "") 80 | s.NoError(err) 81 | s.Equal(http.StatusOK, resp.StatusCode) 82 | s.Equal("text/event-stream", resp.Header.Get("Content-Type")) 83 | 84 | buf_log := new(bytes.Buffer) 85 | buf_log.ReadFrom(resp.Body) 86 | buf_log.Next(6) // remove data prefix 87 | var objmap map[string]json.RawMessage 88 | 89 | err = json.Unmarshal(buf_log.Bytes(), &objmap) 90 | s.NoError(err) 91 | 92 | var entry wf.LogEntry 93 | s.NoError(json.Unmarshal(objmap["result"], &entry)) 94 | s.Equal(expectedMessage, entry.Content) 95 | } 96 | -------------------------------------------------------------------------------- /e2etest/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env 
bash 2 | 3 | set -eu -o pipefail 4 | 5 | function cleanup { 6 | set +e # Continue cleaning up if there is an issue 7 | 8 | printf 'Test shell script cleanup\n' 9 | 10 | kubectl delete ns test --ignore-not-found 11 | kubectl delete ns test-no-access --ignore-not-found 12 | 13 | # Quit the Argo controller 14 | 15 | argopid=$(ps -ef | grep [w]orkflow-controller | tr -s ' '| cut -f 2 -d ' ') 16 | 17 | if [[ ! -z ${argopid} ]]; then 18 | kill $argopid 19 | fi 20 | 21 | # Quit the Flowify server 22 | flowifypid=$(ps -ef | grep [f]lowify-server | tr -s ' '| cut -f 2 -d ' ') 23 | 24 | if [[ ! -z ${flowifypid} ]]; then 25 | kill $flowifypid 26 | fi 27 | 28 | } 29 | 30 | trap cleanup EXIT 31 | 32 | pushd .. 33 | 34 | # Start a kubernetes cluster 35 | minikube start 36 | kubectl create namespace test --dry-run=client -o yaml | kubectl apply -f - 37 | kubectl create namespace test-no-access --dry-run=client -o yaml | kubectl apply -f - 38 | 39 | # Inject the default service account with the corresponding roles 40 | kubectl apply -f e2etest/default-roles.yaml 41 | 42 | # Copy artifact configmap to test namespace 43 | kubectl get cm artifact-repositories --namespace=argo -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=test -f - 44 | kubectl get secret my-minio-cred --namespace=argo -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=test -f - 45 | 46 | # Launch the flowify server 47 | export KUBERNETES_SERVICE_HOST=$(kubectl config view --minify | grep server | cut -f 3 -d "/" | cut -d ":" -f 1) 48 | export KUBERNETES_SERVICE_PORT=$(kubectl config view --minify | grep server | cut -f 4 -d ":") 49 | 50 | export FLOWIFY_MONGO_ADDRESS=localhost 51 | export FLOWIFY_MONGO_PORT=27017 52 | 53 | ./build/flowify-workflows-server -v 7 > /dev/null 2>& 1 & 54 | 55 | # Prints the PID of the flowify server so we can hook up a debugger 56 | ps -ef | grep [f]lowify-server | tr -s ' ' | cut -f 2 -d ' ' 57 | 58 | # Start a MongoDB server 59 | docker container start $(docker container ls --all | grep mongo | cut -f 1 -d ' ') > /dev/null 2>& 1 60 | 61 | cd $GOPATH 62 | controller=$(find . -wholename "*/dist/workflow-controller") 63 | 64 | # Launch the Argo controller 65 | PNS_PRIVILEGED=true DEFAULT_REQUEUE_TIME=100ms LEADER_ELECTION_IDENTITY=local ALWAYS_OFFLOAD_NODE_STATUS=false OFFLOAD_NODE_STATUS_TTL=30s WORKFLOW_GC_PERIOD=30s UPPERIO_DB_DEBUG=0 ARCHIVED_WORKFLOW_GC_PERIOD=30s $controller --executor-image argoproj/argoexec:v3.1.13 --namespaced=true --namespace test > /dev/null 2>& 1 & 66 | 67 | popd 68 | 69 | unset KUBERNETES_SERVICE_HOST 70 | unset KUBERNETES_SERVICE_PORT 71 | 72 | # Run all e2e tests (the tests in this directory) 73 | go test .
74 | 75 | 76 | -------------------------------------------------------------------------------- /e2etest/workspace_cm_test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | labels: 7 | app.kubernetes.io/component: workspace-config 8 | app.kubernetes.io/part-of: flowify 9 | name: test-no-access 10 | namespace: test 11 | data: 12 | roles: "[\"role-x\", \"role-y\", \"role-z\"]" 13 | projectName: test-no-access 14 | description: > 15 | The e2e injected token should not be able to read/write to this workspace 16 | 17 | --- 18 | 19 | apiVersion: v1 20 | kind: ConfigMap 21 | metadata: 22 | labels: 23 | app.kubernetes.io/component: workspace-config 24 | app.kubernetes.io/part-of: flowify 25 | name: test 26 | namespace: test 27 | data: 28 | roles: "[[\"role-x\"], [\"role-y\"]]" 29 | projectName: test 30 | description: bla 31 | hideForUnauthorized: "false" 32 | serviceAccountName: default 33 | -------------------------------------------------------------------------------- /e2etest/workspace_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/equinor/flowify-workflows-server/pkg/workspace" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func (s *e2eTestSuite) Test_Workspaces() { 11 | requestor := make_authenticated_requestor(s.client, mockUser) 12 | 13 | resp, err := requestor(server_addr+"/api/v1/workspaces/", http.MethodGet, "") 14 | require.NoError(s.T(), err, BodyStringer{resp.Body}) 15 | require.Equal(s.T(), http.StatusOK, resp.StatusCode, BodyStringer{resp.Body}) 16 | 17 | type WorkspaceList struct { 18 | Items []workspace.WorkspaceGetRequest `json:"items"` 19 | } 20 | var list WorkspaceList 21 | err = marshalResponse(ResponseBodyBytes(resp), &list) 22 | 23 | s.NoError(err) 24 | s.NotEmpty(list.Items) 25 | } 26 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "os" 7 | "syscall" 8 | "time" 9 | 10 | log "github.com/sirupsen/logrus" 11 | 12 | "github.com/equinor/flowify-workflows-server/apiserver" 13 | ) 14 | 15 | const ( 16 | maxWait = time.Second * 10 17 | ) 18 | 19 | var status = 0 20 | 21 | func logFatalHandler() { 22 | status = 1 23 | // send SIGTERM to itself to exit gracefully 24 | syscall.Kill(os.Getpid(), syscall.SIGTERM) 25 | select {} 26 | } 27 | 28 | func isFlagPassed(name string) bool { 29 | found := false 30 | flag.Visit(func(f *flag.Flag) { 31 | if f.Name == name { 32 | found = true 33 | } 34 | }) 35 | return found 36 | } 37 | 38 | func first[T any, S any](t T, s S) T { return t } 39 | 40 | func main() { 41 | log.Infof("Starting process with pid %d", os.Getpid()) 42 | log.RegisterExitHandler(logFatalHandler) 43 | 44 | // read config, possibly overloaded by ENV VARS 45 | cfg, err := apiserver.LoadConfigFromPath(".") 46 | if err != nil { 47 | log.Error("could not load config, ", err) 48 | return 49 | } 50 | 51 | // Set some common flags 52 | logLevel := flag.String("loglevel", "info", "Set the printout level for the logger (trace, debug, info, warn, error, fatal, panic)") 53 | portNumber := flag.Int("port", 8842, "Set the TCP port number accepting connections") 54 | dbName := flag.String("db", "Flowify", "Set the name of the database to use") 55 | k8sConfigNamespace :=
flag.String("namespace", "test", "K8s configuration namespace to use") 56 | authHandlerSelector := flag.String("auth", "azure-oauth2-openid-token", "Set the security handler for the backend") 57 | kubeconfig := flag.String("kubeconfig", "~/kube/config", "path to kubeconfig file") 58 | dumpConfig := flag.String("dumpconfig", "", "Dump the config in yaml format to filename or stdout '-'") 59 | flag.Parse() 60 | 61 | // Connect flags to override config (flags > env > configfile ) 62 | // viper nested keys dont work well with flags so do it explicitly: https://github.com/spf13/viper/issues/368 63 | if isFlagPassed("loglevel") { 64 | cfg.LogConfig.LogLevel = *logLevel 65 | } 66 | if isFlagPassed("port") { 67 | cfg.ServerConfig.Port = *portNumber 68 | } 69 | if isFlagPassed("db") { 70 | cfg.DbConfig.DbName = *dbName 71 | } 72 | if isFlagPassed("kubeconfig") { 73 | cfg.KubernetesKonfig.KubeConfigPath = *kubeconfig 74 | } 75 | if isFlagPassed("namespace") { 76 | cfg.KubernetesKonfig.Namespace = *k8sConfigNamespace 77 | } 78 | if isFlagPassed("auth") { 79 | cfg.AuthConfig.Handler = *authHandlerSelector 80 | } 81 | 82 | // handle config output 83 | if isFlagPassed("dumpconfig") { 84 | cfg.Dump(*dumpConfig) 85 | } 86 | 87 | // LogConfig is handled directly 88 | level, err := log.ParseLevel(cfg.LogConfig.LogLevel) 89 | if err != nil { 90 | log.Errorf("could not parse log level: %s", cfg.LogConfig) 91 | } 92 | log.SetLevel(level) 93 | log.WithFields(log.Fields{"Loglevel": log.StandardLogger().Level}).Infof("Setting global loglevel") 94 | 95 | ctx, cancel := context.WithCancel(context.Background()) 96 | defer cancel() 97 | 98 | server, err := apiserver.NewFlowifyServerFromConfig(cfg) 99 | if err != nil { 100 | log.Error("Cannot create a Flowify server object", err) 101 | os.Exit(1) 102 | } 103 | 104 | // run is a blocking call, but may return early on error 105 | err = server.Run(ctx, nil) 106 | if err != nil { 107 | log.Error(err) 108 | os.Exit(1) 109 | } 110 | 111 | // Create a deadline to wait for. 112 | ctx, cancel = context.WithTimeout(context.Background(), maxWait) 113 | defer cancel() 114 | 115 | log.Info("Received SIGNAL: waiting for active requests to finish...") 116 | server.HttpServer.Shutdown(ctx) 117 | 118 | os.Exit(status) 119 | } 120 | -------------------------------------------------------------------------------- /models/examples/brick-parameter-component.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "A brick component with an input parameter", 3 | "inputs": [ 4 | { "name": "greeting", "mediatype": ["string"], "type": "parameter" }, 5 | { "name": "sender", "mediatype": ["string"], "type": "parameter" } 6 | ], 7 | "type": "component", 8 | "implementation": { 9 | "type": "brick", 10 | "container": { 11 | "name": "anyname", 12 | "image": "docker/whalesay", 13 | "command": ["cowsay"], 14 | "args": [ 15 | "Hello static I will be appended (TBD?) 
by potentially variable flowify-args" 16 | ] 17 | }, 18 | "args": [ 19 | { 20 | "source": "Hello static text.", 21 | "description": "A static argument" 22 | }, 23 | { 24 | "source": { "port": "greeting" }, 25 | "target": { "type": "env_secret" }, 26 | "description": "A variable stored in env $(GREET) and expanded by k8s upon execution" 27 | }, 28 | { 29 | "source": { "port": "sender" }, 30 | "target": { "type": "parameter" } 31 | }, 32 | { 33 | "source": { "port": "sender" }, 34 | "target": { "type": "file" }, 35 | "description": "A variable stored as an argo artefact in /tmp/sender" 36 | } 37 | ], 38 | "results": [ 39 | { 40 | "source": "Hello static text.", 41 | "target": { "port": "outputport" }, 42 | "description": "A static result, eg for mocking" 43 | }, 44 | { 45 | "source": { "file": "/tmp/res.txt" }, 46 | "target": { "port": "outputport" }, 47 | "description": "A result from file mapped to component output interface" 48 | } 49 | ] 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /models/examples/graph-input-volumes.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "job", 3 | "uid": "00000000-0000-0000-0000-000000000001", 4 | "description": "A job with volume input used as input into graph components", 5 | "inputValues": [ 6 | { "value": "{\"name\":\"vol-config-0\"}", "target": "mount-0" }, 7 | { "value": "{\"name\":\"vol-config-1\"}", "target": "mount-1" } 8 | ], 9 | "workflow": { 10 | "uid": "00000000-0000-0000-0000-000000000002", 11 | "component": { 12 | "type": "component", 13 | "uid": "00000000-0000-0000-0000-000000000003", 14 | "inputs": [ 15 | { "name": "mount-0", "type": "volume" }, 16 | { "name": "mount-1", "type": "volume" } 17 | ], 18 | "implementation": { 19 | "type": "graph", 20 | "inputMappings": [ 21 | { 22 | "source": { "port": "mount-0" }, 23 | "target": { "node": "a1", "port": "mount-a" } 24 | }, 25 | { 26 | "source": { "port": "mount-1" }, 27 | "target": { "node": "a1", "port": "mount-b" } 28 | } 29 | ], 30 | "nodes": [ 31 | { 32 | "id": "a1", 33 | "node": { 34 | "uid": "00000000-0000-0000-0000-000000000004", 35 | "type": "component", 36 | "inputs": [ 37 | { "name": "mount-a", "type": "volume" }, 38 | { "name": "mount-b", "type": "volume" } 39 | ], 40 | "implementation": { 41 | "type": "brick", 42 | "container": { 43 | "name": "whale", 44 | "image": "docker/whalesay", 45 | "command": ["cowsay"] 46 | }, 47 | "args": [ 48 | { 49 | "target": { 50 | "type": "volume", 51 | "prefix": "/opt/volumes/" 52 | }, 53 | "source": { "port": "mount-a" } 54 | }, 55 | { 56 | "target": { 57 | "type": "volume", 58 | "prefix": "/mnt" 59 | }, 60 | "source": { "port": "mount-b" } 61 | } 62 | ] 63 | } 64 | } 65 | } 66 | ] 67 | } 68 | }, 69 | "type": "workflow", 70 | "workspace": "test" 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /models/examples/hello-world-workflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hello-world-workflow", 3 | "description": "A hello world workflow (with an inline component)", 4 | "type": "workflow", 5 | "component": { 6 | "description": "A hello world component", 7 | "inputs": [], 8 | "outputs": [], 9 | "type": "component", 10 | "implementation": { 11 | "type": "brick", 12 | "container": { 13 | "name": "saycontainer", 14 | "image": "docker/whalesay", 15 | "command": ["cowsay"], 16 | "args": ["hello world"] 17 | } 18 | } 19 | }, 20 | 
"workspace": "test" 21 | } 22 | -------------------------------------------------------------------------------- /models/examples/if-statement.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Conditional example", 3 | "type": "job", 4 | "inputValues": [ 5 | { 6 | "value": "10", 7 | "target": "max" 8 | } 9 | ], 10 | "workflow": { 11 | "name": "wf-example", 12 | "description": "Test workflow with an if statement example", 13 | "type": "workflow", 14 | "workspace": "argo", 15 | "component": { 16 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c0", 17 | "description": "Graph component", 18 | "inputs": [ 19 | { "name": "max", "mediatype": ["integer"], "type": "parameter" } 20 | ], 21 | "outputs": [{ "name": "description", "type": "parameter" }], 22 | "type": "component", 23 | "implementation": { 24 | "type": "graph", 25 | "inputMappings": [ 26 | { 27 | "source": { "port": "max" }, 28 | "target": { "node": "N1", "port": "value" } 29 | } 30 | ], 31 | "outputMappings": [ 32 | { 33 | "source": { "node": "If", "port": "ifOut" }, 34 | "target": { "port": "description" } 35 | } 36 | ], 37 | "nodes": [ 38 | { 39 | "id": "N1", 40 | "node": { 41 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b0", 42 | "description": "Generate", 43 | "inputs": [{ "name": "value", "type": "parameter" }], 44 | "outputs": [{ "name": "rand", "type": "parameter" }], 45 | "type": "component", 46 | "implementation": { 47 | "type": "brick", 48 | "container": { 49 | "name": "containername_n1_b1", 50 | "image": "bash:latest", 51 | "command": ["bash", "-c", "shuf -i 0-$0 -n1 > /tmp/out"] 52 | }, 53 | "args": [ 54 | { 55 | "source": { "port": "value" }, 56 | "target": { "type": "parameter" } 57 | } 58 | ], 59 | "results": [ 60 | { 61 | "source": { "file": "/tmp/out" }, 62 | "target": { "port": "rand" } 63 | } 64 | ] 65 | } 66 | } 67 | }, 68 | { 69 | "id": "If", 70 | "node": { 71 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c2", 72 | "description": "If/else component", 73 | "inputs": [ 74 | { 75 | "name": "valFromParam", 76 | "mediatype": ["number"], 77 | "type": "parameter" 78 | } 79 | ], 80 | "outputs": [{ "name": "ifOut", "type": "parameter" }], 81 | "type": "component", 82 | "implementation": { 83 | "type": "conditional", 84 | "inputMappings": [ 85 | { 86 | "source": { "port": "valFromParam" }, 87 | "target": { "port": "valParam" } 88 | } 89 | ], 90 | "outputMappings": [ 91 | { 92 | "source": { "node": "nodeTrue", "port": "out" }, 93 | "target": { "port": "ifOut" } 94 | } 95 | ], 96 | "nodeTrue": { 97 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b4", 98 | "description": "", 99 | "inputs": [ 100 | { 101 | "name": "valParam", 102 | "mediatype": ["number"], 103 | "type": "parameter" 104 | } 105 | ], 106 | "outputs": [{ "name": "out", "type": "parameter" }], 107 | "type": "component", 108 | "implementation": { 109 | "type": "brick", 110 | "container": { 111 | "name": "containername", 112 | "image": "alpine:latest", 113 | "command": [ 114 | "sh", 115 | "-c", 116 | "echo value $0 is huge > /tmp/out" 117 | ] 118 | }, 119 | "args": [ 120 | { 121 | "source": { "port": "valParam" }, 122 | "target": { "type": "parameter" } 123 | } 124 | ], 125 | "results": [ 126 | { 127 | "source": { "file": "/tmp/out" }, 128 | "target": { "port": "out" } 129 | } 130 | ] 131 | } 132 | }, 133 | "expression": { 134 | "left": { 135 | "name": "valFromParam", 136 | "mediatype": ["number"], 137 | "type": "parameter" 138 | }, 139 | "operator": ">=", 140 | "right": "5" 141 | } 142 | } 143 | } 144 | } 145 | ], 
146 | "edges": [ 147 | { 148 | "source": { "node": "N1", "port": "rand" }, 149 | "target": { "node": "If", "port": "valFromParam" } 150 | } 151 | ] 152 | } 153 | } 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /models/examples/job-map-example.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Job example", 3 | "type": "job", 4 | "inputValues": [ 5 | { 6 | "value": "6", 7 | "target": "numParts" 8 | }, 9 | { 10 | "value": "SECRET_PASS", 11 | "target": "secretL1" 12 | }, 13 | { 14 | "value": ["A", "B"], 15 | "target": "branch" 16 | } 17 | ], 18 | "workflow": 19 | { 20 | "name": "wf-example", 21 | "description": "Test workflow with a map example", 22 | "type": "workflow", 23 | "workspace": "argo", 24 | "component": 25 | { 26 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c0", 27 | "description": "Map component", 28 | "inputs": [ 29 | { "name": "numParts", "type": "parameter" }, 30 | { "name": "secretL1", "type": "env_secret" }, 31 | { "name": "branch", "type": "parameter_array" } 32 | ], 33 | "outputs": [ 34 | { "name": "outputParamArray", "type": "parameter_array" } 35 | ], 36 | "type": "component", 37 | "implementation": { 38 | "type": "map", 39 | "inputMappings": [ 40 | { 41 | "source": { "port": "numParts" }, 42 | "target": { "port": "inputParam" } 43 | }, 44 | { 45 | "source": { "port": "branch" }, 46 | "target": { "port": "val" } 47 | }, 48 | { 49 | "source": { "port": "secretL1" }, 50 | "target": { "port": "inputScrt" } 51 | } 52 | ], 53 | "outputMappings": [ 54 | { 55 | "source": { "port": "output" }, 56 | "target": { "port": "outputParamArray" } 57 | } 58 | ], 59 | "node": { 60 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b2", 61 | "description": "MapNode1", 62 | "inputs": [ 63 | { "name": "inputParam", "type": "parameter" }, 64 | { "name": "val", "type": "parameter" }, 65 | { "name": "inputScrt", "type": "env_secret" } 66 | ], 67 | "outputs": [ 68 | { "name": "output", "type": "parameter_array" } 69 | ], 70 | "type": "component", 71 | "implementation": { 72 | "type": "brick", 73 | "container": { 74 | "name": "containername_n1_b1", 75 | "image": "alpine:latest", 76 | "command": ["sh", "-c", "echo $inputScrt; ARR=\"[\"; for i in $(seq $0); do ARR=$ARR\\\"$1$i\\\"\", \" ; done; ARR=${ARR%??}\"]\"; echo $ARR | tee /tmp/prm"], 77 | "args": [] 78 | }, 79 | "args": [ 80 | { 81 | "source": { "port": "inputParam" }, 82 | "target": { "type": "parameter" } 83 | }, 84 | { 85 | "source": { "port": "val" }, 86 | "target": { "type": "parameter" } 87 | } 88 | ], 89 | "results": [ 90 | { 91 | "source": { "file": "/tmp/prm" }, 92 | "target": { "port": "output" } 93 | } 94 | ] 95 | } 96 | } 97 | } 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /models/examples/job-mounts.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Job example with volume mounts", 3 | "type": "job", 4 | "inputValues": [ 5 | { 6 | "value": "{\"name\":\"workdir\",\"persistentVolumeClaim\":{\"claimName\":\"my-existing-volume\"}}", 7 | "target": "mount-a" 8 | } 9 | ], 10 | "workflow": { 11 | "name": "hello-mount-example", 12 | "description": "Test workflow with a mounted volume", 13 | "type": "workflow", 14 | "workspace": "argo", 15 | "component": { 16 | "description": "My cool component, that can read from a mount.", 17 | "inputs": [{ "name": "mount-a", "type": "volume" }], 18 | "outputs": [], 19
| "type": "component", 20 | "implementation": { 21 | "type": "brick", 22 | "container": { 23 | "name": "containername", 24 | "image": "alpine:latest", 25 | "command": ["sh", "-c", "ls /volumes/mount-a"] 26 | } 27 | } 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /models/examples/minimal-any-component.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Smallest possible component uses the any implementation", 3 | "type": "component", 4 | "implementation": { 5 | "type": "any" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /models/examples/minimal-any-workflow.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "miny-wf", 3 | "description": "A minimal workflow with an inline any-component", 4 | "type": "workflow", 5 | "component": { 6 | "description": "Smallest possible component uses the any implementation", 7 | "type": "component", 8 | "implementation": { 9 | "type": "any" 10 | } 11 | }, 12 | "workspace": "sandbox-project-a" 13 | } 14 | -------------------------------------------------------------------------------- /models/examples/minimal-brick-component.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "A brick component", 3 | "type": "component", 4 | "implementation": { 5 | "type": "brick", 6 | "container": { 7 | "name": "containername", 8 | "image": "docker/whalesay", 9 | "command": ["cowsay"], 10 | "args": ["Hello Test"] 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /models/examples/minimal-conditional-component.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "If/else component", 3 | "type": "component", 4 | "implementation": { 5 | "type": "conditional", 6 | "nodeTrue": { 7 | "description": "", 8 | "type": "component", 9 | "implementation": { 10 | "type": "brick", 11 | "container": { 12 | "name": "containername", 13 | "image": "alpine:latest", 14 | "command": ["sh"] 15 | } 16 | } 17 | }, 18 | "nodeFalse": { 19 | "version": 2, 20 | "uid": "44763f88-7f51-11ec-a8a3-0242ac120002" 21 | }, 22 | "expression": { 23 | "left": { 24 | "name": "valFromParam", 25 | "mediatype": ["number"], 26 | "type": "parameter" 27 | }, 28 | "operator": ">=", 29 | "right": "5" 30 | } 31 | } 32 | } -------------------------------------------------------------------------------- /models/examples/minimal-graph-component.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "An single node graph component", 3 | "type": "component", 4 | "implementation": { 5 | "type": "graph", 6 | "nodes": [ 7 | { 8 | "id": "gaia", 9 | "node": { 10 | "description": "A brick component", 11 | "type": "component", 12 | "implementation": { 13 | "type": "brick", 14 | "container": { 15 | "name": "containername", 16 | "image": "docker/whalesay", 17 | "command": ["cowsay"], 18 | "args": ["Hello Test"] 19 | } 20 | } 21 | } 22 | } 23 | ] 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /models/examples/minimal-map-component.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "An map component", 3 | "version": { 4 | "current": 5, 5 | "tags": ["tag1", "tag2"], 6 | "previous": 
{ 7 | "version": 10, 8 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c0" 9 | } 10 | }, 11 | "type": "component", 12 | "implementation": { 13 | "type": "map", 14 | "node": { 15 | "description": "A brick component", 16 | "version": { 17 | "current": 1, 18 | "tags": ["tag3", "tag2"], 19 | "previous": { 20 | "version": 0 21 | } 22 | }, 23 | "type": "component", 24 | "implementation": { 25 | "type": "brick", 26 | "container": { 27 | "name": "containername", 28 | "image": "docker/whalesay", 29 | "command": ["cowsay"], 30 | "args": ["Hello Test"] 31 | } 32 | } 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /models/examples/multi-level-secrets.json: -------------------------------------------------------------------------------- 1 | { 2 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c0", 3 | "description": "My cool graph", 4 | "inputs": [ 5 | { "name": "seedT", "mediatype": ["integer"], "type": "parameter" }, 6 | { "name": "secretL1", "mediatype": ["env_secret"], "type": "env_secret" }, 7 | { "name": "secretL2", "mediatype": ["env_secret"], "type": "env_secret" }, 8 | { "name": "secretL3", "mediatype": ["env_secret"], "type": "env_secret" } 9 | ], 10 | "outputs": [], 11 | "type": "component", 12 | "implementation": { 13 | "type": "graph", 14 | "inputMappings": [ 15 | { 16 | "source": { "port": "seedT" }, 17 | "target": { "node": "N1", "port": "seedN1" } 18 | }, 19 | { 20 | "source": { "port": "secretL1" }, 21 | "target": { "node": "N1", "port": "secretB1" } 22 | }, 23 | { 24 | "source": { "port": "secretL2" }, 25 | "target": { "node": "N1", "port": "secretB2" } 26 | }, 27 | { 28 | "source": { "port": "secretL1" }, 29 | "target": { "node": "N2", "port": "secretG1" } 30 | }, 31 | { 32 | "source": { "port": "secretL2" }, 33 | "target": { "node": "N2", "port": "secretG2" } 34 | }, 35 | { 36 | "source": { "port": "secretL3" }, 37 | "target": { "node": "N2", "port": "secretG3" } 38 | } 39 | ], 40 | "nodes": [ 41 | { 42 | "id": "N1", 43 | "node": { 44 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b1", 45 | "description": "B1", 46 | "inputs": [ 47 | { "name": "seedN1", "mediatype": ["integer"], "type": "parameter" }, 48 | { "name": "secretB1", "mediatype": ["env_secret"], "type": "env_secret" }, 49 | { "name": "secretB2", "mediatype": ["env_secret"], "type": "env_secret" } 50 | ], 51 | "outputs": [], 52 | "type": "component", 53 | "implementation": { 54 | "type": "brick", 55 | "container": { 56 | "name": "containername_n1_b1", 57 | "image": "alpine:latest", 58 | "command": ["sh", "-c"], 59 | "args": [] 60 | }, 61 | "args": [ 62 | { "source": "echo " }, 63 | { 64 | "source": { "port": "seedN1" }, 65 | "target": { "type": "parameter", "name": "seed" } 66 | }, 67 | { "source": "; echo $secretB1 ; echo $secretB2"} 68 | ] 69 | } 70 | } 71 | }, 72 | { 73 | "id": "N2", 74 | "node": { 75 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c2", 76 | "description": "G2", 77 | "inputs": [ 78 | { "name": "secretG1", "mediatype": ["env_secret"], "type": "env_secret" }, 79 | { "name": "secretG2", "mediatype": ["env_secret"], "type": "env_secret" }, 80 | { "name": "secretG3", "mediatype": ["env_secret"], "type": "env_secret" }, 81 | { "name": "secretG4", "mediatype": ["env_secret"], "type": "env_secret" } 82 | ], 83 | "outputs": [], 84 | "type": "component", 85 | "implementation": { 86 | "type": "graph", 87 | "inputMappings": [ 88 | { 89 | "source": { "port": "secretG1" }, 90 | "target": { "node": "N2G2B2", "port": "secretW1" } 91 | }, 92 | { 93 | "source": { "port": 
"secretG2" }, 94 | "target": { "node": "N2G2B2", "port": "secretW2" } 95 | }, 96 | { 97 | "source": { "port": "secretG3" }, 98 | "target": { "node": "N2G2B2", "port": "secretW3" } 99 | }, 100 | { 101 | "source": { "port": "secretG4" }, 102 | "target": { "node": "N2G2B2", "port": "secretW4" } 103 | } 104 | ], 105 | "nodes": [ 106 | { 107 | "id": "N2G2B2", 108 | "node": { 109 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b2", 110 | "description": "B2", 111 | "inputs": [ 112 | { "name": "secretW1", "mediatype": ["env_secret"], "type": "env_secret" }, 113 | { "name": "secretW2", "mediatype": ["env_secret"], "type": "env_secret" }, 114 | { "name": "secretW3", "mediatype": ["env_secret"], "type": "env_secret" }, 115 | { "name": "secretW4", "mediatype": ["env_secret"], "type": "env_secret" } 116 | ], 117 | "outputs": [], 118 | "type": "component", 119 | "implementation": { 120 | "type": "brick", 121 | "container": { 122 | "name": "containername", 123 | "image": "alpine:latest", 124 | "command": ["sh", "-c"], 125 | "args": [] 126 | }, 127 | "args": [ 128 | {"source": "echo $secretW1; echo $secretW2; echo $secretW3; echo $secretW4"} 129 | ] 130 | } 131 | } 132 | } 133 | ], 134 | "edges": [] 135 | } 136 | } 137 | } 138 | ], 139 | "edges": [ 140 | ] 141 | } 142 | } 143 | 144 | 145 | -------------------------------------------------------------------------------- /models/examples/single-node-graph-component.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "An single node graph component", 3 | "inputs": [{ "name": "greeting", "type": "parameter" }], 4 | "type": "component", 5 | "implementation": { 6 | "type": "graph", 7 | "nodes": [ 8 | { 9 | "id": "A", 10 | "node": { 11 | "description": "A brick component", 12 | "type": "component", 13 | "implementation": { 14 | "type": "brick", 15 | "container": { 16 | "name": "containername", 17 | "image": "docker/whalesay", 18 | "command": ["cowsay"], 19 | "args": ["hello world"] 20 | } 21 | } 22 | } 23 | } 24 | ] 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /models/examples/two-node-graph-component-with-cref.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "An single node graph component", 3 | "inputs": [ 4 | { "name": "greeting", "mediatype": ["string"], "type": "parameter" }, 5 | { "name": "sender", "mediatype": ["string"], "type": "parameter" } 6 | ], 7 | "type": "component", 8 | "implementation": { 9 | "type": "graph", 10 | "inputMappings": [ 11 | { 12 | "source": { "port": "greeting" }, 13 | "target": { "node": "greeter", "port": "greeting" } 14 | } 15 | ], 16 | "nodes": [ 17 | { 18 | "id": "greeter-node", 19 | "node": { 20 | "description": "A brick component", 21 | "inputs": [ 22 | { "name": "greeting", "mediatype": ["string"], "type": "parameter" } 23 | ], 24 | "type": "component", 25 | "implementation": { 26 | "type": "brick", 27 | "container": { 28 | "name": "containername", 29 | "image": "docker/whalesay", 30 | "command": ["cowsay"], 31 | "args": ["Hello Test!"] 32 | }, 33 | "args": [ 34 | { 35 | "source": "Hello static text.", 36 | "description": "A static argument" 37 | }, 38 | { 39 | "source": { "port": "GREETING" }, 40 | "target": { "type": "env", "name": "GREET" }, 41 | "description": "A variable stored in env $(GREET) and expanded by k8s upon execution" 42 | } 43 | ] 44 | } 45 | } 46 | }, 47 | { 48 | "id": "responder-node", 49 | "node": 
"44763f88-7f51-11ec-a8a3-0242ac120002" 50 | } 51 | ] 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /models/examples/two-node-graph-component.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "An single node graph component", 3 | "inputs": [ 4 | { "name": "greeting", "mediatype": ["string"], "type": "parameter" }, 5 | { "name": "sender", "mediatype": ["string"], "type": "parameter" } 6 | ], 7 | "type": "component", 8 | "implementation": { 9 | "type": "graph", 10 | "inputMappings": [ 11 | { 12 | "source": { "port": "greeting" }, 13 | "target": { "node": "greeter", "port": "greeting" } 14 | } 15 | ], 16 | "nodes": [ 17 | { 18 | "id": "greeter-node", 19 | "node": { 20 | "description": "A brick component", 21 | "inputs": [ 22 | { 23 | "name": "greeting", 24 | "mediatype": ["string"], 25 | "type": "parameter", 26 | "userdata": {"position":{"x":3,"y":6,"note":"save test without formating to avoid breaking the tests."}} 27 | } 28 | ], 29 | "type": "component", 30 | "implementation": { 31 | "type": "brick", 32 | "container": { 33 | "name": "containername", 34 | "image": "docker/whalesay", 35 | "command": ["cowsay"], 36 | "args": ["Hello Test!"] 37 | }, 38 | "args": [ 39 | { 40 | "source": "Hello static text.", 41 | "description": "A static argument" 42 | }, 43 | { 44 | "source": { "port": "GREETING" }, 45 | "target": { "type": "env", "name": "GREET" }, 46 | "description": "A variable stored in env $(GREET) and expanded by k8s upon execution" 47 | } 48 | ] 49 | } 50 | } 51 | }, 52 | { 53 | "id": "responder-node", 54 | "node": { 55 | "description": "A brick component", 56 | "inputs": [ 57 | { "name": "sender", "mediatype": ["string"], "type": "parameter" } 58 | ], 59 | "type": "component", 60 | "implementation": { 61 | "type": "brick", 62 | "container": { 63 | "name": "containername", 64 | "image": "docker/whalesay", 65 | "command": ["cowsay"], 66 | "args": ["Hello there! From... 
"] 67 | }, 68 | "args": [ 69 | { 70 | "source": { "port": "sender" }, 71 | "target": { "name": "from", "type": "parameter" } 72 | } 73 | ] 74 | } 75 | } 76 | } 77 | ] 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /models/job.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" 7 | "github.com/pkg/errors" 8 | ) 9 | 10 | type Value struct { 11 | Value interface{} `json:"value" bson:"value"` 12 | Target string `json:"target" bson:"target"` 13 | } 14 | 15 | func (v *Value) UnmarshalJSON(document []byte) error { 16 | var partialValue struct { 17 | Target string `json:"target"` 18 | Value json.RawMessage `json:"value"` 19 | } 20 | 21 | err := json.Unmarshal(document, &partialValue) 22 | if err != nil { 23 | return errors.Wrapf(err, "cannot unmarshal partial value") 24 | } 25 | v.Target = partialValue.Target 26 | 27 | var arr []string 28 | err = json.Unmarshal(partialValue.Value, &arr) 29 | if err == nil { 30 | v.Value = arr 31 | return nil 32 | } 33 | 34 | var str string 35 | err = json.Unmarshal(partialValue.Value, &str) 36 | if err == nil { 37 | v.Value = str 38 | return nil 39 | } 40 | 41 | return err 42 | } 43 | 44 | type JobEvent wfv1.Workflow 45 | 46 | type Job struct { 47 | Metadata `json:",inline" bson:",inline"` 48 | // the workflow is either a workflow or a reference to one in the database 49 | Type ComponentType `json:"type" bson:"type"` 50 | InputValues []Value `json:"inputValues,omitempty" bson:"inputValues,omitempty"` 51 | Workflow Workflow `json:"workflow" bson:"workflow"` 52 | Events []JobEvent `json:"events,omitempty" bson:"events,omitempty"` 53 | } 54 | 55 | type JobStatus struct { 56 | Uid ComponentReference `json:"uid" bson:"uid"` 57 | Status wfv1.WorkflowPhase `json:"status" bson:"status"` 58 | } 59 | 60 | type JobPostRequest struct { 61 | Job Job `json:"job"` 62 | SubmitOptions JobPostOptions `json:"options"` 63 | } 64 | 65 | type JobPostOptions struct { 66 | Constants []interface{} `json:"constants"` 67 | Tags []string `json:"tags"` 68 | } 69 | -------------------------------------------------------------------------------- /models/spec/any.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "type": { 5 | "type": "string", 6 | "pattern": "^any$" 7 | } 8 | }, 9 | "required": ["type"], 10 | "additionalProperties": false 11 | } 12 | -------------------------------------------------------------------------------- /models/spec/arg.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "source": { 5 | "oneOf": [ 6 | { "type": "string" }, 7 | { 8 | "$ref": "port.schema.json" 9 | } 10 | ] 11 | }, 12 | "target": { 13 | "type": "object", 14 | "properties": { 15 | "type": { "type": "string" }, 16 | "prefix": { 17 | "type": "string", 18 | "description": "Prefix added in front of the value extracted from the argument (e.g. prefix \"--value=\" will result in \"--value={{parameter_value}}\")" 19 | }, 20 | "suffix": { 21 | "type": "string", 22 | "description": "Suffix added at the end of the value extracted from the argument (e.g. 
prefix \"/file.txt\" will result in \"{{parameter_value}}/file.txt\")" 23 | } 24 | }, 25 | "required": ["type"], 26 | "additionalItems": false 27 | }, 28 | "description": { 29 | "type": "string" 30 | } 31 | }, 32 | "additionalProperties": false, 33 | "required": ["source"] 34 | } 35 | -------------------------------------------------------------------------------- /models/spec/brick.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "type": { 5 | "type": "string", 6 | "pattern": "^brick$" 7 | }, 8 | "container": { 9 | "$ref": "https://raw.githubusercontent.com/kubernetes/kubernetes/v1.21.2/api/openapi-spec/swagger.json#/definitions/io.k8s.api.core.v1.Container" 10 | }, 11 | "args": { 12 | "type": "array", 13 | "description": "An array of arguments that are appended to the k8s container.args above", 14 | "items": { "$ref": "arg.schema.json" } 15 | }, 16 | "results": { 17 | "type": "array", 18 | "description": "An array of results that are mapped to the component output interface", 19 | "items": { "$ref": "res.schema.json" } 20 | } 21 | }, 22 | "additionalProperties": false, 23 | "required": ["type", "container"] 24 | } 25 | -------------------------------------------------------------------------------- /models/spec/component.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "type": { 5 | "type": "string", 6 | "pattern": "^component$" 7 | }, 8 | "inputs": { 9 | "description": "The output interface; the data objects going out.", 10 | "type": "array", 11 | "minItems": 0, 12 | "uniqueItems": true, 13 | "items": { 14 | "$ref": "data.schema.json" 15 | } 16 | }, 17 | "outputs": { 18 | "description": "The input interface; the data objects going in.", 19 | "type": "array", 20 | "minItems": 0, 21 | "uniqueItems": true, 22 | "items": { 23 | "$ref": "data.schema.json" 24 | } 25 | }, 26 | "implementation": { 27 | "oneOf": [ 28 | { 29 | "$ref": "any.schema.json" 30 | }, 31 | { 32 | "$ref": "brick.schema.json" 33 | }, 34 | { 35 | "$ref": "graph.schema.json" 36 | }, 37 | { 38 | "$ref": "map.schema.json" 39 | }, 40 | { 41 | "$ref": "conditional.schema.json" 42 | } 43 | ] 44 | } 45 | }, 46 | "allOf": [{ "$ref": "metadata.schema.json" }], 47 | "unevaluatedProperties": false, 48 | "required": ["type", "implementation"] 49 | } 50 | -------------------------------------------------------------------------------- /models/spec/componentpostrequest.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "component": { 5 | "$ref": "component.schema.json" 6 | }, 7 | "options": { 8 | "description": "Unspecified for future needs", 9 | "type": "object" 10 | } 11 | }, 12 | "unevaluatedProperties": false, 13 | "required": ["component"] 14 | } 15 | -------------------------------------------------------------------------------- /models/spec/conditional.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "type": { 5 | "type": "string", 6 | "pattern": "^conditional$" 7 | }, 8 | "nodeTrue": { 9 | "oneOf": [ 10 | { 11 | "$ref": "cref.schema.json" 12 | }, 13 | { 14 | "$ref": "crefversion.schema.json" 15 | }, 16 | { 17 | "$ref": "component.schema.json" 18 | } 19 | ] 20 | }, 21 | "nodeFalse": { 22 | "oneOf": [ 23 | { 24 | "$ref": "cref.schema.json" 25 | }, 26 | 
{ 27 | "$ref": "crefversion.schema.json" 28 | }, 29 | { 30 | "$ref": "component.schema.json" 31 | } 32 | ] 33 | }, 34 | "expression": { 35 | "$ref": "expression.schema.json" 36 | }, 37 | "inputMappings": { 38 | "description": "The mapping of input ports to individual graph-node ports", 39 | "type": "array", 40 | "items": { 41 | "$ref": "mapping.schema.json" 42 | } 43 | }, 44 | "outputMappings": { 45 | "description": "The mapping of graph node-ports to component interface ports", 46 | "type": "array", 47 | "items": { 48 | "$ref": "mapping.schema.json" 49 | } 50 | } 51 | }, 52 | "required": ["type", "nodeTrue", "expression"], 53 | "additionalProperties": false 54 | } 55 | -------------------------------------------------------------------------------- /models/spec/cref.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "string", 3 | "format": "uuid", 4 | "description": "A FlowifyObjectReference is any object inside the workflow graph that can accept and/or output a dataflow object.", 5 | "example": "44763f88-7f51-11ec-a8a3-0242ac120002" 6 | } 7 | -------------------------------------------------------------------------------- /models/spec/crefversion.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "version": { 5 | "type": "number", 6 | "minimum": 0 7 | }, 8 | "uid": { 9 | "$ref": "cref.schema.json" 10 | } 11 | }, 12 | "additionalProperties": false 13 | } 14 | -------------------------------------------------------------------------------- /models/spec/data.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "name": { 5 | "type": "string", 6 | "pattern": "^[a-zA-Z][-a-zA-Z0-9_]*$" 7 | }, 8 | "mediatype": { 9 | "type": "array", 10 | "items": { 11 | "type": "string" 12 | } 13 | }, 14 | "type": { 15 | "type": "string", 16 | "pattern": "^(parameter|env_secret|artifact|parameter_array|volume)$" 17 | }, 18 | "userdata": { 19 | "type": "object", 20 | "description": "An opaque field for frontend applications, never touched by the backend" 21 | } 22 | }, 23 | "additionalProperties": false, 24 | "required": ["name", "type"] 25 | } 26 | -------------------------------------------------------------------------------- /models/spec/dataarray.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "name": { 5 | "type": "string" 6 | }, 7 | "type": { 8 | "type": "string", 9 | "pattern": "^(parameter|artifact)$" 10 | }, 11 | "userdata": { 12 | "type": "object", 13 | "description": "An opaque field for frontend applications, never touched by the backend" 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /models/spec/edge.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "source": { 5 | "$ref": "port.schema.json" 6 | }, 7 | "target": { 8 | "$ref": "port.schema.json" 9 | } 10 | }, 11 | "additionalProperties": false, 12 | "required": ["source", "target"] 13 | } 14 | -------------------------------------------------------------------------------- /models/spec/expression.schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "object", 3 | "properties": { 4 | "left": { 5 | "oneOf": [ 
6 | { "type": "string" }, 7 | { 8 | "$ref": "data.schema.json" 9 | } 10 | ] 11 | }, 12 | "right": { 13 | "oneOf": [ 14 | { "type": "string" }, 15 | { 16 | "$ref": "data.schema.json" 17 | } 18 | ] 19 | }, 20 | "operator": { 21 | "type": "string", 22 | "pattern": "^(==|!=|<|>|<=|>=)$" 23 | } 24 | }, 25 | "additionalProperties": false, 26 | "required": ["left", "right", "operator"] 27 | } 28 | -------------------------------------------------------------------------------- /models/spec/flowify.rapidoc.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |
4 | 5 | 6 | 7 | 8 |