├── dev
├── example_data
│ └── dump
│ │ ├── config
│ │ │ ├── external_validation_keys.bson
│ │ │ ├── tenantMigrationDonors.bson
│ │ │ ├── tenantMigrationRecipients.bson
│ │ │ ├── tenantMigrationDonors.metadata.json
│ │ │ ├── external_validation_keys.metadata.json
│ │ │ └── tenantMigrationRecipients.metadata.json
│ │ ├── admin
│ │ │ ├── system.version.bson
│ │ │ └── system.version.metadata.json
│ │ └── test
│ │ │ ├── Jobs.bson
│ │ │ ├── Components.bson
│ │ │ ├── Workflows.bson
│ │ │ ├── Jobs.metadata.json
│ │ │ ├── Components.metadata.json
│ │ │ └── Workflows.metadata.json
├── Dockerfile.mongo
├── kind.yaml
├── argo-cluster-install
│ ├── kustomization.yaml
│ ├── sandbox-project-a
│ │ ├── namespace.yaml
│ │ ├── kustomization.yaml
│ │ ├── secrets.yaml
│ │ ├── configmaps.yaml
│ │ └── roles.yaml
│ ├── sandbox-project-b
│ │ ├── namespace.yaml
│ │ ├── kustomization.yaml
│ │ ├── secrets.yaml
│ │ └── configmaps.yaml
│ └── base
│ │ ├── namespace.yaml
│ │ ├── secrets.yaml
│ │ ├── kustomization.yaml
│ │ ├── services.yaml
│ │ ├── deployments.yaml
│ │ ├── clusterroles.yaml
│ │ └── configmaps.yaml
├── Makefile
├── Dockerfile.server_builder
├── flowify_server_runner.sh
├── kind_cluster_config_export.sh
├── Dockerfile.cluster
├── docker-compose-e2e.yaml
├── Dockerfile.server
├── cluster_runner.sh
└── docker-compose.yaml
├── .gitattributes
├── models
├── spec
│ ├── mapping.schema.json
│ ├── port.schema.json
│ ├── any.schema.json
│ ├── cref.schema.json
│ ├── crefversion.schema.json
│ ├── edge.schema.json
│ ├── metadatalist.schema.json
│ ├── metadataworkspacelist.schema.json
│ ├── jobpostrequest.schema.json
│ ├── pageinfo.schema.json
│ ├── volumelist.schema.json
│ ├── workflowpostrequest.schema.json
│ ├── componentpostrequest.schema.json
│ ├── flowify.rapidoc.html
│ ├── secret.schema.json
│ ├── dataarray.schema.json
│ ├── workflow.schema.json
│ ├── volume.schema.json
│ ├── jobstatus.schema.json
│ ├── metadataworkspace.schema.json
│ ├── value.schema.json
│ ├── version.schema.json
│ ├── metadata.schema.json
│ ├── expression.schema.json
│ ├── userinfo.schema.json
│ ├── data.schema.json
│ ├── workspace.schema.json
│ ├── flowify.redoc.html
│ ├── job.schema.json
│ ├── node.schema.json
│ ├── brick.schema.json
│ ├── res.schema.json
│ ├── map.schema.json
│ ├── arg.schema.json
│ ├── graph.schema.json
│ ├── flowify.swagger.html
│ ├── component.schema.json
│ └── conditional.schema.json
├── examples
│ ├── minimal-any-component.json
│ ├── minimal-brick-component.json
│ ├── minimal-any-workflow.json
│ ├── hello-world-workflow.json
│ ├── minimal-graph-component.json
│ ├── single-node-graph-component.json
│ ├── minimal-conditional-component.json
│ ├── minimal-map-component.json
│ ├── job-mounts.json
│ ├── brick-parameter-component.json
│ ├── two-node-graph-component-with-cref.json
│ ├── graph-input-volumes.json
│ ├── two-node-graph-component.json
│ ├── job-map-example.json
│ ├── if-statement.json
│ └── multi-level-secrets.json
├── job.go
└── validate.go
├── sandbox
├── list-flowify-workflows.sh
├── reset.sh
├── list-flowify-workflow-versions.sh
├── userinfo.sh
├── list-workspaces.sh
├── list-workflows.sh
├── get-workflow.sh
├── list-workflow-templates.sh
├── get-flowify-workflow.sh
├── get-workflow-template.sh
├── post-flowify-workflow.sh
├── lint-workflow.sh
├── stop.sh
├── submit-flowify-workflow.sh
├── Makefile
├── secrets.yaml
├── start.sh
└── sandbox-config.yaml
├── .vscode
└── launch.json
├── .githooks
└── pre-commit
├── .gitignore
├── e2etest
├── default-roles.yaml
├── workspace_test.go
├── Makefile
├── workspace_cm_test.yaml
├── artifact_test.go
├── test.sh
├── secret_test.go
└── component_test.go
├── rest
├── userinfo.go
├── secrets.go
└── workspaces.go
├── Dockerfile
├── docker-compose-tests.yaml
├── pkg
├── workspace
│ ├── mock.go
│ └── workspace_test.go
└── secret
│ ├── mock.go
│ ├── config.go
│ └── secret_test.go
├── .github
└── workflows
│ ├── e2etest.yaml
│ ├── prod.yaml
│ ├── test.yaml
│ ├── public_image.yml
│ ├── public_image_dev.yml
│ ├── dev_env.yaml
│ └── deploy.yaml
├── config.yml
├── cmd
├── validate
│ └── main.go
├── transpile
│ └── main.go
└── dereference
│ └── main.go
├── transpiler
└── argo.go
├── apiserver
├── apiserver_test.go
└── config.go
├── user
└── user.go
├── storage
├── storage.go
├── references.go
├── local.go
├── parsequery_test.go
└── parsequery.go
├── auth
├── config.go
├── auth.go
└── azure_token.go
├── Makefile
└── main.go
/dev/example_data/dump/config/external_validation_keys.bson:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/dev/example_data/dump/config/tenantMigrationDonors.bson:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.pdf filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/dev/example_data/dump/config/tenantMigrationRecipients.bson:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/dev/Dockerfile.mongo:
--------------------------------------------------------------------------------
1 | FROM mongo:5.0
2 |
3 | COPY ./dev/example_data/ .
--------------------------------------------------------------------------------
/dev/example_data/dump/admin/system.version.bson:
--------------------------------------------------------------------------------
1 | ; _id featureCompatibilityVersion version 5.0
--------------------------------------------------------------------------------
/dev/kind.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | nodes:
4 | - role: control-plane
5 | # - role: worker
--------------------------------------------------------------------------------
/models/spec/mapping.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "allOf": [
3 | {
4 | "$ref": "edge.schema.json"
5 | }
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/dev/example_data/dump/test/Jobs.bson:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/equinor/flowify-workflows-server/HEAD/dev/example_data/dump/test/Jobs.bson
--------------------------------------------------------------------------------
/dev/example_data/dump/test/Components.bson:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/equinor/flowify-workflows-server/HEAD/dev/example_data/dump/test/Components.bson
--------------------------------------------------------------------------------
/dev/example_data/dump/test/Workflows.bson:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/equinor/flowify-workflows-server/HEAD/dev/example_data/dump/test/Workflows.bson
--------------------------------------------------------------------------------
/dev/argo-cluster-install/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - base
6 | - sandbox-project-a
--------------------------------------------------------------------------------
/dev/argo-cluster-install/sandbox-project-a/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | labels:
6 | app.kubernetes.io/part-of: "flowify"
7 | name: "sandbox-project-a"
8 | ---
--------------------------------------------------------------------------------
/dev/argo-cluster-install/sandbox-project-b/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | labels:
6 | app.kubernetes.io/part-of: "flowify"
7 | name: "sandbox-project-b"
8 | ---
9 |
--------------------------------------------------------------------------------
/dev/example_data/dump/test/Jobs.metadata.json:
--------------------------------------------------------------------------------
1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"}],"uuid":"e244652e3a7b4cddabe930f4d5bc87ff","collectionName":"Jobs","type":"collection"}
--------------------------------------------------------------------------------
/models/examples/minimal-any-component.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": "Smallest possible component uses the any implementation",
3 | "type": "component",
4 | "implementation": {
5 | "type": "any"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/base/namespace.yaml:
--------------------------------------------------------------------------------
1 | # Argo server, workflow, configMap, secrets namespace
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | labels:
6 | app.kubernetes.io/part-of: "flowify"
7 | name: "argo"
--------------------------------------------------------------------------------
/dev/example_data/dump/test/Components.metadata.json:
--------------------------------------------------------------------------------
1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"}],"uuid":"98689d39dbc04881b98cdb43e6696f73","collectionName":"Components","type":"collection"}
--------------------------------------------------------------------------------
/dev/example_data/dump/test/Workflows.metadata.json:
--------------------------------------------------------------------------------
1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"}],"uuid":"7b4157a1f2a64ea098a001f68b3fb279","collectionName":"Workflows","type":"collection"}
--------------------------------------------------------------------------------
/sandbox/list-flowify-workflows.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # token not used
4 | # token=$(yq .token secrets.yaml)
5 |
6 | curl -X 'GET' "http://localhost:8842/api/v1/flowify-workflows/" \
7 | -H 'accept: application/json' \
8 | $@
--------------------------------------------------------------------------------
/dev/example_data/dump/admin/system.version.metadata.json:
--------------------------------------------------------------------------------
1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"}],"uuid":"031d66b3c1dc4832a0d66f16d7ad0bad","collectionName":"system.version","type":"collection"}
--------------------------------------------------------------------------------
/dev/argo-cluster-install/sandbox-project-b/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: sandbox-project-b
5 | resources:
6 | - namespace.yaml
7 | - configmaps.yaml
8 | - secrets.yaml
--------------------------------------------------------------------------------
/models/spec/port.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "port": {
5 | "type": "string"
6 | },
7 | "node": {
8 | "type": "string"
9 | }
10 | },
11 | "required": ["port"]
12 | }
13 |
--------------------------------------------------------------------------------
/sandbox/reset.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #set +x
3 |
4 |
5 | # Start a kubernetes cluster
6 | minikube start
7 |
8 | # Inject the default service account with the corresponding roles
9 | kubectl delete -f sandbox-config.yaml
10 |
11 |
--------------------------------------------------------------------------------
/models/spec/any.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "type": {
5 | "type": "string",
6 | "pattern": "^any$"
7 | }
8 | },
9 | "required": ["type"],
10 | "additionalProperties": false
11 | }
12 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/base/secrets.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | labels:
6 | app: minio
7 | name: my-minio-cred
8 | stringData:
9 | accesskey: admin
10 | secretkey: password
11 | type: Opaque
12 | ---
13 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/sandbox-project-a/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: sandbox-project-a
5 | resources:
6 | - namespace.yaml
7 | - configmaps.yaml
8 | - roles.yaml
9 | - secrets.yaml
--------------------------------------------------------------------------------
/dev/Makefile:
--------------------------------------------------------------------------------
1 | all: docker
2 |
3 | docker:
4 | # build services
5 | docker-compose -f docker-compose.yaml build
6 | # build e2e-test-runner
7 | docker-compose -f docker-compose.yaml -f docker-compose-e2e.yaml build flowify-e2e-runner
8 |
9 | .PHONY: docker
10 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/sandbox-project-a/secrets.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | labels:
6 | app: minio
7 | name: my-minio-cred
8 | stringData:
9 | accesskey: admin
10 | secretkey: password
11 | type: Opaque
12 | ---
13 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/sandbox-project-b/secrets.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | labels:
6 | app: minio
7 | name: my-minio-cred
8 | stringData:
9 | accesskey: admin
10 | secretkey: password
11 | type: Opaque
12 | ---
13 |
--------------------------------------------------------------------------------
/models/spec/cref.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "string",
3 | "format": "uuid",
4 | "description": "A FlowifyObjectReference is any object inside the workflow graph that can accept and/or output a dataflow object.",
5 | "example": "44763f88-7f51-11ec-a8a3-0242ac120002"
6 | }
7 |
--------------------------------------------------------------------------------
/sandbox/list-flowify-workflow-versions.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # token not used
4 | # token=$(yq .token secrets.yaml)
5 |
6 | fwf=$1
7 | shift
8 |
9 | curl -X 'GET' "http://localhost:8842/api/v1/flowify-workflows/${fwf}/versions/" \
10 | -H 'accept: application/json' \
11 | $@
--------------------------------------------------------------------------------
/models/spec/crefversion.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "version": {
5 | "type": "number",
6 | "minimum": 0
7 | },
8 | "uid": {
9 | "$ref": "cref.schema.json"
10 | }
11 | },
12 | "additionalProperties": false
13 | }
14 |
--------------------------------------------------------------------------------
/models/spec/edge.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "source": {
5 | "$ref": "port.schema.json"
6 | },
7 | "target": {
8 | "$ref": "port.schema.json"
9 | }
10 | },
11 | "additionalProperties": false,
12 | "required": ["source", "target"]
13 | }
14 |
--------------------------------------------------------------------------------
/models/spec/metadatalist.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "items": {
5 | "type": "array",
6 | "items": {
7 | "$ref": "metadata.schema.json"
8 | }
9 | },
10 | "pageInfo": {
11 | "$ref": "pageinfo.schema.json"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "0.2.0",
3 | "configurations": [
4 | {
5 | "name": "Launch Package",
6 | "type": "go",
7 | "request": "launch",
8 | "mode": "debug",
9 | "program": "${fileDirname}"
10 | }
11 | ]
12 | }
13 |
--------------------------------------------------------------------------------
/sandbox/userinfo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | if [[ -z $SANDBOX_TOKEN ]]
4 | then
5 | echo "Export env SANDBOX_TOKEN to an appropriate jwt token"
6 | exit 1
7 | fi
8 | curl -X 'GET' "http://localhost:8842/api/v1/userinfo" \
9 | -H 'accept: application/json' \
10 | -H "authorization: bearer $SANDBOX_TOKEN" \
11 | $@
12 |
--------------------------------------------------------------------------------
/sandbox/list-workspaces.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | if [[ -z $SANDBOX_TOKEN ]]
4 | then
5 | echo "Export env SANDBOX_TOKEN to an appropriate jwt token"
6 | exit 1
7 | fi
8 | curl -X 'GET' "http://localhost:8842/api/v1/workspaces/" \
9 | -H 'accept: application/json' \
10 | -H "authorization: bearer $SANDBOX_TOKEN" \
11 | $@
--------------------------------------------------------------------------------
/models/spec/metadataworkspacelist.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "items": {
5 | "type": "array",
6 | "items": {
7 | "$ref": "metadataworkspace.schema.json"
8 | }
9 | },
10 | "pageInfo": {
11 | "$ref": "pageinfo.schema.json"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/models/spec/jobpostrequest.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "job": {
5 | "$ref": "job.schema.json"
6 | },
7 | "options": {
8 | "description": "Unspecified for future needs",
9 | "type": "object"
10 | }
11 | },
12 | "unevaluatedProperties": false,
13 | "required": ["job"]
14 | }
15 |
--------------------------------------------------------------------------------
/models/examples/minimal-brick-component.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": "A brick component",
3 | "type": "component",
4 | "implementation": {
5 | "type": "brick",
6 | "container": {
7 | "name": "containername",
8 | "image": "docker/whalesay",
9 | "command": ["cowsay"],
10 | "args": ["Hello Test"]
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/models/spec/pageinfo.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "totalNumber": {
5 | "type": "number"
6 | },
7 | "limit": {
8 | "type": "number"
9 | },
10 | "skip": {
11 | "type": "number"
12 | }
13 | },
14 | "required": ["totalNumber", "limit", "skip"],
15 |   "additionalProperties": false
16 | }
17 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: argo
5 | resources:
6 | - namespace.yaml
7 | - configmaps.yaml
8 | - secrets.yaml
9 | - https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/install.yaml
10 | - clusterroles.yaml
11 | - deployments.yaml
12 | - services.yaml
--------------------------------------------------------------------------------
/models/spec/volumelist.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "items": {
5 | "type": "array",
6 | "items": {
7 | "$ref": "volume.schema.json"
8 | }
9 | },
10 | "pageInfo": {
11 | "$ref": "pageinfo.schema.json"
12 | }
13 | },
14 |   "additionalProperties": false,
15 | "required": ["items"]
16 | }
17 |
--------------------------------------------------------------------------------
/models/spec/workflowpostrequest.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "workflow": {
5 | "$ref": "workflow.schema.json"
6 | },
7 | "options": {
8 | "description": "Unspecified for future needs",
9 | "type": "object"
10 | }
11 | },
12 | "unevaluatedProperties": false,
13 | "required": ["workflow"]
14 | }
15 |
--------------------------------------------------------------------------------
/models/spec/componentpostrequest.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "component": {
5 | "$ref": "component.schema.json"
6 | },
7 | "options": {
8 | "description": "Unspecified for future needs",
9 | "type": "object"
10 | }
11 | },
12 | "unevaluatedProperties": false,
13 | "required": ["component"]
14 | }
15 |
--------------------------------------------------------------------------------
/dev/example_data/dump/config/tenantMigrationDonors.metadata.json:
--------------------------------------------------------------------------------
1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"},{"v":{"$numberInt":"2"},"key":{"expireAt":{"$numberInt":"1"}},"name":"TenantMigrationDonorTTLIndex","expireAfterSeconds":{"$numberInt":"0"}}],"uuid":"ad328b28de834e40900968113dea9e2d","collectionName":"tenantMigrationDonors","type":"collection"}
--------------------------------------------------------------------------------
/dev/example_data/dump/config/external_validation_keys.metadata.json:
--------------------------------------------------------------------------------
1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"},{"v":{"$numberInt":"2"},"key":{"ttlExpiresAt":{"$numberInt":"1"}},"name":"ExternalKeysTTLIndex","expireAfterSeconds":{"$numberInt":"0"}}],"uuid":"7c0a6005e0f44ed5965e0a320522a822","collectionName":"external_validation_keys","type":"collection"}
--------------------------------------------------------------------------------
/dev/example_data/dump/config/tenantMigrationRecipients.metadata.json:
--------------------------------------------------------------------------------
1 | {"indexes":[{"v":{"$numberInt":"2"},"key":{"_id":{"$numberInt":"1"}},"name":"_id_"},{"v":{"$numberInt":"2"},"key":{"expireAt":{"$numberInt":"1"}},"name":"TenantMigrationRecipientTTLIndex","expireAfterSeconds":{"$numberInt":"0"}}],"uuid":"a4ddd395332a4262828dd57ce1116636","collectionName":"tenantMigrationRecipients","type":"collection"}
--------------------------------------------------------------------------------
/models/spec/flowify.rapidoc.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/models/spec/secret.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "key": {
5 | "description": "The key or name of the secret",
6 | "type": "string"
7 | },
8 | "value": {
9 | "description": "The value of the secret",
10 | "type": "string"
11 | }
12 | },
13 | "additionalProperties": false,
14 | "required": ["key", "value"]
15 | }
16 |
--------------------------------------------------------------------------------
/sandbox/list-workflows.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ns=${1:-sandbox-dev}
3 | shift
4 |
5 | echo "Namespace: $ns"
6 |
7 | if [[ -z $SANDBOX_TOKEN ]]
8 | then
9 | echo "Export env SANDBOX_TOKEN to an appropriate jwt token"
10 | exit 1
11 | fi
12 | curl -X 'GET' "http://localhost:8842/api/v1/workflows/$ns" \
13 | -H 'accept: application/json' \
14 | -H "authorization: bearer $SANDBOX_TOKEN" \
15 | $@
--------------------------------------------------------------------------------
/dev/argo-cluster-install/base/services.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | labels:
6 | app: minio
7 | name: minio
8 | spec:
9 | ports:
10 | - name: api
11 | port: 9000
12 | protocol: TCP
13 | targetPort: 9000
14 | - name: dashboard
15 | port: 9001
16 | protocol: TCP
17 | targetPort: 9001
18 | selector:
19 | app: minio
20 | ---
21 |
--------------------------------------------------------------------------------
/sandbox/get-workflow.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | wft=$1
3 | ns=${2:-sandbox-dev}
4 | shift
5 | shift
6 |
7 | if [[ -z $SANDBOX_TOKEN ]]
8 | then
9 | echo "Export env SANDBOX_TOKEN to an appropriate jwt token"
10 | exit 1
11 | fi
12 |
13 | path="http://localhost:8842/api/v1/workflows/$ns/$wft"
14 |
15 | curl -X 'GET' $path \
16 | -H 'accept: application/json' \
17 | -H "authorization: bearer $SANDBOX_TOKEN" \
18 | $@
--------------------------------------------------------------------------------
/sandbox/list-workflow-templates.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ns=${1:-sandbox-dev}
3 | shift
4 |
5 | echo "Namespace: $ns" >&2
6 |
7 | if [[ -z $SANDBOX_TOKEN ]]
8 | then
9 | echo "Export env SANDBOX_TOKEN to an appropriate jwt token"
10 | exit 1
11 | fi
12 | curl -X 'GET' "http://localhost:8842/api/v1/workflow-templates/$ns" \
13 | -H 'accept: application/json' \
14 | -H "authorization: bearer $SANDBOX_TOKEN" \
15 | $@
--------------------------------------------------------------------------------
/models/examples/minimal-any-workflow.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "miny-wf",
3 | "description": "A minimal workflow with an inline any-component",
4 | "type": "workflow",
5 | "component": {
6 | "description": "Smallest possible component uses the any implementation",
7 | "type": "component",
8 | "implementation": {
9 | "type": "any"
10 | }
11 | },
12 | "workspace": "sandbox-project-a"
13 | }
14 |
--------------------------------------------------------------------------------
/.githooks/pre-commit:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # https://medium.com/@radlinskii/writing-the-pre-commit-git-hook-for-go-files-810f8d5f1c6f
3 |
4 | # Handle missing files
5 | # https://gist.github.com/radlinskii/0ba6ec694b1e590d8457c98a358f335f
6 | STAGED_GO_FILES=$(git diff --cached --name-status --diff-filter d -- '*.go' | awk '{ print $2 }')
7 |
8 | for FILE in $STAGED_GO_FILES
9 | do
10 | go fmt $FILE
11 | git add $FILE
12 | done
13 |
14 | exit 0
15 |
--------------------------------------------------------------------------------
/sandbox/get-flowify-workflow.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set +x
3 |
4 | wf=${1:-hello-world}
5 | shift
6 | ver=${1}
7 | shift
8 |
9 | path="http://localhost:8842/api/v1/flowify-workflows/$wf"
10 |
11 | if [[ -n $ver ]];
12 | then
13 | #append query
14 | path="$path?version=$ver"
15 | fi
16 |
17 | # token not used
18 | # token=$(yq .token secrets.yaml)
19 |
20 | #echo $path
21 |
22 | curl -X 'GET' "$path" \
23 | -H 'accept: application/json' \
24 | $@
--------------------------------------------------------------------------------
/sandbox/get-workflow-template.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | wft=$1
3 | ns=${2:-sandbox-dev}
4 | shift
5 | shift
6 |
7 | if [[ -z $SANDBOX_TOKEN ]]
8 | then
9 | echo "Export env SANDBOX_TOKEN to an appropriate jwt token"
10 | exit 1
11 | fi
12 | path="http://localhost:8842/api/v1/workflow-templates/$ns/$wft"
13 | echo curl "-X 'GET' $path"
14 | curl -X 'GET' $path \
15 | -H 'accept: application/json' \
16 | -H "authorization: bearer $SANDBOX_TOKEN" \
17 | $@
--------------------------------------------------------------------------------
/sandbox/post-flowify-workflow.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #set -x
3 |
4 | fn=${1:--}
5 | shift
6 |
7 | if [[ -z $SANDBOX_TOKEN ]]
8 | then
9 | echo "Export env SANDBOX_TOKEN to an appropriate jwt token"
10 | exit 1
11 | fi
12 |
13 | curl -X 'POST' \
14 | "http://localhost:8842/api/v1/flowify-workflows/" \
15 | -H 'accept: application/json' \
16 | -H 'Content-Type: application/json' \
17 | -H "authorization: bearer $SANDBOX_TOKEN" \
18 | -d @$fn \
19 | $@
20 |
--------------------------------------------------------------------------------
/models/spec/dataarray.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "name": {
5 | "type": "string"
6 | },
7 | "type": {
8 | "type": "string",
9 | "pattern": "^(parameter|artifact)$"
10 | },
11 | "userdata": {
12 | "type": "object",
13 | "description": "An opaque field for frontend applications, never touched by the backend"
14 | }
15 | }
16 | }
--------------------------------------------------------------------------------
/models/spec/workflow.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "allOf": [{ "$ref": "metadata.schema.json" }],
4 | "properties": {
5 | "type": {
6 | "type": "string",
7 | "pattern": "^workflow$"
8 | },
9 | "component": {
10 | "$ref": "component.schema.json"
11 | },
12 | "workspace": {
13 | "type": "string"
14 | }
15 | },
16 | "unevaluatedProperties": false,
17 | "required": ["type", "component", "workspace"]
18 | }
19 |
--------------------------------------------------------------------------------
/models/spec/volume.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "uid": {
5 | "$ref": "cref.schema.json"
6 | },
7 | "workspace": {
8 | "type": "string"
9 | },
10 | "volume": {
11 | "$ref": "https://raw.githubusercontent.com/kubernetes/kubernetes/v1.21.2/api/openapi-spec/swagger.json#/definitions/io.k8s.api.core.v1.Volume"
12 | }
13 | },
14 | "additionalProperties": false,
15 | "required": ["volume", "workspace"]
16 | }
17 |
--------------------------------------------------------------------------------
/sandbox/lint-workflow.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#set -x

# Lint a flowify workflow against the server's workflow-template lint endpoint.
#
# Usage: lint-workflow.sh [namespace] [file] [extra curl args...]
#   namespace defaults to sandbox-project-a
#   file defaults to '-' (read the workflow body from stdin)
# Requires: SANDBOX_TOKEN exported with an appropriate jwt token.
#
# NOTE: shebang is bash (was /bin/sh) because the [[ ]] test below is a bashism.

ns=${1:-sandbox-project-a}
shift

fn=${1:--}
shift

if [[ -z $SANDBOX_TOKEN ]]
then
    echo "Export env SANDBOX_TOKEN to an appropriate jwt token"
    exit 1
fi

# quote the file reference and the pass-through args so paths/flags with
# spaces survive word splitting
curl -X 'POST' \
     "http://localhost:8842/api/v1/workflow-templates/$ns/lint" \
     -H 'accept: application/json' \
     -H 'Content-Type: application/json' \
     -H "authorization: bearer $SANDBOX_TOKEN" \
     -d @"$fn" \
     "$@"
--------------------------------------------------------------------------------
/models/spec/jobstatus.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "uid": {
5 | "type": "string"
6 | },
7 | "status": {
8 | "type": "string",
9 | "pattern": "^(Pending|Running|Succeeded|Failed|Error)$",
"description": "Job status (Pending/Running/Succeeded/Failed/Error). Completed job indicated by one of the: Succeeded/Failed/Error"
11 | }
12 | },
13 | "unevaluatedProperties": false,
14 | "required": ["uid", "status"]
15 | }
16 |
--------------------------------------------------------------------------------
/models/spec/metadataworkspace.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "name": {
5 | "type": "string"
6 | },
7 | "description": {
8 | "type": "string"
9 | },
10 | "modifiedBy": {
11 | "type": "string"
12 | },
13 | "uid": {
14 | "type": "string"
15 | },
16 | "previous": {
17 | "type": "string"
18 | },
19 | "timestamp": {
20 | "type": "string"
21 | },
22 | "workspace": {
23 | "type": "string"
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/sandbox/stop.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#set +x
set -eu -o pipefail

# Stop the local flowify sandbox: first the flowify server itself, then the
# argo workflow-controller (assumed installed under $GOPATH/**/dist/).

killall flowify-workflows-server -v || echo 'flowify-workflows-server already stopped'
pushd "$GOPATH" > /dev/null
# assumes argo workflow-controller is installed in the same tree
controller=$(find . -wholename "*/dist/workflow-controller" | xargs realpath)
popd > /dev/null


# Stop the Argo controller (the original comment said "Launch" -- this kills it)
killall "${controller##*/}" -v || echo "${controller##*/} already stopped"
--------------------------------------------------------------------------------
/models/spec/value.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "value": {
5 | "oneOf": [
6 | {
7 | "type": "string"
8 | },
9 | {
10 | "type": "array",
11 | "minItems": 1,
12 | "uniqueItems": false,
13 | "items": {
14 | "type": "string"
15 | }
16 | }
17 | ]
18 | },
19 | "target": {
20 | "type": "string"
21 | }
22 | },
23 | "required": ["value", "target"]
24 | }
25 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.swp
2 |
3 | ### Go ###
4 | # Binaries for programs and plugins
5 | *.exe
6 | *.exe~
7 | *.dll
8 | *.so
9 | *.dylib
10 |
11 | # Test binary, built with `go test -c`
12 | *.test
13 |
14 | # Output of the go coverage tool, specifically when used with LiteIDE
15 | *.out
16 |
17 | # Dependency directories (remove the comment below to include it)
18 | # vendor/
19 |
20 | ### Go Patch ###
21 | /vendor/
22 | /Godeps/
23 | build/
24 |
25 | ### Project files from codegen
26 | *.pb.go
27 | *.pb.gw.go
28 | *.swagger.json
29 |
30 |
31 | ### Jupyter ###
32 | .ipynb_checkpoints
--------------------------------------------------------------------------------
/models/spec/version.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "current": {
5 | "type": "number",
6 | "minimum": 0
7 | },
8 | "tags": {
9 | "type": "array",
10 | "items": {
11 | "type": "string"
12 | }
13 | },
14 | "previous": {
15 | "type": "object",
16 | "properties": {
17 | "version": {
18 | "type": "number",
19 | "minimum": 0
20 | },
21 | "uid": {
22 | "$ref": "cref.schema.json"
23 | }
24 | }
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/models/examples/hello-world-workflow.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "hello-world-workflow",
3 | "description": "A hello world workflow (with an inline component)",
4 | "type": "workflow",
5 | "component": {
6 | "description": "A hello world component",
7 | "inputs": [],
8 | "outputs": [],
9 | "type": "component",
10 | "implementation": {
11 | "type": "brick",
12 | "container": {
13 | "name": "saycontainer",
14 | "image": "docker/whalesay",
15 | "command": ["cowsay"],
16 | "args": ["hello world"]
17 | }
18 | }
19 | },
20 | "workspace": "test"
21 | }
22 |
--------------------------------------------------------------------------------
/sandbox/submit-flowify-workflow.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Submit a stored flowify WorkflowTemplate for execution.
#
# Usage: submit-flowify-workflow.sh <namespace> <workflow-name> [version] [extra curl args...]
# Requires: SANDBOX_TOKEN exported with an appropriate jwt token.
#
# NOTE: shebang is bash (was /bin/sh) because the [[ ]] tests are bashisms.

ns=$1
shift
wf=$1
shift

# the version argument is optional: only shift when one was actually
# supplied, otherwise a bare `shift` with no remaining args errors out
ver=${1:-}
if [[ $# -gt 0 ]]; then shift; fi

if [[ -z "$ver" ]];
then
    data='{
    "resourceKind": "WorkflowTemplate",
    "ResourceName": '\"$wf\"'
}'
else
    data='{
    "resourceKind": "WorkflowTemplate",
    "ResourceName": '\"$wf\"',
    "version": '\"$ver\"'
}'
fi

path="http://localhost:8842/api/v1/workflows/$ns/submit"
echo "$path" >&2
echo "$data" >&2

curl -X 'POST' \
     "$path" \
     -H "authorization: bearer $SANDBOX_TOKEN" \
     -d "$data" \
     "$@"
--------------------------------------------------------------------------------
/models/spec/metadata.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "name": {
5 | "type": "string"
6 | },
7 | "description": {
8 | "type": "string"
9 | },
10 | "modifiedBy": {
11 | "type": "object",
12 | "properties": {
13 | "oid": {
14 | "type": "string"
15 | },
16 | "email": {
17 | "type": "string"
18 | }
19 | }
20 | },
21 | "uid": {
22 | "type": "string"
23 | },
24 | "version": {
25 | "$ref": "version.schema.json"
26 | },
27 | "timestamp": {
28 | "type": "string"
29 | }
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/models/spec/expression.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "left": {
5 | "oneOf": [
6 | { "type": "string" },
7 | {
8 | "$ref": "data.schema.json"
9 | }
10 | ]
11 | },
12 | "right": {
13 | "oneOf": [
14 | { "type": "string" },
15 | {
16 | "$ref": "data.schema.json"
17 | }
18 | ]
19 | },
20 | "operator": {
21 | "type": "string",
22 | "pattern": "^(==|!=|<|>|<=|>=)$"
23 | }
24 | },
25 | "additionalProperties": false,
26 | "required": ["left", "right", "operator"]
27 | }
28 |
--------------------------------------------------------------------------------
/models/spec/userinfo.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "email": {
5 | "description": "The users email address",
6 | "type": "string",
7 | "format": "email"
8 | },
9 | "name": {
10 | "description": "The name of the user.",
11 | "type": "string"
12 | },
13 | "roles": {
14 | "description": "The roles the user is assigned.",
15 | "type": "array",
16 | "minItems": 0,
17 | "uniqueItems": true,
18 | "items": {
"type": "string"
20 | }
21 | }
22 | },
23 | "additionalProperties": false,
24 | "required": ["email", "name", "roles"]
25 | }
26 |
--------------------------------------------------------------------------------
/models/spec/data.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "name": {
5 | "type": "string",
6 | "pattern": "^[a-zA-Z][-a-zA-Z0-9_]*$"
7 | },
8 | "mediatype": {
9 | "type": "array",
10 | "items": {
11 | "type": "string"
12 | }
13 | },
14 | "type": {
15 | "type": "string",
16 | "pattern": "^(parameter|env_secret|artifact|parameter_array|volume)$"
17 | },
18 | "userdata": {
19 | "type": "object",
20 | "description": "An opaque field for frontend applications, never touched by the backend"
21 | }
22 | },
"additionalProperties": false,
24 | "required": ["name", "type"]
25 | }
26 |
--------------------------------------------------------------------------------
/models/examples/minimal-graph-component.json:
--------------------------------------------------------------------------------
1 | {
"description": "A single-node graph component",
3 | "type": "component",
4 | "implementation": {
5 | "type": "graph",
6 | "nodes": [
7 | {
8 | "id": "gaia",
9 | "node": {
10 | "description": "A brick component",
11 | "type": "component",
12 | "implementation": {
13 | "type": "brick",
14 | "container": {
15 | "name": "containername",
16 | "image": "docker/whalesay",
17 | "command": ["cowsay"],
18 | "args": ["Hello Test"]
19 | }
20 | }
21 | }
22 | }
23 | ]
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/models/spec/workspace.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "name": {
5 | "description": "The name of the workspace",
6 | "type": "string"
7 | },
8 | "description": {
9 | "description": "The description of the workspace",
10 | "type": "string"
11 | },
12 | "roles": {
13 | "description": "The access roles user has for the workspace (user or admin).",
14 | "type": "array",
15 | "minItems": 0,
16 | "uniqueItems": true,
17 | "items": {
18 | "type": "string",
19 | "pattern": "^(user|admin)$"
20 | }
21 | }
22 | },
23 | "additionalProperties": false,
24 | "required": ["name", "description", "roles"]
25 | }
26 |
--------------------------------------------------------------------------------
/e2etest/default-roles.yaml:
--------------------------------------------------------------------------------
1 | # Deploy the access permissions for the `default` service account
2 |
3 | ---
4 |
5 | apiVersion: rbac.authorization.k8s.io/v1
6 | kind: RoleBinding
7 | metadata:
8 | name: default-rb
9 | namespace: test
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: Role
13 | name: default-role
14 | subjects:
15 | - kind: ServiceAccount
16 | name: default
17 | namespace: test
18 |
19 | ---
20 |
21 | apiVersion: rbac.authorization.k8s.io/v1
22 | kind: Role
23 | metadata:
24 | name: default-role
25 | namespace: test
26 | rules:
27 | - apiGroups: ["argoproj.io"]
28 | resources: ["workflows"]
29 | verbs: ["get", "create"]
30 | - apiGroups: [""]
31 | resources: ["pods"]
32 | verbs: ["create"]
33 |
--------------------------------------------------------------------------------
/rest/userinfo.go:
--------------------------------------------------------------------------------
1 | package rest
2 |
3 | import (
4 | "net/http"
5 |
6 | "github.com/equinor/flowify-workflows-server/user"
7 | "github.com/gorilla/mux"
8 | )
9 |
10 | func RegisterUserInfoRoutes(r *mux.Route) {
11 | s := r.Subrouter()
12 |
13 | const intype = "application/json"
14 | const outtype = "application/json"
15 |
16 | s.Use(CheckContentHeaderMiddleware(intype))
17 | s.Use(CheckAcceptRequestHeaderMiddleware(outtype))
18 | s.Use(SetContentTypeMiddleware(outtype))
19 |
20 | s.HandleFunc("/userinfo/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
21 | ctx := r.Context()
22 | id := user.GetUser(ctx)
23 |
24 | WriteResponse(w, http.StatusOK, nil, id, "userinfo")
25 | })).Methods(http.MethodGet)
26 |
27 | }
28 |
--------------------------------------------------------------------------------
/dev/Dockerfile.server_builder:
--------------------------------------------------------------------------------
1 | FROM golang:1.18-alpine as base
2 | LABEL description="Flowify dev environment builder"
3 | RUN apk add --no-cache \
4 | bash \
5 | binutils \
6 | curl \
7 | docker \
8 | gcc \
9 | git \
10 | jq \
11 | make \
12 | musl-dev \
13 | openssl \
14 | shadow
15 |
16 | FROM base as buildbase
17 | WORKDIR /root
18 | RUN curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.15.0/kind-linux-amd64 && \
19 | chmod +x ./kind && \
20 | mv ./kind /usr/local/bin/kind
21 | RUN mkdir -p $GOPATH/src/github.com/equinor/
22 | WORKDIR $GOPATH/src/github.com/equinor/flowify-workflows-server
23 | COPY dev/flowify_server_runner.sh .
24 | RUN chmod +x ./flowify_server_runner.sh
25 |
--------------------------------------------------------------------------------
/models/spec/flowify.redoc.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Redoc
5 |
6 |
7 |
8 |
9 |
10 |
13 |
19 |
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/models/examples/single-node-graph-component.json:
--------------------------------------------------------------------------------
1 | {
"description": "A single-node graph component",
3 | "inputs": [{ "name": "greeting", "type": "parameter" }],
4 | "type": "component",
5 | "implementation": {
6 | "type": "graph",
7 | "nodes": [
8 | {
9 | "id": "A",
10 | "node": {
11 | "description": "A brick component",
12 | "type": "component",
13 | "implementation": {
14 | "type": "brick",
15 | "container": {
16 | "name": "containername",
17 | "image": "docker/whalesay",
18 | "command": ["cowsay"],
19 | "args": ["hello world"]
20 | }
21 | }
22 | }
23 | }
24 | ]
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/e2etest/workspace_test.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "net/http"
5 |
6 | "github.com/equinor/flowify-workflows-server/pkg/workspace"
7 | "github.com/stretchr/testify/require"
8 | )
9 |
10 | func (s *e2eTestSuite) Test_Workspaces() {
11 | requestor := make_authenticated_requestor(s.client, mockUser)
12 |
13 | resp, err := requestor(server_addr+"/api/v1/workspaces/", http.MethodGet, "")
14 | require.NoError(s.T(), err, BodyStringer{resp.Body})
15 | require.Equal(s.T(), http.StatusOK, resp.StatusCode, BodyStringer{resp.Body})
16 |
17 | type WorkspaceList struct {
18 | Items []workspace.WorkspaceGetRequest `json:"items"`
19 | }
20 | var list WorkspaceList
21 | err = marshalResponse(ResponseBodyBytes(resp), &list)
22 |
23 | s.NoError(err)
24 | s.NotEmpty(list.Items)
25 | }
26 |
--------------------------------------------------------------------------------
/e2etest/Makefile:
--------------------------------------------------------------------------------
# Build and run the end-to-end test suite, producing a junit xml report.
all: e2etest

# Make sure we inject a sha into the test binaries, if available
ifndef flowify_git_sha
flowify_git_sha=$(shell git rev-parse --short HEAD)
$(info Set flowify_git_sha=$(flowify_git_sha) from git rev-parse /e2etest)
else
$(info Set flowify_git_sha=$(flowify_git_sha) from arg /e2etest)
endif

# Where the run log and junit report are written (consumed by CI upload steps)
TEST_OUTPUT_DIR = ../testoutputs

# Run the tests; `|| true` keeps the recipe alive on test failure so the
# junit report below is still generated (CI surfaces failures from the report).
e2etest:
	mkdir -p $(TEST_OUTPUT_DIR)
	(go test -v . -ldflags "-X 'github.com/equinor/flowify-workflows-server/apiserver.CommitSHA=$(flowify_git_sha)' -X 'github.com/equinor/flowify-workflows-server/apiserver.BuildTime=$(shell date -Is)'" | tee $(TEST_OUTPUT_DIR)/e2erun.log) || true
	cat $(TEST_OUTPUT_DIR)/e2erun.log | go-junit-report > $(TEST_OUTPUT_DIR)/e2ereport.xml

.PHONY: e2etest
--------------------------------------------------------------------------------
/models/spec/job.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "allOf": [{ "$ref": "metadata.schema.json" }],
4 | "properties": {
5 | "type": {
6 | "type": "string",
7 | "pattern": "^job$"
8 | },
9 | "inputValues": {
10 | "description": "The list of values for workflow inputs.",
11 | "type": "array",
12 | "minItems": 0,
13 | "uniqueItems": true,
14 | "items": {
15 | "$ref": "value.schema.json"
16 | }
17 | },
18 | "workflow": {
19 | "$ref": "workflow.schema.json"
20 | },
21 | "events": {
22 | "description": "The list of events registered while job was run.",
23 | "type": "array",
24 | "minItems": 0
25 | }
26 | },
27 | "unevaluatedProperties": false,
28 | "required": ["type", "workflow"]
29 | }
30 |
--------------------------------------------------------------------------------
/models/spec/node.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "id": {
5 | "type": "string",
6 | "description": "A locally unique identifier for nodes, required to start with a letter",
7 | "pattern": "^[a-zA-Z][-a-zA-Z0-9]*$"
8 | },
9 | "node": {
10 | "oneOf": [
11 | {
12 | "$ref": "cref.schema.json"
13 | },
14 | {
15 | "$ref": "crefversion.schema.json"
16 | },
17 | {
18 | "$ref": "component.schema.json"
19 | }
20 | ]
21 | },
22 | "userdata": {
23 | "type": "object",
24 | "description": "An opaque field for frontend applications, never touched by the backend"
25 | }
26 | },
"additionalProperties": false,
28 | "required": ["id", "node"]
29 | }
30 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.18-alpine as base
2 | LABEL description="Flowify build test environment"
LABEL org.opencontainers.image.source="https://github.com/equinor/flowify-workflows-server"
4 |
5 | RUN apk add git make binutils gcc musl-dev
6 |
7 | FROM base as builder
8 | RUN mkdir -p $GOPATH/src/github.com/equinor/
9 | WORKDIR $GOPATH/src/github.com/equinor/flowify-workflows-server
10 | # We should tighten this up
11 | COPY . .
12 |
13 | ARG FLOWIFY_GIT_SHA
14 | RUN make strip=1 flowify_git_sha=${FLOWIFY_GIT_SHA}
15 |
16 | FROM builder as tester
17 | RUN go install github.com/jstemmer/go-junit-report@v0.9.1
18 | RUN go install github.com/jandelgado/gcov2lcov@v1.0.5
19 | #RUN apk add nodejs
20 |
21 | COPY --from=builder /go/src/github.com/equinor/flowify-workflows-server/build ./
22 | CMD ["./flowify-workflows-server"]
23 |
--------------------------------------------------------------------------------
/docker-compose-tests.yaml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 | services:
3 | mongo:
4 | # one node mongoDB replica set for local development
5 | container_name: mongodb
6 | image: mongo:5
7 | healthcheck:
8 | test: test $$(echo "rs.initiate().ok || rs.status().ok" | mongo --quiet) -eq 1
9 | interval: 10s
10 | command: ["--replSet", "rs0", "--bind_ip_all"]
11 | app:
12 | build:
13 | context: .
14 | target: tester
15 | args:
16 | - FLOWIFY_GIT_SHA=${FLOWIFY_GIT_SHA}
17 | environment:
18 | FLOWIFY_DB_CONFIG_ADDRESS: mongo
19 | FLOWIFY_DB_CONFIG_PORT: 27017
20 | depends_on:
21 | - mongo
22 | volumes:
23 | - ./testoutputs:/go/src/github.com/equinor/flowify-workflows-server/testoutputs
24 | command: make UNITTEST_COVERAGE=1 unittest flowify_git_sha=${FLOWIFY_GIT_SHA}
25 |
--------------------------------------------------------------------------------
/models/examples/minimal-conditional-component.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": "If/else component",
3 | "type": "component",
4 | "implementation": {
5 | "type": "conditional",
6 | "nodeTrue": {
7 | "description": "",
8 | "type": "component",
9 | "implementation": {
10 | "type": "brick",
11 | "container": {
12 | "name": "containername",
13 | "image": "alpine:latest",
14 | "command": ["sh"]
15 | }
16 | }
17 | },
18 | "nodeFalse": {
19 | "version": 2,
20 | "uid": "44763f88-7f51-11ec-a8a3-0242ac120002"
21 | },
22 | "expression": {
23 | "left": {
24 | "name": "valFromParam",
25 | "mediatype": ["number"],
26 | "type": "parameter"
27 | },
28 | "operator": ">=",
29 | "right": "5"
30 | }
31 | }
32 | }
--------------------------------------------------------------------------------
/models/spec/brick.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "type": {
5 | "type": "string",
6 | "pattern": "^brick$"
7 | },
8 | "container": {
9 | "$ref": "https://raw.githubusercontent.com/kubernetes/kubernetes/v1.21.2/api/openapi-spec/swagger.json#/definitions/io.k8s.api.core.v1.Container"
10 | },
11 | "args": {
12 | "type": "array",
13 | "description": "An array of arguments that are appended to the k8s container.args above",
14 | "items": { "$ref": "arg.schema.json" }
15 | },
16 | "results": {
17 | "type": "array",
18 | "description": "An array of results that are mapped to the component output interface",
19 | "items": { "$ref": "res.schema.json" }
20 | }
21 | },
22 | "additionalProperties": false,
23 | "required": ["type", "container"]
24 | }
25 |
--------------------------------------------------------------------------------
/models/spec/res.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "source": {
5 | "oneOf": [
6 | { "type": "string" },
7 | {
8 | "type": "object",
9 | "properties": {
10 | "file": { "type": "string" }
11 | },
12 | "required": ["file"],
13 | "additionalProperties": false
14 | },
15 | {
16 | "type": "object",
17 | "properties": {
18 | "volume": { "type": "string" }
19 | },
20 | "required": ["volume"],
21 | "additionalProperties": false
22 | }
23 | ]
24 | },
25 | "target": {
26 | "$ref": "port.schema.json"
27 | },
28 | "description": {
29 | "type": "string"
30 | }
31 | },
32 | "additionalProperties": false,
33 | "required": ["source", "target"]
34 | }
35 |
--------------------------------------------------------------------------------
/e2etest/workspace_cm_test.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | labels:
7 | app.kubernetes.io/component: workspace-config
8 | app.kubernetes.io/part-of: flowify
9 | name: test-no-access
10 | namespace: test
11 | data:
12 | roles: "[\"role-x\", \"role-y\", \"role-z\"]"
13 | projectName: test-no-access
14 | description: >
15 | The e2e injected token should not be able to read/write to this workspace
16 |
17 | ---
18 |
19 | apiVersion: v1
20 | kind: ConfigMap
21 | metadata:
22 | labels:
23 | app.kubernetes.io/component: workspace-config
24 | app.kubernetes.io/part-of: flowify
25 | name: test
26 | namespace: test
27 | data:
28 | roles: "[[\"role-x\"], [\"role-y\"]]"
29 | projectName: test
30 | description: bla
31 | hideForUnauthorized: "false"
32 | serviceAccountName: default
33 |
--------------------------------------------------------------------------------
/models/examples/minimal-map-component.json:
--------------------------------------------------------------------------------
1 | {
"description": "A map component",
3 | "version": {
4 | "current": 5,
5 | "tags": ["tag1", "tag2"],
6 | "previous": {
7 | "version": 10,
8 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c0"
9 | }
10 | },
11 | "type": "component",
12 | "implementation": {
13 | "type": "map",
14 | "node": {
15 | "description": "A brick component",
16 | "version": {
17 | "current": 1,
18 | "tags": ["tag3", "tag2"],
19 | "previous": {
20 | "version": 0
21 | }
22 | },
23 | "type": "component",
24 | "implementation": {
25 | "type": "brick",
26 | "container": {
27 | "name": "containername",
28 | "image": "docker/whalesay",
29 | "command": ["cowsay"],
30 | "args": ["Hello Test"]
31 | }
32 | }
33 | }
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/sandbox-project-a/configmaps.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | annotations:
6 | workflows.argoproj.io/default-artifact-repository: default-v1
7 | name: artifact-repositories
8 | data:
9 | default-v1: |
10 | archiveLogs: true
11 | s3:
12 | bucket: my-bucket
13 | endpoint: minio.argo.svc.cluster.local:9000
14 | insecure: true
15 | accessKeySecret:
16 | name: my-minio-cred
17 | key: accesskey
18 | secretKeySecret:
19 | name: my-minio-cred
20 | key: secretkey
21 | empty: ""
22 | my-key: |
23 | archiveLogs: true
24 | s3:
25 | bucket: my-bucket
26 | endpoint: minio.argo.svc.cluster.local:9000
27 | insecure: true
28 | accessKeySecret:
29 | name: my-minio-cred
30 | key: accesskey
31 | secretKeySecret:
32 | name: my-minio-cred
33 | key: secretkey
34 | ---
--------------------------------------------------------------------------------
/dev/argo-cluster-install/sandbox-project-b/configmaps.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | annotations:
6 | workflows.argoproj.io/default-artifact-repository: default-v1
7 | name: artifact-repositories
8 | data:
9 | default-v1: |
10 | archiveLogs: true
11 | s3:
12 | bucket: my-bucket
13 | endpoint: minio.argo.svc.cluster.local:9000
14 | insecure: true
15 | accessKeySecret:
16 | name: my-minio-cred
17 | key: accesskey
18 | secretKeySecret:
19 | name: my-minio-cred
20 | key: secretkey
21 | empty: ""
22 | my-key: |
23 | archiveLogs: true
24 | s3:
25 | bucket: my-bucket
26 | endpoint: minio.argo.svc.cluster.local:9000
27 | insecure: true
28 | accessKeySecret:
29 | name: my-minio-cred
30 | key: accesskey
31 | secretKeySecret:
32 | name: my-minio-cred
33 | key: secretkey
34 | ---
--------------------------------------------------------------------------------
/dev/flowify_server_runner.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Launch the flowify server inside the dev container after verifying the
# kind cluster config can be exported.

RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
WHITE='\033[0;37m'
NOCOLOR='\033[0m' # No Color

bash kind_cluster_config_export.sh
cluster_exists=$?

# BUGFIX: was `[ "$cluster_exist" -neq 0 ]` -- the variable name was
# misspelled (always empty) and `-neq` is not a valid test operator, so the
# guard could never trigger correctly.
if [ "$cluster_exists" -ne 0 ]
then
    echo -e ${RED}
    echo =====================================================================
    echo Cluster does not exist, cannot continue
    echo =====================================================================
    echo -e ${NOCOLOR}
    exit $cluster_exists
fi

echo -e ${BLUE}
echo =====================================================================
echo Deploying flowify server
echo =====================================================================
echo -e ${NOCOLOR}

bash -c '$GOPATH/src/github.com/equinor/flowify-workflows-server/build/flowify-workflows-server'
31 |
--------------------------------------------------------------------------------
/models/examples/job-mounts.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": "Job example with volume mounts",
3 | "type": "job",
4 | "inputValues": [
5 | {
6 | "value": "{\"name\":\"workdir\",\"persistentVolumeClaim\":{\"claimName\":\"my-existing-volume\"}}",
7 | "target": "mount-a"
8 | }
9 | ],
10 | "workflow": {
11 | "name": "hello-mount-example",
12 | "description": "Test workflow with a mounted volume",
13 | "type": "workflow",
14 | "workspace": "argo",
15 | "component": {
16 | "description": "My cool component, that can read from a mount.",
17 | "inputs": [{ "name": "mount-a", "type": "volume" }],
18 | "outputs": [],
19 | "type": "component",
20 | "implementation": {
21 | "type": "brick",
22 | "container": {
23 | "name": "containername",
24 | "image": "alpine:latest",
25 | "command": ["sh", "-c", "ls /volumes/mount-a"]
26 | }
27 | }
28 | }
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/pkg/workspace/mock.go:
--------------------------------------------------------------------------------
1 | package workspace
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/stretchr/testify/mock"
7 | )
8 |
// WorkspaceClientMock is a testify-based mock of the workspace client
// surface used here (ListWorkspaces, HasAccessToWorkspace, GetNamespace).
// Configure expectations with the embedded mock.Mock's On(...)/Return(...).
type WorkspaceClientMock struct {
	mock.Mock
}
12 |
// NewDefaultWorkspaceClientMock returns a mock with a catch-all expectation
// for ListWorkspaces that yields (nil, nil) for any arguments.
// NOTE(review): the nil first return value only works if ListWorkspaces
// tolerates a nil args.Get(0) -- a plain .([]Workspace) assertion on a nil
// interface panics; verify before relying on this default.
func NewDefaultWorkspaceClientMock() *WorkspaceClientMock {
	obj := &WorkspaceClientMock{}
	obj.On("ListWorkspaces", mock.Anything, mock.Anything).Return(nil, nil)

	return obj
}
19 |
20 | func (m *WorkspaceClientMock) ListWorkspaces(ctx context.Context, userTokens []string) ([]Workspace, error) {
21 | args := m.Called(ctx, userTokens)
22 | return args.Get(0).([]Workspace), args.Error(1)
23 | }
24 |
25 | func (m *WorkspaceClientMock) HasAccessToWorkspace(ctx context.Context, workspaceName string, userTokens []string) (bool, error) {
26 | args := m.Called(ctx, workspaceName, userTokens)
27 | return args.Bool(0), args.Error(1)
28 | }
29 |
30 | func (m *WorkspaceClientMock) GetNamespace() string {
31 | args := m.Called()
32 | return args.String(0)
33 | }
34 |
--------------------------------------------------------------------------------
/.github/workflows/e2etest.yaml:
--------------------------------------------------------------------------------
1 | name: End-to-end test
2 | on:
3 | workflow_dispatch: {}
4 | push:
5 | paths-ignore:
6 | - ".github/**"
7 | - "dev/**"
8 | jobs:
9 | tests:
10 | name: Run end-to-end tests
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout
14 | uses: actions/checkout@v3
15 | - name: Run try to build e2e-container
16 | run: make docker_e2e_build flowify_git_sha=${{ github.sha }}
17 | - name: Run tests and generate report
18 | run: make docker_e2e_test flowify_git_sha=${{ github.sha }}
19 | - name: Archive code test results
20 | uses: actions/upload-artifact@v2
21 | if: always()
22 | with:
23 | name: report
24 | path: testoutputs/e2ereport.xml
25 | - name: Publish end-to-end test results
26 | uses: EnricoMi/publish-unit-test-result-action@v2
27 | if: always()
28 | with:
29 | files: testoutputs/e2ereport.xml
30 | check_name: End-to-end test results
31 |
32 |
--------------------------------------------------------------------------------
/models/spec/map.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "type": {
5 | "type": "string",
6 | "pattern": "^map$"
7 | },
8 | "node": {
9 | "oneOf": [
10 | {
11 | "$ref": "cref.schema.json"
12 | },
13 | {
14 | "$ref": "crefversion.schema.json"
15 | },
16 | {
17 | "$ref": "component.schema.json"
18 | }
19 | ]
20 | },
21 | "inputMappings": {
22 | "description": "The mapping of input ports to individual graph-node ports",
23 | "type": "array",
24 | "items": {
25 | "$ref": "mapping.schema.json"
26 | }
27 | },
28 | "outputMappings": {
29 | "description": "The mapping of graph node-ports to component interface ports",
30 | "type": "array",
31 | "items": {
32 | "$ref": "mapping.schema.json"
33 | }
34 | }
35 | },
36 | "required": ["type"],
37 | "additionalProperties": false
38 | }
39 |
--------------------------------------------------------------------------------
/.github/workflows/prod.yaml:
--------------------------------------------------------------------------------
1 | name: Prod
2 | on:
3 | workflow_dispatch: {}
4 |
5 | jobs:
6 | deploy:
7 | name: Update deployment
8 | runs-on: ubuntu-latest
9 | env:
10 | EMAIL: ${{ github.event.head_commit.author.email }}
11 | NAME: ${{ github.event.head_commit.author.name }}
12 | steps:
13 | - name: Checkout infra
14 | uses: actions/checkout@v2
15 | with:
16 | ref: main
17 | repository: equinor/flowify-infrastructure
18 | ssh-key: ${{ secrets.FLOWIFY_INFRA_DEPLOY_KEY }}
19 | - name: Update infra
20 | run: |
21 | SHA_SHORT=$(echo ${{ github.sha }} | cut -c1-8)
22 | SHA_LONG=${{ github.sha }}
23 | git config --global user.email "${EMAIL}"
24 | git config --global user.name "GitHub Actions (${NAME})"
25 | sed -i "s/imageTag:.*/imageTag: $SHA_LONG/g" kube/server/values-prod.yaml
26 | git add kube/server/values-prod.yaml
27 | git commit --message "GHA: Update production imageTag" || true
28 | git push
29 |
--------------------------------------------------------------------------------
/models/spec/arg.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "source": {
5 | "oneOf": [
6 | { "type": "string" },
7 | {
8 | "$ref": "port.schema.json"
9 | }
10 | ]
11 | },
12 | "target": {
13 | "type": "object",
14 | "properties": {
15 | "type": { "type": "string" },
16 | "prefix": {
17 | "type": "string",
18 | "description": "Prefix added in front of the value extracted from the argument (e.g. prefix \"--value=\" will result in \"--value={{parameter_value}}\")"
19 | },
20 | "suffix": {
21 | "type": "string",
22 | "description": "Suffix added at the end of the value extracted from the argument (e.g. suffix \"/file.txt\" will result in \"{{parameter_value}}/file.txt\")"
23 | }
24 | },
25 | "required": ["type"],
26 | "additionalProperties": false
27 | },
28 | "description": {
29 | "type": "string"
30 | }
31 | },
32 | "additionalProperties": false,
33 | "required": ["source"]
34 | }
35 |
--------------------------------------------------------------------------------
/config.yml:
--------------------------------------------------------------------------------
1 | db:
2 | # select which db to use
3 | select: mongo
4 | # the flowify document database
5 | dbname: test
6 | # mongo:
7 | config:
8 | # Mongo fields
9 | # (FLOWIFY_)DB_CONFIG_ADDRESS=...
10 | # url to database
11 | address: localhost
12 | # port where mongo is listening
13 | port: 27017
14 |
15 | # Cosmos fields
16 | # export (FLOWIFY_)DB_CONFIG_CREDENTIALS=...
17 | credentials: SET_FROM_ENV
18 |
19 | kubernetes:
20 | # how to locate the kubernetes server
21 | kubeconfigpath: SET_FROM_ENV
22 | # the namespace containing the flowify configuration and setup
23 | namespace: flowify
24 |
25 | auth:
26 | handler: azure-oauth2-openid-token
27 | config:
28 | issuer: sandbox
29 | audience: flowify
30 | # keysurl: http://localhost:32023/jwkeys/
31 | keysurl: SET_FROM_ENV
32 |
33 | #auth:
34 | # handler: disabled-auth
35 | # config:
36 | # uid: "0"
37 | # name: Auth Disabled
38 | # email: auth@disabled.com
39 | # roles:
40 | # - tester
41 | # - dummy
42 |
43 | logging:
44 | loglevel: info
45 |
46 | server:
47 | port: 8842
48 |
49 |
--------------------------------------------------------------------------------
/dev/kind_cluster_config_export.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Export the kubeconfig of the local kind cluster named "cluster" and rewrite
# the API server address so it is reachable via the in-cluster service host
# (KUBERNETES_SERVICE_HOST / KUBERNETES_SERVICE_PORT must be set by the caller).

RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
WHITE='\033[0;37m'
NOCOLOR='\033[0m' # No Color

# A non-zero status means no kind cluster named "cluster" is available.
# (The previous `bash -c '...'` wrapper was redundant; the exit status of a
# direct invocation is captured the same way.)
kind export --name cluster kubeconfig 2>/dev/null
cluster_exist=$?

if [ "$cluster_exist" -eq 0 ]
then
    echo -e "${GREEN}"
    echo =====================================================================
    echo Kind cluster exist, getting kubeconfig from cluster
    echo Modifying Kubernetes config to point to Kind master node
    echo =====================================================================
    echo -e "${NOCOLOR}"
    # Point the kubeconfig's server entry at the kind control plane.
    sed -i "s/^ server:.*/ server: https:\/\/$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/" "$HOME/.kube/config"
else
    echo -e "${RED}"
    echo =====================================================================
    echo Kind cluster doesn\'t exist, server cannot be run
    echo =====================================================================
    echo -e "${NOCOLOR}"
    # `exit -1` is non-portable (bash maps it to 255); use a plain failure code.
    exit 1
fi
--------------------------------------------------------------------------------
/models/spec/graph.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "type": {
5 | "type": "string",
6 | "pattern": "^graph$"
7 | },
8 | "nodes": {
9 | "type": "array",
10 | "description": "The component-nodes of the graph, either inline or references to external storage",
11 | "items": {
12 | "$ref": "node.schema.json"
13 | }
14 | },
15 | "edges": {
16 | "description": "The topology of the graph",
17 | "type": "array",
18 | "items": {
19 | "$ref": "edge.schema.json"
20 | }
21 | },
22 | "inputMappings": {
23 | "description": "The mapping of input ports to individual graph-node ports",
24 | "type": "array",
25 | "items": {
26 | "$ref": "mapping.schema.json"
27 | }
28 | },
29 | "outputMappings": {
30 | "description": "The mapping of graph node-ports to component interface ports",
31 | "type": "array",
32 | "items": {
33 | "$ref": "mapping.schema.json"
34 | }
35 | }
36 | },
37 | "required": ["type"],
38 | "additionalProperties": false
39 | }
40 |
--------------------------------------------------------------------------------
/pkg/secret/mock.go:
--------------------------------------------------------------------------------
1 | package secret
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/stretchr/testify/mock"
7 | )
8 |
// SecretClientMock is a testify-based mock implementation of a secret
// client; expectations are registered with On(...) and calls are recorded
// through the embedded mock.Mock.
type SecretClientMock struct {
	mock.Mock
}
12 |
13 | func NewDefaultSecretClientMock() *SecretClientMock {
14 | obj := &SecretClientMock{}
15 | obj.On("ListAvailableKeys", mock.Anything, mock.Anything).Return([]string{"key1", "key2", "key3"}, nil)
16 | obj.On("AddSecretKey", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
17 | obj.On("DeleteSecretKey", mock.Anything, mock.Anything, "key-ok").Return(nil)
18 |
19 | return obj
20 | }
21 |
22 | func (m *SecretClientMock) ListAvailableKeys(ctx context.Context, group string) ([]string, error) {
23 | args := m.Called(ctx, group)
24 | return args.Get(0).([]string), args.Error(1)
25 | }
26 |
27 | func (m *SecretClientMock) AddSecretKey(ctx context.Context, group, name, key string) error {
28 | args := m.Called(ctx, group, name, key)
29 | return args.Error(0)
30 | }
31 |
32 | func (m *SecretClientMock) DeleteSecretKey(ctx context.Context, group, name string) error {
33 | args := m.Called(ctx, group, name)
34 | return args.Error(0)
35 | }
36 |
--------------------------------------------------------------------------------
/cmd/validate/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "flag"
6 | "fmt"
7 | "os"
8 |
9 | "github.com/equinor/flowify-workflows-server/models"
10 | log "github.com/sirupsen/logrus"
11 | )
12 |
// myUsage prints a one-line usage summary for the validator binary,
// followed by the registered flag defaults.
func myUsage() {
	fmt.Fprintf(os.Stdout, "Usage: %s [OPTIONS] filename\n", os.Args[0])
	flag.PrintDefaults()
}
17 |
18 | func main() {
19 | log.SetLevel(log.InfoLevel)
20 |
21 | schemaFilePtr := flag.String("schema", "", "a file path")
22 | flag.Parse()
23 | flag.Usage = myUsage
24 | if flag.NArg() > 1 {
25 | flag.Usage()
26 | return
27 | }
28 |
29 | rawbytes, err := os.ReadFile(flag.Arg(0))
30 | if err != nil {
31 | log.Fatal(err.Error())
32 | return
33 | }
34 | log.SetLevel(log.DebugLevel)
35 | schema := models.FindSchema(*schemaFilePtr)
36 | if schema != nil {
37 | var v interface{}
38 | if err := json.Unmarshal(rawbytes, &v); err != nil {
39 | log.Fatal("Cannot unmarshal JSON: ", err.Error())
40 | }
41 |
42 | err := schema.Validate(v)
43 | if err != nil {
44 | log.Fatalf("Validation errorv: %#v", err)
45 | }
46 | log.Info("schema validates")
47 | } else {
48 | log.Info("schema not validated")
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/models/spec/flowify.swagger.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Flowify API
6 |
7 |
8 |
9 |
10 |
11 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/e2etest/artifact_test.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "net/http"
5 | )
6 |
7 | func (s *e2eTestSuite) Test_getArtifact() {
8 | s.T().Skip("Artifact test known to fail")
9 | requestor := make_requestor(s.client)
10 |
11 | defer func() {
12 | resp, err := requestor("http://localhost:8842/api/v1/workflows/test/workflow1", http.MethodDelete, "")
13 |
14 | s.NoError(err)
15 | s.Equal(http.StatusOK, resp.StatusCode, "Expected and known to fail")
16 | }()
17 | /*
18 | // Push a workflow
19 | resp, err := requestor("http://localhost:8842/api/v1/workflows/test", http.MethodPost, mockdata.WorkflowWithOutputArtifact)
20 | s.NoError(err)
21 |
22 | s.Equal(http.StatusOK, resp.StatusCode)
23 |
24 | if err != nil {
25 | s.T().Fatalf("Error reaching the flowify server: %v", err)
26 | }
27 |
28 | s.Equal(http.StatusOK, resp.StatusCode)
29 |
30 | // Give container time to spin up and do stuff. Should be changed for a
31 | // wait condition at some point.
32 | time.Sleep(10 * time.Second)
33 | resp, err = requestor("http://localhost:8842/artifacts/test/artifact-passing/artifact-passing/hello-art", http.MethodGet, "")
34 | s.NoError(err)
35 | */
36 | }
37 |
--------------------------------------------------------------------------------
/dev/Dockerfile.cluster:
--------------------------------------------------------------------------------
1 | FROM alpine:latest as base
2 | LABEL description="Flowify cluster environment"
3 | RUN apk add --no-cache \
4 | bash \
5 | bash-completion \
6 | curl \
7 | docker \
8 | openssl \
9 | vim
10 |
11 | FROM base as buildbase
12 | WORKDIR /root
13 | # Install kubectl
14 | RUN curl -LO https://dl.k8s.io/release/v1.25.0/bin/linux/amd64/kubectl && \
15 | chmod +x ./kubectl && \
16 | mv ./kubectl /usr/local/bin/kubectl
17 | # Install Kubernetes in Docker (kind)
18 | RUN curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.15.0/kind-linux-amd64 && \
19 | chmod +x ./kind && \
20 | mv ./kind /usr/local/bin/kind
21 | # Install argo
22 | RUN curl -LO https://github.com/argoproj/argo-workflows/releases/download/v3.4.0/argo-linux-amd64.gz && \
23 | gunzip argo-linux-amd64.gz && \
24 | chmod +x argo-linux-amd64 && \
25 | mv ./argo-linux-amd64 /usr/local/bin/argo
26 | RUN echo 'source <(kubectl completion bash)' >>~/.bashrc
27 | RUN echo 'source <(argo completion bash)' >>~/.bashrc
28 | COPY dev/cluster_runner.sh .
29 | COPY dev/kind.yaml .
30 | COPY dev/argo-cluster-install/ ./argo-cluster-install
31 | RUN chmod +x ./cluster_runner.sh
32 |
33 | ENTRYPOINT ["/bin/bash", "cluster_runner.sh"]
34 |
--------------------------------------------------------------------------------
/transpiler/argo.go:
--------------------------------------------------------------------------------
1 | package transpiler
2 |
3 | import (
4 | wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
5 | // corev1 "k8s.io/api/core/v1"
6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
7 | )
8 |
const (
	// DAGName is the fixed name used for generated DAG templates.
	DAGName = "DAG-generated"
	// Known brick implementation flavors.
	js  BrickType = "js"
	k8s BrickType = "k8s"
)

// --- Argo Brick---------------------------------------------------------------

// BrickType discriminates the kind of brick implementation ("js" or "k8s").
type BrickType string

// TODO: Add possibility to use BrickType !!!
19 |
20 | func GenerateArgo(name string, workspace string, labels map[string]string, annotations map[string]string) *wfv1.Workflow {
21 | wf := wfv1.Workflow{TypeMeta: metav1.TypeMeta{Kind: "Workflow", APIVersion: "argoproj.io/v1alpha1"}}
22 | wf.SetNamespace(workspace)
23 | wf.SetName(name)
24 | wf.SetLabels(labels)
25 | wf.SetAnnotations(annotations)
26 |
27 | return &wf
28 | }
29 |
30 | func RemoveDuplicatedTemplates(templates []wfv1.Template) []wfv1.Template {
31 | keys := make(map[string]bool)
32 | utemplates := []wfv1.Template{}
33 | for _, entry := range templates {
34 | if _, value := keys[entry.Name]; !value {
35 | keys[entry.Name] = true
36 | utemplates = append(utemplates, entry)
37 | }
38 | }
39 | return utemplates
40 | }
41 |
--------------------------------------------------------------------------------
/models/spec/component.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "type": {
5 | "type": "string",
6 | "pattern": "^component$"
7 | },
8 | "inputs": {
9 | "description": "The input interface; the data objects going in.",
10 | "type": "array",
11 | "minItems": 0,
12 | "uniqueItems": true,
13 | "items": {
14 | "$ref": "data.schema.json"
15 | }
16 | },
17 | "outputs": {
18 | "description": "The output interface; the data objects going out.",
19 | "type": "array",
20 | "minItems": 0,
21 | "uniqueItems": true,
22 | "items": {
23 | "$ref": "data.schema.json"
24 | }
25 | },
26 | "implementation": {
27 | "oneOf": [
28 | {
29 | "$ref": "any.schema.json"
30 | },
31 | {
32 | "$ref": "brick.schema.json"
33 | },
34 | {
35 | "$ref": "graph.schema.json"
36 | },
37 | {
38 | "$ref": "map.schema.json"
39 | },
40 | {
41 | "$ref": "conditional.schema.json"
42 | }
43 | ]
44 | }
45 | },
46 | "allOf": [{ "$ref": "metadata.schema.json" }],
47 | "unevaluatedProperties": false,
48 | "required": ["type", "implementation"]
49 | }
50 |
--------------------------------------------------------------------------------
/.github/workflows/test.yaml:
--------------------------------------------------------------------------------
1 | name: Unit tests
2 | on:
3 | workflow_dispatch: {}
4 | push:
5 | paths-ignore:
6 | - ".github/**"
7 | - "dev/**"
8 | jobs:
9 | tests:
10 | name: Run all unit tests
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout
14 | uses: actions/checkout@v3
15 | - name: Try to build the container image
16 | run: docker build --build-arg FLOWIFY_GIT_SHA=${{ github.sha }} .
17 | - name: Run tests and generate report
18 | run: make docker_unittest
19 | - name: Upload coverage
20 | uses: romeovs/lcov-reporter-action@v0.2.21
21 | if: always()
22 | with:
23 | github-token: ${{ secrets.GITHUB_TOKEN }}
24 | lcov-file: testoutputs/coverage.lcov
25 | - name: Archive coverage
26 | uses: actions/upload-artifact@v2
27 | if: always()
28 | with:
29 | name: coverage
30 | path: testoutputs/coverage.lcov
31 | - name: Archive code test results
32 | uses: actions/upload-artifact@v2
33 | if: always()
34 | with:
35 | name: report
36 | path: testoutputs/report.xml
37 | - name: Publish Unit Test Results
38 | uses: EnricoMi/publish-unit-test-result-action@v1
39 | if: always()
40 | with:
41 | files: testoutputs/report.xml
42 |
43 |
--------------------------------------------------------------------------------
/dev/docker-compose-e2e.yaml:
--------------------------------------------------------------------------------
1 | version: "3.7"
2 | # This test runner needs to be merged with the docker-compose defining the cluster and mongo services
3 | # Usage: docker-compose -f docker-compose.yaml -f docker-compose-e2e.yaml ...
4 | services:
5 | flowify-e2e-runner:
6 | container_name: flowify_e2e-runner
7 | build:
8 | context: ../
9 | dockerfile: dev/Dockerfile.server
10 | args:
11 | - FLOWIFY_GIT_SHA=${FLOWIFY_GIT_SHA}
12 | volumes:
13 | - /var/run/docker.sock:/var/run/docker.sock
14 | - ../testoutputs:/go/src/github.com/equinor/flowify-workflows-server/testoutputs
15 | depends_on:
16 | cluster:
17 | condition: service_healthy
18 | mongo:
19 | condition: service_healthy
20 | environment:
21 | - KUBERNETES_SERVICE_HOST=cluster-control-plane
22 | - KUBERNETES_SERVICE_PORT=6443
23 | - FLOWIFY_DB_SELECT=mongo
24 | - FLOWIFY_DB_CONFIG_ADDRESS=mongo_server
25 | - FLOWIFY_DB_CONFIG_PORT=27017
26 | - FLOWIFY_SERVER_PORT=8842
27 | - FLOWIFY_KUBERNETES_NAMESPACE=argo
28 | - FLOWIFY_KUBERNETES_KUBECONFIGPATH=/root/.kube/config
29 | - FLOWIFY_AUTH_HANDLER=azure-oauth2-openid-token
30 | - FLOWIFY_AUTH_CONFIG_KEYSURL=DISABLE_JWT_SIGNATURE_VERIFICATION
31 | command: bash -c "./kind_cluster_config_export.sh; make e2etest flowify_git_sha=$FLOWIFY_GIT_SHA"
32 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/base/deployments.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | labels:
6 | app: minio
7 | name: minio
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: minio
12 | template:
13 | metadata:
14 | labels:
15 | app: minio
16 | spec:
17 | containers:
18 | - command:
19 | - minio
20 | - server
21 | - --console-address
22 | - :9001
23 | - /data
24 | env:
25 | - name: MINIO_ACCESS_KEY
26 | value: admin
27 | - name: MINIO_SECRET_KEY
28 | value: password
29 | image: minio/minio
30 | lifecycle:
31 | postStart:
32 | exec:
33 | command:
34 | - mkdir
35 | - -p
36 | - /data/my-bucket
37 | livenessProbe:
38 | httpGet:
39 | path: /minio/health/live
40 | port: 9000
41 | initialDelaySeconds: 5
42 | periodSeconds: 10
43 | name: main
44 | ports:
45 | - containerPort: 9000
46 | name: api
47 | - containerPort: 9001
48 | name: dashboard
49 | readinessProbe:
50 | httpGet:
51 | path: /minio/health/ready
52 | port: 9000
53 | initialDelaySeconds: 5
54 | periodSeconds: 10
55 | ---
56 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/sandbox-project-a/roles.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Role for sandbox-project-a
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | kind: Role
5 | metadata:
6 | name: workflow-role
7 | namespace: sandbox-project-a
8 | rules:
9 | - apiGroups:
10 | - ""
11 | resources:
12 | - pods
13 | verbs:
14 | - get
15 | - watch
16 | - patch
17 | - apiGroups:
18 | - ""
19 | resources:
20 | - pods/log
21 | verbs:
22 | - get
23 | - watch
24 | - apiGroups:
25 | - ""
26 | resources:
27 | - pods/exec
28 | verbs:
29 | - create
30 | - apiGroups:
31 | - ""
32 | resources:
33 | - configmaps
34 | verbs:
35 | - create
36 | - get
37 | - update
38 | - apiGroups:
39 | - argoproj.io
40 | resources:
41 | - workflows
42 | verbs:
43 | - create
44 | - get
45 | - apiGroups:
46 | - argoproj.io
47 | resources:
48 | - workflowtasksets
49 | - workflowtasksets/finalizers
50 | verbs:
51 | - list
52 | - watch
53 | - get
54 | - update
55 | - patch
56 | ---
57 | # Role binding for sandbox-project-a
58 | apiVersion: rbac.authorization.k8s.io/v1
59 | kind: RoleBinding
60 | metadata:
61 | name: workflow-project-a-binding
62 | namespace: sandbox-project-a
63 | roleRef:
64 | apiGroup: rbac.authorization.k8s.io
65 | kind: Role
66 | name: workflow-role
67 | subjects:
68 | - kind: ServiceAccount
69 | name: default
70 | namespace: sandbox-project-a
71 | ---
--------------------------------------------------------------------------------
/models/spec/conditional.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "object",
3 | "properties": {
4 | "type": {
5 | "type": "string",
6 | "pattern": "^conditional$"
7 | },
8 | "nodeTrue": {
9 | "oneOf": [
10 | {
11 | "$ref": "cref.schema.json"
12 | },
13 | {
14 | "$ref": "crefversion.schema.json"
15 | },
16 | {
17 | "$ref": "component.schema.json"
18 | }
19 | ]
20 | },
21 | "nodeFalse": {
22 | "oneOf": [
23 | {
24 | "$ref": "cref.schema.json"
25 | },
26 | {
27 | "$ref": "crefversion.schema.json"
28 | },
29 | {
30 | "$ref": "component.schema.json"
31 | }
32 | ]
33 | },
34 | "expression": {
35 | "$ref": "expression.schema.json"
36 | },
37 | "inputMappings": {
38 | "description": "The mapping of input ports to individual graph-node ports",
39 | "type": "array",
40 | "items": {
41 | "$ref": "mapping.schema.json"
42 | }
43 | },
44 | "outputMappings": {
45 | "description": "The mapping of graph node-ports to component interface ports",
46 | "type": "array",
47 | "items": {
48 | "$ref": "mapping.schema.json"
49 | }
50 | }
51 | },
52 | "required": ["type", "nodeTrue", "expression"],
53 | "additionalProperties": false
54 | }
55 |
--------------------------------------------------------------------------------
/.github/workflows/public_image.yml:
--------------------------------------------------------------------------------
1 | name: Build public docker images to ghcr
2 |
3 | on:
4 | release:
5 | types: [published]
6 |
7 | permissions:
8 | id-token: write
9 | contents: read
10 | packages: write
11 | jobs:
12 | build:
13 | runs-on: ubuntu-20.04
14 |
15 | steps:
16 | - name: Checkout branch
17 | uses: actions/checkout@v1
18 |
19 | - name: GitHub Container Registry login
20 | uses: docker/login-action@v1
21 | with:
22 | registry: ghcr.io
23 | username: ${{ github.actor }}
24 | password: ${{ secrets.GITHUB_TOKEN }}
25 |
26 | - name: Initialize BuildX
27 | uses: docker/setup-buildx-action@v1
28 |
29 | - name: Build and push docker image (for k8s deploy)
30 | uses: docker/build-push-action@v2
31 | with:
32 | context: .
33 | push: true
34 | tags: |
35 | ghcr.io/equinor/flowify-workflows-server:${{ github.event.release.tag_name }}
36 | ghcr.io/equinor/flowify-workflows-server:latest
37 |
38 | - name: Build and push docker image (for local run)
39 | uses: docker/build-push-action@v2
40 | with:
41 | context: .
42 | file: ./dev/Dockerfile.server
43 | push: true
44 | tags: |
45 | ghcr.io/equinor/flowify-workflows-server-local:${{ github.event.release.tag_name }}
46 | ghcr.io/equinor/flowify-workflows-server-local:latest
--------------------------------------------------------------------------------
/sandbox/Makefile:
--------------------------------------------------------------------------------
1 | EXECUTABLES = yq jwt docker kubectl minikube
2 | K := $(foreach exec,$(EXECUTABLES),\
3 | $(if $(shell PATH=$(PATH) which $(exec)),$(info Found `$(exec) => $(shell PATH=$(PATH) which $(exec))`),$(error "No $(exec) in PATH, install and/or set search path")))
4 |
5 | SECRET := $(shell PATH=$(PATH) yq -r .secret secrets.yaml)
6 | TOKEN_FLOWE := $(shell PATH=$(PATH) yq .flowe.payload secrets.yaml | jwt --encode --secret $(SECRET))
7 | TOKEN_SWIRL := $(shell PATH=$(PATH) yq .swirl.payload secrets.yaml | jwt --encode --secret $(SECRET))
8 |
9 | red=$(shell tput setaf 1)
10 | green=$(shell tput setaf 2)
11 | blue=$(shell tput setaf 33)
12 | normal=$(shell tput sgr0)
13 |
14 | start: server
15 | ./start.sh
16 | @printf "$(blue)SANDBOX:$(normal) %s\n" "$(green)Sandbox started"
17 | @printf "$(blue)SANDBOX:$(normal) %s\n" "To access the sandbox one of the following token is needed:"
18 | @printf "$(blue)SANDBOX:$(normal) %s\n" "User F Lowe: $(TOKEN_FLOWE)"
19 | @printf "$(blue)SANDBOX:$(normal) %s\n" "User S Wirlop: $(TOKEN_SWIRL)"
20 |
21 | @printf "$(blue)SANDBOX:$(normal) %s\n" "Try it out!"
22 | @printf "%s\n" " export SANDBOX_TOKEN=$(TOKEN_FLOWE)"
23 | @printf "%s\n" " sh list-flowify-workflows.sh"
24 | @printf "$(blue)SANDBOX: $(normal)%s\n" "$(red)To stop the sandbox processes, use: make stop$(normal)"
25 |
26 | stop:
27 | ./stop.sh
28 | @printf "$(blue)SANDBOX:$(normal) %s\n" "$(green)Sandbox stopped$(normal)"
29 |
30 | server:
31 | @make -B -C .. server
32 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/base/clusterroles.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | annotations:
6 | workflows.argoproj.io/description: |
7 | Minimum recommended permissions to use artifact GC.
8 | name: artifactgc
9 | rules:
10 | - apiGroups:
11 | - argoproj.io
12 | resources:
13 | - workflowartifactgctasks
14 | verbs:
15 | - list
16 | - watch
17 | - apiGroups:
18 | - argoproj.io
19 | resources:
20 | - workflowartifactgctasks/status
21 | verbs:
22 | - patch
23 | ---
24 | apiVersion: rbac.authorization.k8s.io/v1
25 | kind: ClusterRole
26 | metadata:
27 | annotations:
28 | workflows.argoproj.io/description: |
29 | Recommended minimum permissions for the `emissary` executor.
30 | name: executor
31 | rules:
32 | - apiGroups:
33 | - argoproj.io
34 | resources:
35 | - workflowtaskresults
36 | verbs:
37 | - create
38 | - patch
39 | ---
40 | apiVersion: rbac.authorization.k8s.io/v1
41 | kind: ClusterRoleBinding
42 | metadata:
43 | name: artifactgc-default
44 | roleRef:
45 | apiGroup: rbac.authorization.k8s.io
46 | kind: ClusterRole
47 | name: artifactgc
48 | subjects:
49 | - kind: ServiceAccount
50 | name: default
51 | namespace: argo
52 | ---
53 | apiVersion: rbac.authorization.k8s.io/v1
54 | kind: ClusterRoleBinding
55 | metadata:
56 | name: executor-default
57 | roleRef:
58 | apiGroup: rbac.authorization.k8s.io
59 | kind: ClusterRole
60 | name: executor
61 | subjects:
62 | - kind: ServiceAccount
63 | name: default
64 | namespace: argo
65 | ---
--------------------------------------------------------------------------------
/dev/Dockerfile.server:
--------------------------------------------------------------------------------
1 | FROM golang:1.18-alpine as base
2 | LABEL description="Flowify dev environment"
3 | RUN apk add --no-cache \
4 | bash \
5 | binutils \
6 | curl \
7 | docker \
8 | gcc \
9 | git \
10 | jq \
11 | make \
12 | musl-dev \
13 | openssl \
14 | shadow
15 |
16 | FROM base as buildbase
17 | WORKDIR /root
18 | RUN curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.15.0/kind-linux-amd64 && \
19 | chmod +x ./kind && \
20 | mv ./kind /usr/local/bin/kind
21 | RUN mkdir -p $GOPATH/src/github.com/equinor/
22 | WORKDIR $GOPATH/src/github.com/equinor/flowify-workflows-server
23 | COPY dev/flowify_server_runner.sh .
24 | RUN chmod +x ./flowify_server_runner.sh
25 | COPY dev/kind_cluster_config_export.sh .
26 | RUN chmod +x ./kind_cluster_config_export.sh
27 | COPY go.mod .
28 | COPY go.sum .
29 | RUN go mod download
30 | ENV PATH="${PATH}:/root"
31 |
32 | FROM buildbase as devbase
33 | RUN go install github.com/jstemmer/go-junit-report@latest
34 | RUN go install github.com/jandelgado/gcov2lcov@latest
35 |
36 | FROM devbase as devserver
37 | COPY apiserver ./apiserver
38 | COPY auth ./auth
39 | COPY cmd ./cmd
40 | COPY models ./models
41 | COPY pkg ./pkg
42 | COPY rest ./rest
43 | COPY storage ./storage
44 | COPY transpiler ./transpiler
45 | COPY user ./user
46 | COPY config.yml .
47 | COPY main.go .
48 | COPY Makefile .
49 | COPY e2etest ./e2etest
50 |
51 |
52 | ARG FLOWIFY_GIT_SHA
53 | RUN ["/bin/bash", "-c", "make server strip=1 flowify_git_sha=${FLOWIFY_GIT_SHA}"]
54 |
55 | CMD ["./flowify_server_runner.sh"]
--------------------------------------------------------------------------------
/.github/workflows/public_image_dev.yml:
--------------------------------------------------------------------------------
1 | name: Build public dev docker images to ghcr
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | paths-ignore:
7 | - '.github/**'
8 |
9 | permissions:
10 | id-token: write
11 | contents: read
12 | packages: write
13 | jobs:
14 | build:
15 | runs-on: ubuntu-20.04
16 |
17 | steps:
18 | - name: Checkout branch
19 | uses: actions/checkout@v1
20 |
21 | - name: Set image tag and short sha
22 | run: |
23 | echo "DOCKER_IMG_VERSION=$(cat ./aim/version.txt)" >> $GITHUB_ENV
24 | echo "SHORT_SHA=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
25 |
26 | - name: GitHub Container Registry login
27 | uses: docker/login-action@v1
28 | with:
29 | registry: ghcr.io
30 | username: ${{ github.actor }}
31 | password: ${{ secrets.GITHUB_TOKEN }}
32 |
33 | - name: Initialize BuildX
34 | uses: docker/setup-buildx-action@v1
35 |
36 | - name: Build and push docker image (for k8s deploy)
37 | uses: docker/build-push-action@v2
38 | with:
39 | context: .
40 | push: true
41 | tags: |
42 | ghcr.io/equinor/flowify-workflows-server:sha-${{ env.SHORT_SHA}}
43 | ghcr.io/equinor/flowify-workflows-server:dev
44 |
45 | - name: Build and push docker image (for local run)
46 | uses: docker/build-push-action@v2
47 | with:
48 | context: .
49 | file: ./dev/Dockerfile.server
50 | push: true
51 | tags: |
52 | ghcr.io/equinor/flowify-workflows-server-local:sha-${{ env.SHORT_SHA}}
53 | ghcr.io/equinor/flowify-workflows-server-local:dev
--------------------------------------------------------------------------------
/models/examples/brick-parameter-component.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": "A brick component with an input parameter",
3 | "inputs": [
4 | { "name": "greeting", "mediatype": ["string"], "type": "parameter" },
5 | { "name": "sender", "mediatype": ["string"], "type": "parameter" }
6 | ],
7 | "type": "component",
8 | "implementation": {
9 | "type": "brick",
10 | "container": {
11 | "name": "anyname",
12 | "image": "docker/whalesay",
13 | "command": ["cowsay"],
14 | "args": [
15 | "Hello static I will be appended (TBD?) by potentially variable flowify-args"
16 | ]
17 | },
18 | "args": [
19 | {
20 | "source": "Hello static text.",
21 | "description": "A static argument"
22 | },
23 | {
24 | "source": { "port": "greeting" },
25 | "target": { "type": "env_secret" },
26 | "description": "A variable stored in env $(GREET) and expanded by k8s upon execution"
27 | },
28 | {
29 | "source": { "port": "sender" },
30 | "target": { "type": "parameter" }
31 | },
32 | {
33 | "source": { "port": "sender" },
34 | "target": { "type": "file" },
35 | "description": "A variable stored as an argo artefact in /tmp/sender"
36 | }
37 | ],
38 | "results": [
39 | {
40 | "source": "Hello static text.",
41 | "target": { "port": "outputport" },
42 | "description": "A static result, eg for mocking"
43 | },
44 | {
45 | "source": { "file": "/tmp/res.txt" },
46 | "target": { "port": "outputport" },
47 | "description": "A result from file mapped to component output interface"
48 | }
49 | ]
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/models/examples/two-node-graph-component-with-cref.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": "A two-node graph component with a cref",
3 | "inputs": [
4 | { "name": "greeting", "mediatype": ["string"], "type": "parameter" },
5 | { "name": "sender", "mediatype": ["string"], "type": "parameter" }
6 | ],
7 | "type": "component",
8 | "implementation": {
9 | "type": "graph",
10 | "inputMappings": [
11 | {
12 | "source": { "port": "greeting" },
13 | "target": { "node": "greeter", "port": "greeting" }
14 | }
15 | ],
16 | "nodes": [
17 | {
18 | "id": "greeter-node",
19 | "node": {
20 | "description": "A brick component",
21 | "inputs": [
22 | { "name": "greeting", "mediatype": ["string"], "type": "parameter" }
23 | ],
24 | "type": "component",
25 | "implementation": {
26 | "type": "brick",
27 | "container": {
28 | "name": "containername",
29 | "image": "docker/whalesay",
30 | "command": ["cowsay"],
31 | "args": ["Hello Test!"]
32 | },
33 | "args": [
34 | {
35 | "source": "Hello static text.",
36 | "description": "A static argument"
37 | },
38 | {
39 | "source": { "port": "GREETING" },
40 | "target": { "type": "env", "name": "GREET" },
41 | "description": "A variable stored in env $(GREET) and expanded by k8s upon execution"
42 | }
43 | ]
44 | }
45 | }
46 | },
47 | {
48 | "id": "responder-node",
49 | "node": "44763f88-7f51-11ec-a8a3-0242ac120002"
50 | }
51 | ]
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/dev/argo-cluster-install/base/configmaps.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: "workspace-config"
7 | app.kubernetes.io/part-of: "flowify"
8 | name: "sandbox-project-a"
9 | namespace: "argo"
10 | data:
11 | roles: "[[\"sandbox-developer\"]]"
12 | ---
13 | apiVersion: v1
14 | kind: ConfigMap
15 | metadata:
16 | labels:
17 | app.kubernetes.io/component: "workspace-config"
18 | app.kubernetes.io/part-of: "flowify"
19 | name: "sandbox-project-b"
20 | namespace: "argo"
21 | data:
22 | roles: "[[\"sandbox\"]]"
23 |
24 | ---
25 | apiVersion: v1
26 | kind: ConfigMap
27 | metadata:
28 | labels:
29 | app.kubernetes.io/part-of: "flowify"
30 | name: "role-descriptions"
31 | namespace: "argo"
32 | data:
33 | "sandbox-developer": "Need to play in the sandbox"
34 | "sandbox-admin": "Required for God-mode"
35 | ---
36 | apiVersion: v1
37 | kind: ConfigMap
38 | metadata:
39 | annotations:
40 | workflows.argoproj.io/default-artifact-repository: default-v1
41 | name: artifact-repositories
42 | namespace: argo
43 | data:
44 | default-v1: |
45 | archiveLogs: true
46 | s3:
47 | bucket: my-bucket
48 | endpoint: minio.argo.svc.cluster.local:9000
49 | insecure: true
50 | accessKeySecret:
51 | name: my-minio-cred
52 | key: accesskey
53 | secretKeySecret:
54 | name: my-minio-cred
55 | key: secretkey
56 | empty: ""
57 | my-key: |
58 | archiveLogs: true
59 | s3:
60 | bucket: my-bucket
61 | endpoint: minio.argo.svc.cluster.local:9000
62 | insecure: true
63 | accessKeySecret:
64 | name: my-minio-cred
65 | key: accesskey
66 | secretKeySecret:
67 | name: my-minio-cred
68 | key: secretkey
69 | ---
--------------------------------------------------------------------------------
/.github/workflows/dev_env.yaml:
--------------------------------------------------------------------------------
1 | name: build dev environment images
2 | on:
3 | workflow_dispatch: {}
4 | push:
5 | branches:
6 | - main
7 | paths:
8 | - 'dev/**'
9 | permissions:
10 | id-token: write
11 | contents: read
12 | packages: write
13 | jobs:
14 | build:
15 | runs-on: ubuntu-20.04
16 |
17 | steps:
18 | - name: Checkout branch
19 | uses: actions/checkout@v1
20 |
21 | - name: Set image tag and short sha
22 | run: |
23 | echo "SHORT_SHA=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
24 |
25 | - name: GitHub Container Registry login
26 | uses: docker/login-action@v1
27 | with:
28 | registry: ghcr.io
29 | username: ${{ github.actor }}
30 | password: ${{ secrets.GITHUB_TOKEN }}
31 |
32 | - name: Initialize BuildX
33 | uses: docker/setup-buildx-action@v1
34 |
35 | - name: Build and push kind cluster image
36 | uses: docker/build-push-action@v2
37 | with:
38 | context: .
39 | file: ./dev/Dockerfile.cluster
40 | push: true
41 | tags: |
42 |               ghcr.io/equinor/flowify-dev-cluster:${{ env.SHORT_SHA }}
43 | ghcr.io/equinor/flowify-dev-cluster:latest
44 |
45 | - name: Build and push dev env builder image
46 | uses: docker/build-push-action@v2
47 | with:
48 | context: .
49 | file: ./dev/Dockerfile.server_builder
50 | push: true
51 | tags: |
52 |               ghcr.io/equinor/flowify-dev-builder:${{ env.SHORT_SHA }}
53 | ghcr.io/equinor/flowify-dev-builder:latest
54 |
55 | - name: Build and push mongo image
56 | uses: docker/build-push-action@v2
57 | with:
58 | context: .
59 | file: ./dev/Dockerfile.mongo
60 | push: true
61 | tags: |
62 |               ghcr.io/equinor/flowify-mongo:${{ env.SHORT_SHA }}
63 | ghcr.io/equinor/flowify-mongo:latest
64 |
--------------------------------------------------------------------------------
/models/job.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "encoding/json"
5 |
6 | wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
7 | "github.com/pkg/errors"
8 | )
9 |
10 | type Value struct {
11 | Value interface{} `json:"value" bson:"value"`
12 | Target string `json:"target" bson:"target"`
13 | }
14 |
15 | func (v *Value) UnmarshalJSON(document []byte) error {
16 | var partialValue struct {
17 | Target string `json:"target"`
18 | Value json.RawMessage `json:"value"`
19 | }
20 |
21 | err := json.Unmarshal(document, &partialValue)
22 | if err != nil {
23 | return errors.Wrapf(err, "cannot unmarshal partial value")
24 | }
25 | v.Target = partialValue.Target
26 |
27 | var arr []string
28 | err = json.Unmarshal(partialValue.Value, &arr)
29 | if err == nil {
30 | v.Value = arr
31 | return nil
32 | }
33 |
34 | var str string
35 | err = json.Unmarshal(partialValue.Value, &str)
36 | if err == nil {
37 | v.Value = str
38 | return nil
39 | }
40 |
41 | return err
42 | }
43 |
// JobEvent is a snapshot of the underlying Argo workflow object,
// recorded as the job progresses.
type JobEvent wfv1.Workflow

// Job couples a workflow with concrete input values and the Argo
// events observed during its execution.
type Job struct {
	Metadata `json:",inline" bson:",inline"`
	// the workflow is either a workflow or a reference to one in the database
	Type        ComponentType `json:"type" bson:"type"`
	InputValues []Value       `json:"inputValues,omitempty" bson:"inputValues,omitempty"`
	Workflow    Workflow      `json:"workflow" bson:"workflow"`
	Events      []JobEvent    `json:"events,omitempty" bson:"events,omitempty"`
}

// JobStatus reports the current Argo workflow phase of a submitted job.
type JobStatus struct {
	Uid    ComponentReference `json:"uid" bson:"uid"`
	Status wfv1.WorkflowPhase `json:"status" bson:"status"`
}

// JobPostRequest is the request payload for submitting a new job.
type JobPostRequest struct {
	Job           Job            `json:"job"`
	SubmitOptions JobPostOptions `json:"options"`
}

// JobPostOptions carries optional submit-time settings for a job.
type JobPostOptions struct {
	Constants []interface{} `json:"constants"`
	Tags      []string      `json:"tags"`
}
69 |
--------------------------------------------------------------------------------
/models/validate.go:
--------------------------------------------------------------------------------
1 | package models
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "os"
7 |
8 | "github.com/pkg/errors"
9 | jsonschema "github.com/santhosh-tekuri/jsonschema/v5"
10 | "github.com/sirupsen/logrus"
11 | log "github.com/sirupsen/logrus"
12 |
13 | _ "github.com/santhosh-tekuri/jsonschema/v5/httploader"
14 | )
15 |
16 | func findFileSchema(path string) (*jsonschema.Schema, error) {
17 | if _, err := os.Stat(path); os.IsNotExist(err) {
18 | logrus.Debugf("no file path for %s", path)
19 | return nil, err
20 | }
21 | schema, err := jsonschema.Compile(path)
22 | if err != nil {
23 | return nil, err
24 | }
25 | logrus.Debugf("found schema in file %s", path)
26 | return schema, nil
27 | }
28 |
29 | func findPrecompiledSchema(name string) (*jsonschema.Schema, error) {
30 | for k, v := range RegisteredSchemas {
31 | log.Debugf("comparing: %s == %s", name, k.Name())
32 | if k.Name() == name {
33 | logrus.Debugf("found registered schema for type %s and name %s", k, name)
34 | return v, nil
35 | }
36 | }
37 | logrus.Debugf("found no registered schemas for name %s", name)
38 | return nil, nil
39 | }
40 |
41 | func FindSchema(nameOrPath string) *jsonschema.Schema {
42 |
43 | fileSchema, _ := findFileSchema(nameOrPath)
44 | precompiledSchema, _ := findPrecompiledSchema(nameOrPath)
45 | if fileSchema != nil {
46 | logrus.Infof("using schema from file: %s", nameOrPath)
47 | return fileSchema
48 | }
49 | if precompiledSchema != nil {
50 | logrus.Infof("using precompiled schema for type: %s", nameOrPath)
51 | return precompiledSchema
52 | }
53 | logrus.Debugf("found no schemas for %s", nameOrPath)
54 | return nil
55 | }
56 |
57 | func Validate(data []byte, schemaFilePath string) error {
58 |
59 | schema := FindSchema(schemaFilePath)
60 | if schema == nil {
61 | return fmt.Errorf("could not find schema from: %s", schemaFilePath)
62 | }
63 |
64 | var v interface{}
65 | if err := json.Unmarshal(data, &v); err != nil {
66 | return errors.Wrap(err, "validate unmarshal")
67 | }
68 |
69 | return schema.Validate(v)
70 | }
71 |
--------------------------------------------------------------------------------
/pkg/secret/config.go:
--------------------------------------------------------------------------------
1 | package secret
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "net"
7 | "os"
8 |
9 | "github.com/pkg/errors"
10 | "k8s.io/api/core/v1"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "k8s.io/client-go/kubernetes"
13 | "k8s.io/client-go/rest"
14 | )
15 |
16 | const (
17 | DefaultServiceAccountName = "default"
18 | AccountKey = "account"
19 | cmKeyServiceAccount = "serviceAccountName"
20 | )
21 |
22 | func InClusterConfig(ctx context.Context, client kubernetes.Interface, namespace, workspace string) (*rest.Config, error) {
23 | sa, err := lookupServiceAccount(ctx, client, workspace, namespace)
24 |
25 | if err != nil {
26 | return nil, errors.Wrap(err, "cannot look-up service account")
27 | }
28 |
29 | if len(sa.Secrets) == 0 {
30 | return nil, errors.New(fmt.Sprintf("service account %s has no attached secret", workspace))
31 | }
32 |
33 | secret, err := client.CoreV1().Secrets(workspace).Get(ctx, sa.Secrets[0].Name, metav1.GetOptions{})
34 |
35 | if err != nil {
36 | return nil, errors.Wrapf(err, "cannot get secret %s/%s", workspace, sa.Secrets[0].Name)
37 | }
38 |
39 | host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
40 | if len(host) == 0 || len(port) == 0 {
41 | return nil, rest.ErrNotInCluster
42 | }
43 |
44 | tlsClientConfig := rest.TLSClientConfig{}
45 | tlsClientConfig.CAData = secret.Data["ca.crt"]
46 |
47 | return &rest.Config{
48 | Host: "https://" + net.JoinHostPort(host, port),
49 | TLSClientConfig: tlsClientConfig,
50 | BearerToken: string(secret.Data["token"]),
51 | }, nil
52 | }
53 |
54 | func lookupServiceAccount(ctx context.Context, client kubernetes.Interface, workspace, namespace string) (*v1.ServiceAccount, error) {
55 | cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, workspace, metav1.GetOptions{})
56 |
57 | if err != nil {
58 | return nil, err
59 | }
60 |
61 | serviceAccountName, ok := cm.Data[cmKeyServiceAccount]
62 |
63 | if !ok {
64 | serviceAccountName = DefaultServiceAccountName
65 | }
66 |
67 | return client.CoreV1().ServiceAccounts(workspace).Get(ctx, serviceAccountName, metav1.GetOptions{})
68 | }
69 |
--------------------------------------------------------------------------------
/sandbox/secrets.yaml:
--------------------------------------------------------------------------------
1 | # https://jwt.io/
2 | # http://jwtbuilder.jamiekurtz.com/
3 | # https://github.com/mattroberts297/jsonwebtokencli
4 | # https://github.com/kislyuk/yq
5 |
6 | command: yq .$user.payload secrets.yaml | jwt --encode --secret $(yq -r .secret secrets.yaml)
7 |
8 | # user flowe
9 | flowe:
10 | payload:
11 | aud: flowify.io
12 | iss: sandbox
13 |
14 | # Timestamps for issue and expiry
15 | iat: 1663674547
16 | nbf: 1663674547
17 | exp: 2610445747
18 |
19 | email: flow@flowify.io
20 | name: F. Lowe
21 | roles:
22 | - sandbox-developer
23 | - sandbox-admin
24 |
25 | command: yq .flowe.payload secrets.yaml | jwt --encode -t --secret $(yq -r .secret secrets.yaml)
26 | token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJmbG93aWZ5LmlvIiwiaXNzIjoic2FuZGJveCIsImlhdCI6MTY2MzY3NDU0NywibmJmIjoxNjYzNjc0NTQ3LCJleHAiOjI2MTA0NDU3NDcsImVtYWlsIjoiZmxvd0BmbG93aWZ5LmlvIiwibmFtZSI6IkYuIExvd2UiLCJyb2xlcyI6WyJzYW5kYm94LWRldmVsb3BlciIsInNhbmRib3gtYWRtaW4iXX0.RsSK97RyPw6vrMbVem3ouaVwSrMuQjFq3RaFvyD3u4A
27 | roundtrip: yq .flowe.payload secrets.yaml | jwt --encode -t --secret $(yq -r .secret secrets.yaml) | jwt --decode --complete --secret $(yq -r .secret secrets.yaml)
28 |
29 | # user swirl
30 | swirl:
31 | payload:
32 | aud: flowify.io
33 | iss: sandbox
34 |
35 | # Timestamps for issue and expiry
36 | iat: 1663674547
37 | nbf: 1663674547
38 | exp: 2610445747
39 |
40 | email: swirl@flowify.io
41 | name: S. Wirlop
42 | roles:
43 | - sandbox-developer
44 |
45 | command: yq .swirl.payload secrets.yaml | jwt --encode -t --secret $(yq -r .secret secrets.yaml)
46 | token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJmbG93aWZ5LmlvIiwiaXNzIjoic2FuZGJveCIsImlhdCI6MTY2MzY3NDU0NywibmJmIjoxNjYzNjc0NTQ3LCJleHAiOjI2MTA0NDU3NDcsImVtYWlsIjoic3dpcmxAZmxvd2lmeS5pbyIsIm5hbWUiOiJTLiBXaXJsb3AiLCJyb2xlcyI6WyJzYW5kYm94LWRldmVsb3BlciJdfQ.Ap7chB9VNxucGAUkboqPFiZXhT6yCTYuLWjErqHcFOA
47 | roundtrip: yq .swirl.payload secrets.yaml | jwt --encode -t --secret $(yq -r .secret secrets.yaml) | jwt --decode --complete --secret $(yq -r .secret secrets.yaml)
48 |
49 |
50 | secret: flowify.io.1234567890
51 |
52 | # should be round trippable
53 |
--------------------------------------------------------------------------------
/dev/cluster_runner.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Development entrypoint: ensure a kind cluster named "cluster" exists,
# repoint kubeconfig at it, deploy argo on first creation, then idle
# forever so the container stays alive.

# ANSI escape codes used for colored stage banners
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
WHITE='\033[0;37m'
NOCOLOR='\033[0m' # No Color

# Probe for an existing cluster by trying to export its kubeconfig;
# exit status 0 means the cluster is already up.
bash -c 'kind export --name cluster kubeconfig 2>/dev/null'
cluster_exist=$?

if [ $cluster_exist -eq 0 ]
then
echo -e ${GREEN}
echo =====================================================================
echo Kind cluster exist, getting kubeconfig from cluster
echo =====================================================================
echo -e ${NOCOLOR}
else
echo -e ${BLUE}
echo =====================================================================
echo Bringing up a cluster
echo =====================================================================
echo -e ${NOCOLOR}
bash -c '/usr/local/bin/kind create cluster --name cluster --config /root/kind.yaml'
fi

# Set a trap for SIGTERM signal
# (unless told to keep the cluster alive, tear down the kind control
# plane container when this process is asked to terminate)
if ! [[ "$KEEP_KIND_CLUSTER_ALIVE" = true ]]
then
trap "docker rm -f cluster-control-plane" SIGTERM
fi

echo -e ${GREEN}
echo =====================================================================
echo Modifying Kubernetes config to point to Kind master node
echo =====================================================================
echo -e ${NOCOLOR}
# Rewrite the API server address so in-container clients reach the kind
# control plane through the docker network, not the host loopback.
sed -i "s/^ server:.*/ server: https:\/\/$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/" $HOME/.kube/config

# Only install argo on a freshly created cluster
if [ $cluster_exist -ne 0 ]
then
echo -e ${BLUE}
echo =====================================================================
echo Deploying argo
echo =====================================================================
echo -e ${NOCOLOR}
kubectl apply -k /root/argo-cluster-install

echo -e ${PURPLE}
echo =====================================================================
echo "Waiting for deployment..."
echo =====================================================================
echo -e ${NOCOLOR}
kubectl rollout status deployments -n argo
fi

# Keep the container process alive; the SIGTERM trap above handles shutdown.
while true
do
sleep 1
done
64 |
--------------------------------------------------------------------------------
/apiserver/apiserver_test.go:
--------------------------------------------------------------------------------
1 | package apiserver
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 | "net/http"
8 | "testing"
9 |
10 | "github.com/equinor/flowify-workflows-server/auth"
11 | "github.com/stretchr/testify/assert"
12 | "github.com/stretchr/testify/require"
13 | "k8s.io/client-go/kubernetes/fake"
14 | )
15 |
const (
	// port the test server listens on; must match the endpoint URL built
	// in Test_ApiServer and the port passed to NewFlowifyServer
	test_server_port = 1234
	mongo_test_host  = "localhost"
	mongo_test_port  = 27017
	test_db_name     = "test"
	test_namespace   = "testing-namespace"
	n_items          = 5
	// environment variable names for overriding the mongo location
	ext_mongo_hostname_env = "FLOWIFY_MONGO_ADDRESS"
	ext_mongo_port_env     = "FLOWIFY_MONGO_PORT"
)

// testCase describes one expected HTTP round-trip against the test server.
type testCase struct {
	Name       string
	URL        string
	StatusCode int
	Body       string
}
33 |
34 | func Test_ApiServer(t *testing.T) {
35 | server, err := NewFlowifyServer(
36 | fake.NewSimpleClientset(),
37 | "not-used", /* config namespace for k8s */
38 | nil, /* wfclient cs_workflow.Interface */
39 | nil, /* storage */
40 | nil, /* volumeStorage */
41 | 1234,
42 | auth.AzureTokenAuthenticator{},
43 | )
44 | require.NoError(t, err)
45 |
46 | /*
47 | spin up a apiserver server with some functionality not connected
48 | */
49 |
50 | ready := make(chan bool, 1)
51 | go server.Run(context.TODO(), &ready)
52 |
53 | require.True(t, <-ready, "make sure the server started before we continue")
54 |
55 | testcases := []testCase{
56 | {Name: "z-page/live", URL: "livez", StatusCode: http.StatusOK, Body: "alive"},
57 | {Name: "z-page/ready", URL: "readyz", StatusCode: http.StatusOK, Body: "ready"},
58 | {Name: "z-page/version", URL: "versionz", StatusCode: http.StatusOK, Body: CommitSHA},
59 | }
60 |
61 | for _, test := range testcases {
62 | t.Run(test.Name, func(t *testing.T) {
63 | endpoint := fmt.Sprintf("http://localhost:%d/%s", test_server_port, test.URL)
64 | resp, err := http.Get(endpoint)
65 | require.NoError(t, err)
66 | require.NotNil(t, resp)
67 |
68 | assert.Equal(t, test.StatusCode, resp.StatusCode)
69 | payload, err := io.ReadAll(resp.Body)
70 | assert.NoError(t, err)
71 | assert.Equal(t, test.Body, string(payload))
72 | })
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/models/examples/graph-input-volumes.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "job",
3 | "uid": "00000000-0000-0000-0000-000000000001",
4 | "description": "A job with volume input used as input into graph components",
5 | "inputValues": [
6 | { "value": "{\"name\":\"vol-config-0\"}", "target": "mount-0" },
7 | { "value": "{\"name\":\"vol-config-1\"}", "target": "mount-1" }
8 | ],
9 | "workflow": {
10 | "uid": "00000000-0000-0000-0000-000000000002",
11 | "component": {
12 | "type": "component",
13 | "uid": "00000000-0000-0000-0000-000000000003",
14 | "inputs": [
15 | { "name": "mount-0", "type": "volume" },
16 | { "name": "mount-1", "type": "volume" }
17 | ],
18 | "implementation": {
19 | "type": "graph",
20 | "inputMappings": [
21 | {
22 | "source": { "port": "mount-0" },
23 | "target": { "node": "a1", "port": "mount-a" }
24 | },
25 | {
26 | "source": { "port": "mount-1" },
27 | "target": { "node": "a1", "port": "mount-b" }
28 | }
29 | ],
30 | "nodes": [
31 | {
32 | "id": "a1",
33 | "node": {
34 | "uid": "00000000-0000-0000-0000-000000000004",
35 | "type": "component",
36 | "inputs": [
37 | { "name": "mount-a", "type": "volume" },
38 | { "name": "mount-b", "type": "volume" }
39 | ],
40 | "implementation": {
41 | "type": "brick",
42 | "container": {
43 | "name": "whale",
44 | "image": "docker/whalesay",
45 | "command": ["cowsay"]
46 | },
47 | "args": [
48 | {
49 | "target": {
50 | "type": "volume",
51 | "prefix": "/opt/volumes/"
52 | },
53 | "source": { "port": "mount-a" }
54 | },
55 | {
56 | "target": {
57 | "type": "volume",
58 | "prefix": "/mnt"
59 | },
60 | "source": { "port": "mount-b" }
61 | }
62 | ]
63 | }
64 | }
65 | }
66 | ]
67 | }
68 | },
69 | "type": "workflow",
70 | "workspace": "test"
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/user/user.go:
--------------------------------------------------------------------------------
1 | package user
2 |
3 | import (
4 | "context"
5 | "strings"
6 | )
7 |
// ContextKey is the key type for flowify values stored in a context.Context.
type ContextKey int

const (
	// UserKey indexes the authenticated User in a request context.
	UserKey ContextKey = iota
)

// CustomRolePattern is the separator embedded in custom role strings,
// e.g. "<email>--$owner" (see HasOwnership).
const CustomRolePattern string = "--$"

// Role is a single authorization role carried by a user.
type Role string

// Roles that grant workspace creation (checked by CanCreateWorkspaces).
var ListGroupsCanCreateWorkspaces = [3]string{"admin", "developer-admin", "sandbox-developer"}

// Roles that grant visibility of custom roles (checked by CanSeeCustomRoles).
var ListGroupsCanSeeCustomRoles = [3]string{"admin", "developer-admin", "sandbox-developer"}

// tightly modelled on JWT: http://jwt.io
type User interface {
	GetUid() string
	GetName() string
	GetEmail() string
	GetRoles() []Role
}
28 |
29 | func CanCreateWorkspaces(user User) bool {
30 | for _, role := range user.GetRoles() {
31 | for _, r := range ListGroupsCanCreateWorkspaces {
32 | if string(role) == r {
33 | return true
34 | }
35 | }
36 | }
37 | return false
38 | }
39 |
40 | func GetUser(ctx context.Context) User {
41 | val := ctx.Value(UserKey)
42 |
43 | if val == nil {
44 | return nil
45 | } else {
46 | return val.(User)
47 | }
48 | }
49 |
50 | func HasOwnership(u User, roles []Role) bool {
51 | var hasOwnership bool
52 | for _, r := range roles {
53 | if strings.Contains(string(r), CustomRolePattern) {
54 | cr := u.GetEmail() + CustomRolePattern + "owner"
55 | if string(r) == cr {
56 | hasOwnership = true
57 | }
58 | }
59 | }
60 | return hasOwnership
61 | }
62 |
63 | func UserHasRole(u User, entry Role) bool {
64 | for _, item := range u.GetRoles() {
65 | if entry == item {
66 | return true
67 | }
68 | }
69 | return false
70 | }
71 |
72 | func CanSeeCustomRoles(u User) bool {
73 | canSeeRoles := ListGroupsCanSeeCustomRoles
74 | for _, role := range u.GetRoles() {
75 | for _, r := range canSeeRoles {
76 | if string(role) == r {
77 | return true
78 | }
79 | }
80 | }
81 | return false
82 | }
83 |
// UserContext returns a copy of ctx carrying user under UserKey;
// retrieve it again with GetUser.
func UserContext(user User, ctx context.Context) context.Context {
	return context.WithValue(ctx, UserKey, user)
}

// MockUser is a static User implementation for tests.
type MockUser struct {
	Uid   string
	Name  string
	Email string
	Roles []Role
}

// Accessors implementing the User interface.
func (u MockUser) GetUid() string   { return u.Uid }
func (u MockUser) GetName() string  { return u.Name }
func (u MockUser) GetEmail() string { return u.Email }
func (u MockUser) GetRoles() []Role { return u.Roles }
101 |
--------------------------------------------------------------------------------
/dev/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.7"
2 | services:
3 | mongo:
4 | # one node mongoDB replica set for local development
5 | container_name: mongo_server
6 | image: mongo:5.0
7 | restart: unless-stopped
8 | ports:
9 | - "27017:27017"
10 | # volumes:
11 | # - ./database-rs:/data/db
12 | healthcheck:
13 | test: test $$(echo "rs.initiate().ok || rs.status().ok" | mongo --quiet) -eq 1
14 | interval: 10s
15 | command: ["--replSet", "rs0", "--bind_ip_all"]
16 |
17 | cluster:
18 | container_name: kind_cluster
19 | # build:
20 | # context: ../
21 | # dockerfile: dev/Dockerfile.cluster
22 | # image: dev_cluster
23 | image: ghcr.io/equinor/flowify-dev-cluster:latest
24 | volumes:
25 | - /var/run/docker.sock:/var/run/docker.sock
26 | depends_on:
27 | - mongo
28 | environment:
29 | - KUBERNETES_SERVICE_HOST=cluster-control-plane
30 | - KUBERNETES_SERVICE_PORT=6443
31 | - KEEP_KIND_CLUSTER_ALIVE=false
32 | healthcheck:
33 | test: kubectl rollout status deployments -n argo --timeout=1s || exit 1
34 | interval: 5s
35 | retries: 25
36 | start_period: 1s
37 | timeout: 120s
38 |
39 | server:
40 | container_name: flowify_server
41 | build:
42 | context: ../
43 | dockerfile: dev/Dockerfile.server
44 | image: dev_server
45 | ports:
46 | - "8842:8842"
47 | volumes:
48 | - /var/run/docker.sock:/var/run/docker.sock
49 | depends_on:
50 | cluster:
51 | condition: service_healthy
52 | environment:
53 | - KUBERNETES_SERVICE_HOST=cluster-control-plane
54 | - KUBERNETES_SERVICE_PORT=6443
55 | - FLOWIFY_DB_SELECT=mongo
56 | - FLOWIFY_DB_CONFIG_ADDRESS=mongo_server
57 | - FLOWIFY_DB_CONFIG_PORT=27017
58 | - FLOWIFY_SERVER_PORT=8842
59 | - FLOWIFY_KUBERNETES_NAMESPACE=argo
60 | - FLOWIFY_KUBERNETES_KUBECONFIGPATH=/root/.kube/config
61 | - KUBECONFIG=/root/.kube/config
62 | - FLOWIFY_AUTH_HANDLER=azure-oauth2-openid-token
63 | - FLOWIFY_AUTH_CONFIG_ISSUER=sandbox
64 | - FLOWIFY_AUTH_CONFIG_AUDIENCE=flowify
65 | - FLOWIFY_AUTH_CONFIG_KEYSURL=DISABLE_JWT_SIGNATURE_VERIFICATION
66 | healthcheck:
67 | test: curl -sL 127.0.0.1:8842 -o /dev/null || exit 1
68 | interval: 5s
69 | retries: 5
70 | start_period: 1s
71 | timeout: 30s
72 |
73 | networks:
74 | default:
75 | name: kind
76 | external: false
77 | driver: bridge
78 |
--------------------------------------------------------------------------------
/cmd/transpile/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "flag"
6 | "fmt"
7 | "os"
8 |
9 | "sigs.k8s.io/yaml"
10 |
11 | // "github.com/google/uuid"
12 |
13 | "github.com/equinor/flowify-workflows-server/models"
14 | "github.com/equinor/flowify-workflows-server/transpiler"
15 | log "github.com/sirupsen/logrus"
16 | )
17 |
18 | func myUsage() {
19 | fmt.Printf("Usage: %s [OPTIONS] filename\n", os.Args[0])
20 | flag.PrintDefaults()
21 | }
22 |
23 | func main() {
24 | log.SetLevel(log.InfoLevel)
25 |
26 | schemaFilePtr := flag.String("schema", "", "a file path")
27 | flag.Parse()
28 | flag.Usage = myUsage
29 | if flag.NArg() > 1 {
30 | flag.Usage()
31 | return
32 | }
33 |
34 | rawbytes, err := os.ReadFile(flag.Arg(0))
35 | if err != nil {
36 | log.Fatal(err.Error())
37 | return
38 | }
39 |
40 | schema := models.FindSchema(*schemaFilePtr)
41 | log.Infof("schema from: %s", *schemaFilePtr)
42 |
43 | if schema != nil {
44 | log.Infof("schema: %s", *schemaFilePtr)
45 | var v interface{}
46 | if err := json.Unmarshal(rawbytes, &v); err != nil {
47 | log.Fatalf(err.Error())
48 | }
49 |
50 | err := schema.Validate(v)
51 | if err != nil {
52 | b := fmt.Sprintf("%#v\n", err)
53 | log.Fatal(string(b))
54 | }
55 | log.Info("schema validates")
56 |
57 | }
58 | var job models.Job
59 | var workflow models.Workflow
60 | var component models.Component
61 |
62 | err = json.Unmarshal(rawbytes, &job)
63 | if err == nil && job.Type == models.ComponentType("job") {
64 | log.Info("Job in the input file.")
65 | } else {
66 | err = json.Unmarshal(rawbytes, &workflow)
67 | if err == nil && workflow.Type == "workflow" {
68 | log.Info("Workflow in the input file.")
69 | job = models.Job{Metadata: models.Metadata{Description: "Empty job from workflow"}, Type: "job", InputValues: nil, Workflow: workflow}
70 | } else {
71 | err = json.Unmarshal(rawbytes, &component)
72 | if err == nil {
73 | log.Info("Component in the input file.")
74 | workflow = models.Workflow{Metadata: models.Metadata{}, Component: component, Workspace: ""}
75 | job = models.Job{Metadata: models.Metadata{Description: "Empty job from component"}, Type: "job", InputValues: nil, Workflow: workflow}
76 | } else {
77 | log.Fatal("Can't convert file content to Job/Workflow/Component object.")
78 | }
79 | }
80 | }
81 |
82 | ajob, err := transpiler.GetArgoWorkflow(job)
83 | if err != nil {
84 | log.Fatal(err.Error())
85 | }
86 |
87 | outBytes, _ := yaml.Marshal(ajob)
88 | fmt.Print(string(outBytes))
89 | }
90 |
--------------------------------------------------------------------------------
/storage/storage.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/equinor/flowify-workflows-server/models"
9 | )
10 |
// Pagination bounds a list query: Limit caps the number of items returned
// and Skip is the number of leading items to pass over.
type Pagination struct {
	Limit int
	Skip  int
}
15 |
// ComponentClient is the persistence interface for flowify documents:
// components, workflows, and jobs.
type ComponentClient interface {
	// Components
	ListComponentsMetadata(ctx context.Context, pagination Pagination, filters []string, sorts []string) (models.MetadataList, error)
	ListComponentVersionsMetadata(ctx context.Context, id models.ComponentReference, pagination Pagination, sorts []string) (models.MetadataList, error)
	GetComponent(ctx context.Context, id interface{}) (models.Component, error)
	CreateComponent(ctx context.Context, node models.Component) error
	PutComponent(ctx context.Context, node models.Component) error
	// oldTimestamp presumably guards against concurrent updates
	// (cf. ErrNewerDocumentExists) — confirm against the implementation
	PatchComponent(ctx context.Context, node models.Component, oldTimestamp time.Time) (models.Component, error)

	// Workflows
	ListWorkflowsMetadata(ctx context.Context, pagination Pagination, filter []string, sorts []string) (models.MetadataWorkspaceList, error)
	ListWorkflowVersionsMetadata(ctx context.Context, id models.ComponentReference, pagination Pagination, sorts []string) (models.MetadataWorkspaceList, error)
	GetWorkflow(ctx context.Context, id interface{}) (models.Workflow, error)
	CreateWorkflow(ctx context.Context, node models.Workflow) error
	PutWorkflow(ctx context.Context, node models.Workflow) error
	PatchWorkflow(ctx context.Context, node models.Workflow, oldTimestamp time.Time) (models.Workflow, error)

	// Jobs (no update operations; jobs only accumulate events)
	ListJobsMetadata(ctx context.Context, pagination Pagination, filter []string, sorts []string) (models.MetadataWorkspaceList, error)
	GetJob(ctx context.Context, id models.ComponentReference) (models.Job, error)
	CreateJob(ctx context.Context, node models.Job) error

	DeleteDocument(ctx context.Context, kind DocumentKind, id models.CRefVersion) (models.CRefVersion, error)

	AddJobEvents(ctx context.Context, id models.ComponentReference, events []models.JobEvent) error
}
39 |
// Sentinel errors returned by storage implementations; compare with errors.Is.
var (
	ErrNotFound            = fmt.Errorf("not found")
	ErrNoAccess            = fmt.Errorf("no access")
	ErrNewerDocumentExists = fmt.Errorf("newer document exists")
)

// VolumeClient is the persistence interface for flowify volumes.
type VolumeClient interface {
	ListVolumes(ctx context.Context, pagination Pagination, filters []string, sorts []string) (models.FlowifyVolumeList, error)
	GetVolume(ctx context.Context, id models.ComponentReference) (models.FlowifyVolume, error)
	PutVolume(ctx context.Context, vol models.FlowifyVolume) error
	DeleteVolume(ctx context.Context, id models.ComponentReference) error
}
52 |
--------------------------------------------------------------------------------
/models/examples/two-node-graph-component.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": "An single node graph component",
3 | "inputs": [
4 | { "name": "greeting", "mediatype": ["string"], "type": "parameter" },
5 | { "name": "sender", "mediatype": ["string"], "type": "parameter" }
6 | ],
7 | "type": "component",
8 | "implementation": {
9 | "type": "graph",
10 | "inputMappings": [
11 | {
12 | "source": { "port": "greeting" },
13 | "target": { "node": "greeter", "port": "greeting" }
14 | }
15 | ],
16 | "nodes": [
17 | {
18 | "id": "greeter-node",
19 | "node": {
20 | "description": "A brick component",
21 | "inputs": [
22 | {
23 | "name": "greeting",
24 | "mediatype": ["string"],
25 | "type": "parameter",
26 | "userdata": {"position":{"x":3,"y":6,"note":"save test without formating to avoid breaking the tests."}}
27 | }
28 | ],
29 | "type": "component",
30 | "implementation": {
31 | "type": "brick",
32 | "container": {
33 | "name": "containername",
34 | "image": "docker/whalesay",
35 | "command": ["cowsay"],
36 | "args": ["Hello Test!"]
37 | },
38 | "args": [
39 | {
40 | "source": "Hello static text.",
41 | "description": "A static argument"
42 | },
43 | {
44 | "source": { "port": "GREETING" },
45 | "target": { "type": "env", "name": "GREET" },
46 | "description": "A variable stored in env $(GREET) and expanded by k8s upon execution"
47 | }
48 | ]
49 | }
50 | }
51 | },
52 | {
53 | "id": "responder-node",
54 | "node": {
55 | "description": "A brick component",
56 | "inputs": [
57 | { "name": "sender", "mediatype": ["string"], "type": "parameter" }
58 | ],
59 | "type": "component",
60 | "implementation": {
61 | "type": "brick",
62 | "container": {
63 | "name": "containername",
64 | "image": "docker/whalesay",
65 | "command": ["cowsay"],
66 | "args": ["Hello there! From... "]
67 | },
68 | "args": [
69 | {
70 | "source": { "port": "sender" },
71 | "target": { "name": "from", "type": "parameter" }
72 | }
73 | ]
74 | }
75 | }
76 | }
77 | ]
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/auth/config.go:
--------------------------------------------------------------------------------
1 | package auth
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/MicahParks/keyfunc"
8 | "github.com/equinor/flowify-workflows-server/user"
9 | "github.com/mitchellh/mapstructure"
10 | "github.com/pkg/errors"
11 | log "github.com/sirupsen/logrus"
12 | )
13 |
14 | type AuthConfig struct {
15 | Handler string `mapstructure:"handler"`
16 | // the config is polymorphic based on the handler string
17 | Config map[string]interface{} `mapstructure:"config"`
18 | }
19 |
20 | type AzureConfig struct {
21 | Issuer string
22 | Audience string
23 | KeysUrl string
24 | }
25 |
26 | func NewAuthClientFromConfig(config AuthConfig) (AuthenticationClient, error) {
27 |
28 | switch config.Handler {
29 | case "azure-oauth2-openid-token":
30 | {
31 | var azData AzureConfig
32 | err := mapstructure.Decode(config.Config, &azData)
33 | if err != nil {
34 | return nil, errors.Wrapf(err, "could not decode AuthConfig: %v", config.Config)
35 | }
36 |
37 | opts := AzureTokenAuthenticatorOptions{}
38 | var jwks AzureKeyFunc
39 | if azData.KeysUrl == "DISABLE_JWT_SIGNATURE_VERIFICATION" {
40 | log.Warn("running the authenticator without signature verification is UNSAFE")
41 | opts.DisableVerification = true
42 | } else {
43 | // Create the JWKS from the resource at the given URL.
44 | JWKS, err := keyfunc.Get(azData.KeysUrl, keyfunc.Options{
45 | // best practices for azure key roll-over: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-signing-key-rollover
46 | RefreshInterval: time.Hour * 24,
47 | RefreshRateLimit: time.Minute * 5,
48 | // when encountering a "new" key id, allow immediate refresh (rate limited)
49 | RefreshUnknownKID: true,
50 | // make sure errors make it into the log
51 | RefreshErrorHandler: func(err error) { log.Error("jwks refresh error:", err) },
52 | })
53 | if err != nil {
54 | return nil, errors.Wrap(err, "failed to get the JWKS")
55 | }
56 | jwks = JWKS.Keyfunc
57 | }
58 | return AzureTokenAuthenticator{Issuer: azData.Issuer, Audience: azData.Audience, KeyFunc: jwks, Options: opts}, nil
59 | }
60 |
61 | case "disabled-auth":
62 | {
63 | var muser user.MockUser
64 | err := mapstructure.Decode(config.Config, &muser)
65 | if err != nil {
66 | return nil, errors.Wrapf(err, "could not decode AuthConfig: %v", config.Config)
67 | }
68 | log.Warn("flowify using no authentication and static dummy-authorization: User = ", muser)
69 |
70 | return MockAuthenticator{
71 | User: muser,
72 | }, nil
73 | }
74 | default:
75 | {
76 | return nil, fmt.Errorf("auth handler (%s) not supported", config.Handler)
77 | }
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/e2etest/test.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# End-to-end test driver: brings up minikube, MongoDB, the flowify server and
# a local Argo workflow-controller, runs `go test .`, and tears everything
# down again via the EXIT trap.

set -eu -o pipefail

function cleanup {
  set +e # Continue cleaning up if there is an issue

  printf 'Test shell script cleanup\n'

  kubectl delete ns test --ignore-not-found
  kubectl delete ns test-no-access --ignore-not-found

  # Quit the Argo controller
  argopid=$(ps -ef | grep [w]orkflow-controller | tr -s ' '| cut -f 2 -d ' ')

  if [[ ! -z ${argopid} ]]; then
      kill $argopid
  fi

  # Quit the Flowify server
  # Fix: match the actual binary name (flowify-workflows-server); the old
  # pattern "[f]lowify-server" never matched, leaving the server running.
  flowifypid=$(ps -ef | grep [f]lowify-workflows-server | tr -s ' '| cut -f 2 -d ' ')

  if [[ ! -z ${flowifypid} ]]; then
      kill $flowifypid
  fi

}

trap cleanup EXIT

pushd ..

# Start a kubernetes cluster
minikube start
kubectl create namespace test --dry-run=client -o yaml | kubectl apply -f -
kubectl create namespace test-no-access --dry-run=client -o yaml | kubectl apply -f -

# Inject the default service account with the corresponding roles
kubectl apply -f e2etest/default-roles.yaml

# Copy artifact configmap to test namespace
kubectl get cm artifact-repositories --namespace=argo -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=test -f -
kubectl get secret my-minio-cred --namespace=argo -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=test -f -

# Start a MongoDB server first, so the flowify server can connect on startup
docker container start $(docker container ls --all | grep mongo | cut -f 1 -d ' ') > /dev/null 2>&1

# Launch the flowify server
export KUBERNETES_SERVICE_HOST=$(kubectl config view --minify | grep server | cut -f 3 -d "/" | cut -d ":" -f 1)
export KUBERNETES_SERVICE_PORT=$(kubectl config view --minify | grep server | cut -f 4 -d ":")

export FLOWIFY_MONGO_ADDRESS=localhost
export FLOWIFY_MONGO_PORT=27017

./build/flowify-workflows-server -v 7 > /dev/null 2>&1 &

# Prints the PID of the flowify server so we can hook-up a debugger
ps -ef | grep [f]lowify-workflows-server | tr -s ' ' | cut -f 2 -d ' '

cd $GOPATH
controller=$(find . -wholename "*/dist/workflow-controller")

# Launch the Argo controller
PNS_PRIVILEGED=true DEFAULT_REQUEUE_TIME=100ms LEADER_ELECTION_IDENTITY=local ALWAYS_OFFLOAD_NODE_STATUS=false OFFLOAD_NODE_STATUS_TTL=30s WORKFLOW_GC_PERIOD=30s UPPERIO_DB_DEBUG=0 ARCHIVED_WORKFLOW_GC_PERIOD=30s $controller --executor-image argoproj/argoexec:v3.1.13 --namespaced=true --namespace test > /dev/null 2>&1 &

popd

unset KUBERNETES_SERVICE_HOST
unset KUBERNETES_SERVICE_PORT

# Run all e2e tests (the tests in this directory)
go test .
74 |
75 |
76 |
--------------------------------------------------------------------------------
/sandbox/start.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#set +x
set -eu -o pipefail

# Developer bring-up script: starts minikube, the flowify server, a MongoDB
# container and a local Argo workflow-controller (after a version check).

# Start a kubernetes cluster
minikube start

# Inject the default service account with the corresponding roles
configns=sandbox-config
kubectl apply -f sandbox-config.yaml

# Copy artifact configmap to test namespace
# NOTE(review): unlike e2etest/test.sh, no configmap/secret copy happens here —
# confirm whether this step is intentionally omitted.

# Launch the flowify server
# Derive the kube API host/port from the active kubectl context.
export KUBERNETES_SERVICE_HOST=$(kubectl config view --minify | grep server | cut -f 3 -d "/" | cut -d ":" -f 1)
export KUBERNETES_SERVICE_PORT=$(kubectl config view --minify | grep server | cut -f 4 -d ":")

export FLOWIFY_MONGO_ADDRESS=localhost
export FLOWIFY_MONGO_PORT=27017

# Kill any previously running server instance before relaunching.
killall flowify-workflows-server -vq || printf "flowify-workflows-server not running, restarting\n"
# NOTE(review): the server starts before the MongoDB container below —
# confirm the server tolerates a temporarily unavailable database.
../build/flowify-workflows-server -flowify-auth azure-oauth2-openid-token -namespace $configns & #> /tmp/test.out 2>& 1 &
printf "flowify-workflows-server started: $!\n" # Prints the PID of the flowify server so we can hook-up a debugger

# Start a MongoDB server
docker container start $(docker container ls --all | grep mongo | cut -f 1 -d ' ') > /dev/null 2>& 1

pushd $GOPATH > /dev/null
# assumes argo workflow-controller is installed in the same tree
controller=$(find . -wholename "*/dist/workflow-controller" | xargs realpath)
printf "Controller: $controller \n"
ARGO_CONTROLLER_VERSION=$($controller version 2> /dev/null)
printf "Version info: \n"
printf "$ARGO_CONTROLLER_VERSION\n"

popd > /dev/null

# Terminal colors for the version-check report below.
red=$(tput setaf 1)
green=$(tput setaf 2)
blue=$(tput setaf 33)
normal=$(tput sgr0)

# Launch the Argo controller, and check that versions match the executor
killall ${controller##*/} -vq || printf "${controller##*/} not running, restarting\n"
ARGO_EXECUTOR_VERSION=v3.2.3
if [[ "$ARGO_CONTROLLER_VERSION" == *"$ARGO_EXECUTOR_VERSION"* ]]; then
    printf "${green}Argo controller/executor version check passed.${normal}\n"
else
    printf "${red}Argo controller/executor version check failed:${normal}\n"
    printf "$ARGO_CONTROLLER_VERSION ${red}does not match '${normal}$ARGO_EXECUTOR_VERSION'.\n"
    printf "${green}Either checkout the local controller at version matching ${normal}'${ARGO_EXECUTOR_VERSION}'${green},\n"
    printf "or update the variable ${normal}'ARGO_EXECUTOR_VERSION'${green} in this script.${normal}\n\n"
    exit 1
fi

PNS_PRIVILEGED=true DEFAULT_REQUEUE_TIME=100ms LEADER_ELECTION_IDENTITY=local ALWAYS_OFFLOAD_NODE_STATUS=false OFFLOAD_NODE_STATUS_TTL=30s WORKFLOW_GC_PERIOD=30s UPPERIO_DB_DEBUG=0 ARCHIVED_WORKFLOW_GC_PERIOD=30s $controller --executor-image argoproj/argoexec:$ARGO_EXECUTOR_VERSION --namespaced=false > /dev/null 2>& 1 &
printf "workflow-controller started: $!\n"
58 |
59 |
--------------------------------------------------------------------------------
/pkg/secret/secret_test.go:
--------------------------------------------------------------------------------
1 | package secret
2 |
3 | import (
4 | "context"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/require"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | "k8s.io/client-go/kubernetes/fake"
10 | )
11 |
// Workspace (namespace) names used by the secret-client tests below.
const (
	workspaceName  = "dummy-workspace"
	workspaceName2 = "dummy-workspace2"
)
16 |
17 | func Test_SecretClientRoundTrip(t *testing.T) {
18 | clientSet := fake.NewSimpleClientset()
19 | client := NewSecretClient(clientSet)
20 | ctx := context.TODO()
21 |
22 | err := client.AddSecretKey(ctx, workspaceName, "key1", "value1")
23 | require.NoError(t, err)
24 |
25 | err = client.AddSecretKey(ctx, workspaceName, "key2", "value2")
26 | require.NoError(t, err)
27 |
28 | err = client.AddSecretKey(ctx, workspaceName, "key3", "value3")
29 | require.NoError(t, err)
30 |
31 | err = client.AddSecretKey(ctx, workspaceName2, "key3", "value1")
32 | require.NoError(t, err)
33 |
34 | err = client.AddSecretKey(ctx, workspaceName2, "key4", "value4")
35 | require.NoError(t, err)
36 |
37 | keys, err := client.ListAvailableKeys(ctx, workspaceName)
38 | require.NoError(t, err)
39 |
40 | require.Len(t, keys, 3)
41 | require.ElementsMatch(t, keys, []string{"key1", "key2", "key3"})
42 |
43 | lst1, err := clientSet.CoreV1().Secrets(workspaceName).List(ctx, metav1.ListOptions{})
44 | require.NoError(t, err)
45 | require.Len(t, lst1.Items, 1)
46 |
47 | lst2, err := clientSet.RbacV1().Roles(workspaceName).List(ctx, metav1.ListOptions{})
48 | require.NoError(t, err)
49 | require.Len(t, lst2.Items, 1)
50 |
51 | lst3, err := clientSet.RbacV1().RoleBindings(workspaceName).List(ctx, metav1.ListOptions{})
52 | require.NoError(t, err)
53 | require.Len(t, lst3.Items, 1)
54 | }
55 |
56 | func Test_SecretDelete(t *testing.T) {
57 | clientSet := fake.NewSimpleClientset()
58 | client := NewSecretClient(clientSet)
59 | ctx := context.TODO()
60 |
61 | // Fill db with dummy values
62 | for _, key := range []string{"key1", "key2"} {
63 | require.NoError(t, client.AddSecretKey(ctx, workspaceName, key, "value1"))
64 | }
65 |
66 | require.NoError(t, client.DeleteSecretKey(ctx, workspaceName, "key1"))
67 |
68 | secret, err := clientSet.CoreV1().Secrets(workspaceName).Get(ctx, DefaultObjectName, metav1.GetOptions{})
69 | require.NoError(t, err)
70 |
71 | require.Len(t, secret.Data, 1, "key1 should be removed")
72 | require.Equal(t, "value1", string(secret.Data["key2"]), "key2 should be unaffected")
73 |
74 | require.Error(t, client.DeleteSecretKey(ctx, workspaceName, "key1"), "Delete previously existing key")
75 | require.NoError(t, client.DeleteSecretKey(ctx, workspaceName, "key2"), "Delete remaining key")
76 |
77 | secret, err = clientSet.CoreV1().Secrets(workspaceName).Get(ctx, DefaultObjectName, metav1.GetOptions{})
78 | require.NoError(t, err)
79 |
80 | require.Len(t, secret.Data, 0, "Both keys deleted, secret should be empty")
81 | }
82 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# Build and test entry points for the flowify workflows server.
# Plain `make` builds the server binary into ./build.
.DEFAULT_GOAL := all

# Make sure we inject a sha into the binary, if available
ifndef flowify_git_sha
flowify_git_sha=$(shell git rev-parse --short HEAD)
$(info Set flowify_git_sha=$(flowify_git_sha) from git rev-parse /)
else
$(info Set flowify_git_sha=$(flowify_git_sha) from arg /)
endif

# All non-vendored, non-test go sources; the server binary depends on these.
SRCS := $(shell find . -name "*.go" -not -path "./vendor/*" -not -path "./test/*" ! -name '*_test.go' -not -path "./mock/*")

# Pass strip=<anything> to strip the binary; otherwise STRIP is a no-op (`true`).
ifdef strip
STRIP=strip
else
STRIP=true
endif

all: server

server: build/flowify-workflows-server

# Static (CGO-free) build; commit sha and build time are injected via -ldflags
# so the running server can report its provenance.
build/flowify-workflows-server: $(SRCS)
	CGO_ENABLED=0 go build -v -o $@ -ldflags "-X 'github.com/equinor/flowify-workflows-server/apiserver.CommitSHA=$(flowify_git_sha)' -X 'github.com/equinor/flowify-workflows-server/apiserver.BuildTime=$(shell date -Is)'"
	$(STRIP) $@

# Point git at the repo-local hooks directory.
init:
	git config core.hooksPath .githooks

clean:
	@go clean
	@rm -rf build
	@rm -rf docs/*.json
	@rm -rf docs/*.yaml

TEST_OUTPUT_DIR = ./testoutputs

# exclude slow e2e tests depending on running server infrastructure
# define the UNITTEST_COVERAGE variable to output coverage
unittest:
ifdef UNITTEST_COVERAGE
	mkdir -p $(TEST_OUTPUT_DIR)
	rm -f pipe1
	mkfifo pipe1
	(tee $(TEST_OUTPUT_DIR)/unittest.log | go-junit-report > $(TEST_OUTPUT_DIR)/report.xml) < pipe1 &
	go test $(UNITTEST_FLAGS) `go list ./... | grep -v e2etest` -covermode=count -coverprofile=coverage.out -ldflags "-X 'github.com/equinor/flowify-workflows-server/apiserver.CommitSHA=$(flowify_git_sha)' -X 'github.com/equinor/flowify-workflows-server/apiserver.BuildTime=$(shell date -Is)'" 2>&1 -v > pipe1
	gcov2lcov -infile=coverage.out -outfile=$(TEST_OUTPUT_DIR)/coverage.lcov
else
	go test $(UNITTEST_FLAGS) `go list ./... | grep -v e2etest`
endif

e2etest: server
	$(MAKE) -C e2etest all flowify_git_sha=$(flowify_git_sha)

test: unittest e2etest

# the docker tests run the unittests and e2etest in a dockerized environment

docker_unittest:
	FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f docker-compose-tests.yaml build
	FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f docker-compose-tests.yaml up --exit-code-from app


docker_e2e_build:
	# build base services
	docker-compose -f dev/docker-compose.yaml build
	# build composed testrunner image
	FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f dev/docker-compose.yaml -f dev/docker-compose-e2e.yaml build flowify-e2e-runner


docker_e2e_test: docker_e2e_build
	# explicit 'up' means we stop (but don't remove) containers afterwards
	FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f dev/docker-compose.yaml -f dev/docker-compose-e2e.yaml up --timeout 5 --exit-code-from flowify-e2e-runner cluster mongo flowify-e2e-runner

docker_e2e_test_run: docker_e2e_build
	# explicit 'run' means we dont stop other containers afterwards
	FLOWIFY_GIT_SHA=$(flowify_git_sha) docker-compose -f dev/docker-compose.yaml -f dev/docker-compose-e2e.yaml run --rm flowify-e2e-runner


.PHONY: all server init clean test docker_unittest e2etest
81 |
--------------------------------------------------------------------------------
/.github/workflows/deploy.yaml:
--------------------------------------------------------------------------------
# Builds and pushes the server image to the dev and prod registries, then
# bumps the imageTag in the infrastructure repo for the dev deployment.
name: Deploy
on:
  workflow_dispatch: {}
  push:
    paths-ignore:
      - ".github/**"
      - "dev/**"
    tags: '*'
    branches:
      - main
env:
  PROJECT: flowify

jobs:
  build:
    name: Push image
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      # Fix: DATE was referenced in the image labels below but never defined
      # anywhere in the workflow; export it so the
      # org.opencontainers.image.created label gets a real timestamp.
      - name: Set build date
        run: echo "DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> "$GITHUB_ENV"
      - name: Login to the dev container registry
        run: |
          echo "${{ secrets.DOCKER_PASSWORD }}" \
            | docker login --username "${{ secrets.DOCKER_USERNAME }}" \
              --password-stdin \
              auroradevacr.azurecr.io
      - name: Build dev image
        uses: docker/build-push-action@v2
        with:
          labels: |
            com.equinor.aurora.project=${{ env.PROJECT }}
            org.opencontainers.image.created=${{ env.DATE }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: auroradevacr.azurecr.io/flowify/flowify-workflows-server:${{ github.sha }}
          build-args: FLOWIFY_GIT_SHA=${{ github.sha }}
          push: true
      - name: Logout from the container registry
        run: |
          docker logout
      - name: Login to the prod container registry
        run: |
          echo "${{ secrets.DOCKER_PASSWORD }}" \
            | docker login --username "${{ secrets.DOCKER_USERNAME }}" \
              --password-stdin \
              auroraprodacr.azurecr.io
      - name: Build prod image
        uses: docker/build-push-action@v2
        with:
          labels: |
            com.equinor.aurora.project=${{ env.PROJECT }}
            org.opencontainers.image.created=${{ env.DATE }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: auroraprodacr.azurecr.io/flowify/flowify-workflows-server:${{ github.sha }}
          build-args: FLOWIFY_GIT_SHA=${{ github.sha }}
          push: true
      - name: Logout from the container registry
        run: |
          docker logout
  deploy:
    name: Update deployment
    runs-on: ubuntu-latest
    needs: build
    env:
      EMAIL: ${{ github.event.head_commit.author.email }}
      NAME: ${{ github.event.head_commit.author.name }}
    steps:
      - name: Checkout infra
        uses: actions/checkout@v2
        with:
          ref: main
          repository: equinor/flowify-infrastructure
          ssh-key: ${{ secrets.FLOWIFY_INFRA_DEPLOY_KEY }}
      - name: Update infra
        run: |
          SHA_SHORT=$(echo ${{ github.sha }} | cut -c1-8)
          SHA_LONG=${{ github.sha }}
          git config --global user.email "${EMAIL}"
          git config --global user.name "GitHub Actions (${NAME})"
          sed -i "s/imageTag:.*/imageTag: $SHA_LONG/g" kube/server/values-dev.yaml
          git add kube/server/values-dev.yaml
          git commit --message "GHA: Update development imageTag" || true
          git push
85 |
--------------------------------------------------------------------------------
/e2etest/secret_test.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "net/http"
7 |
8 | wf "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
9 | )
10 |
// ignore is a no-op sink used to silence "declared and not used" compile
// errors for values that are only exercised by disabled test code.
func ignore[T any](T) {}
// Test_SecretHandling_live_system exercises secret handling against a live
// flowify server. Currently only the payload construction runs; the push,
// read-back and workflow-execution assertions are disabled in the block
// comment and their values routed through ignore().
func (s *e2eTestSuite) Test_SecretHandling_live_system() {
	requestor := make_requestor(s.client)
	ignore(requestor)
	// Push some secrets

	// SecretField mirrors the server's JSON wire format for a single secret.
	type SecretField struct {
		Key   string `json:"key"`
		Value string `json:"value"`
	}

	type SecretFieldList struct {
		Items []SecretField `json:"items"`
	}

	payload_obj1 := SecretFieldList{
		Items: []SecretField{
			{Key: "key1", Value: "val1"},
			{Key: "key2", Value: "val2"},
			{Key: "fake_key1", Value: "dummyload"}},
	}

	ignore(payload_obj1)
	/*

	   // There is no check on write, so this is legit. But the token cannot read it back again...

	   payload_obj2 := secret.SecretFieldList{
	       Items: []secret.SecretField{secret.SecretField{"key1", "val1"}, secret.SecretField{"key2", "val2"}, secret.SecretField{"fake_key1", "valX"}}}

	   workspaces := []string{"test", "test-no-access", "not-existing-workspace"}
	   statuses := []int{http.StatusCreated, http.StatusForbidden, http.StatusNotFound}

	   for i, obj := range []secret.SecretFieldList{payload_obj1, payload_obj2, payload_obj2} {
	       payload_json, err := json.Marshal(obj)
	       s.NoError(err)
	       resp, err := requestor("http://localhost:8842/api/v1/secrets/"+workspaces[i], http.MethodPost, string(payload_json))
	       s.NoError(err)
	       s.Equal(statuses[i], resp.StatusCode)
	   }

	   // Read back available fields
	   resp, err := requestor("http://localhost:8842/api/v1/secrets/test", http.MethodGet, "")

	   s.NoError(err)
	   s.Equal(http.StatusOK, resp.StatusCode)

	   var list secret.SecretKeyList
	   marshalResponse(resp, &list)

	   s.ElementsMatch(list.Keys, []string{"key1", "key2", "fake_key1"})

	   // Run a workflow with valid secret access
	   resp, err = requestor("http://localhost:8842/api/v1/workflows/test", http.MethodPost, mockdata.WorkflowWithSecret)

	   s.NoError(err)
	   s.Equal(http.StatusOK, resp.StatusCode)

	   checkLogMessage(s, "wfwithsecret", "dummyload")

	   resp, err = requestor("http://localhost:8842/api/v1/workflows/test/wfwithsecret", http.MethodDelete, "")
	   s.NoError(err)
	   s.Equal(http.StatusOK, resp.StatusCode)
	*/
}
76 |
77 | func checkLogMessage(s *e2eTestSuite, wfName, expectedMessage string) {
78 | requestor := make_requestor(s.client)
79 | resp, err := requestor("http://localhost:8842/api/v1/workflows/test/"+wfName+"/log?logOptions.container=main&logOptions.follow=true", http.MethodGet, "")
80 | s.NoError(err)
81 | s.Equal(http.StatusOK, resp.StatusCode)
82 | s.Equal("text/event-stream", resp.Header.Get("Content-Type"))
83 |
84 | buf_log := new(bytes.Buffer)
85 | buf_log.ReadFrom(resp.Body)
86 | buf_log.Next(6) // remove data prefix
87 | var objmap map[string]json.RawMessage
88 |
89 | err = json.Unmarshal(buf_log.Bytes(), &objmap)
90 | s.NoError(err)
91 |
92 | var entry wf.LogEntry
93 | s.NoError(json.Unmarshal(objmap["result"], &entry))
94 | s.Equal(expectedMessage, entry.Content)
95 | }
96 |
--------------------------------------------------------------------------------
/sandbox/sandbox-config.yaml:
--------------------------------------------------------------------------------
1 | # Flowify needs a namespace to store config maps.
2 | # No other data will be stored in the config ns
3 |
4 | ---
5 | # Namespace
6 | apiVersion: v1
7 | kind: Namespace
8 | metadata:
9 | labels:
10 | app.kubernetes.io/part-of: "flowify"
11 | name: "sandbox-config"
12 |
13 |
14 | # Each workspace consists of a namespace
15 | # and a config map (stored in the config namespace)
16 |
17 | ---
18 | # Namespace 'sandbox-project-a'
19 | apiVersion: v1
20 | kind: Namespace
21 | metadata:
22 | labels:
23 | app.kubernetes.io/part-of: "flowify"
24 | name: "sandbox-project-a"
25 |
26 | ---
27 | # Developer workspace environment
28 | apiVersion: v1
29 | kind: ConfigMap
30 | metadata:
31 | labels:
32 | app.kubernetes.io/component: "workspace-config"
33 | app.kubernetes.io/part-of: "flowify"
34 | name: "sandbox-project-a"
35 | # config lookup via command-line flag `namespace`, default to test
36 | namespace: "sandbox-config"
37 | data:
38 | roles: "[[\"sandbox-developer\"]]"
39 |
40 |
41 |
42 | ---
43 | # Namespace 'sandbox-project-b'
44 | apiVersion: v1
45 | kind: Namespace
46 | metadata:
47 | labels:
48 | app.kubernetes.io/part-of: "flowify"
49 | name: "sandbox-project-b"
50 |
51 | ---
# Workspace/project
53 | apiVersion: v1
54 | kind: ConfigMap
55 | metadata:
56 | labels:
57 | app.kubernetes.io/component: "workspace-config"
58 | app.kubernetes.io/part-of: "flowify"
59 | name: "sandbox-project-b"
60 | # config lookup via command-line flag `namespace`, default to test
61 | namespace: "sandbox-config"
62 | data:
63 | roles: "[\"sandbox-admin\"]"
64 |
65 | ---
66 | # Role for sandbox-project-a
67 | apiVersion: rbac.authorization.k8s.io/v1
68 | kind: Role
69 | metadata:
70 | name: workflow-role
71 | namespace: sandbox-project-a
72 | rules:
73 | - apiGroups:
74 | - ""
75 | resources:
76 | - pods
77 | verbs:
78 | - get
79 | - watch
80 | - patch
81 | - apiGroups:
82 | - ""
83 | resources:
84 | - pods/log
85 | verbs:
86 | - get
87 | - watch
88 | - apiGroups:
89 | - ""
90 | resources:
91 | - pods/exec
92 | verbs:
93 | - create
94 | - apiGroups:
95 | - ""
96 | resources:
97 | - configmaps
98 | verbs:
99 | - create
100 | - get
101 | - update
102 | - apiGroups:
103 | - argoproj.io
104 | resources:
105 | - workflows
106 | verbs:
107 | - create
108 | - get
109 | - apiGroups:
110 | - argoproj.io
111 | resources:
112 | - workflowtasksets
113 | - workflowtasksets/finalizers
114 | verbs:
115 | - list
116 | - watch
117 | - get
118 | - update
119 | - patch
120 |
121 |
122 | ---
123 | # Role binding for sandbox-project-a
124 | apiVersion: rbac.authorization.k8s.io/v1
125 | kind: RoleBinding
126 | metadata:
127 | name: workflow-project-a-binding
128 | namespace: sandbox-project-a
129 | roleRef:
130 | apiGroup: rbac.authorization.k8s.io
131 | kind: Role
132 | name: workflow-role
133 | subjects:
134 | - kind: ServiceAccount
135 | name: default
136 | namespace: sandbox-project-a
137 |
138 | ---
139 | # Role descriptions
140 | apiVersion: v1
141 | kind: ConfigMap
142 | metadata:
143 | labels:
144 | app.kubernetes.io/part-of: "flowify"
145 | name: "role-descriptions"
146 | namespace: "sandbox-config"
147 | data:
148 | "sandbox-developer": "Need to play in the sandbox"
149 | "sandbox-admin": "Required for God-mode"
150 |
--------------------------------------------------------------------------------
/storage/references.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "context"
5 | "reflect"
6 |
7 | "github.com/equinor/flowify-workflows-server/models"
8 | "github.com/pkg/errors"
9 | )
10 |
11 | func drefComponent(ctx context.Context, client ComponentClient, cmp interface{}) (models.Component, error) {
12 | switch v := cmp.(type) {
13 | case models.Component:
14 | out, _ := cmp.(models.Component)
15 | return out, nil
16 | case models.ComponentReference:
17 | id := models.CRefVersion{Uid: cmp.(models.ComponentReference)}
18 | out, err := client.GetComponent(ctx, id)
19 | return out, err
20 | case models.CRefVersion:
21 | out, err := client.GetComponent(ctx, cmp.(models.CRefVersion))
22 | return out, err
23 | default:
24 | return models.Component{}, errors.Errorf("Cannot convert to component object. Incorect type: %s", v)
25 | }
26 | }
27 |
28 | func drefNode(ctx context.Context, client ComponentClient, node *models.Node) (models.Node, error) {
29 | switch v := node.Node.(type) {
30 | case models.Component:
31 | return *node, nil
32 | case models.ComponentReference:
33 | cmp, err := drefComponent(ctx, client, node.Node.(models.ComponentReference))
34 | if err != nil {
35 | return models.Node{}, errors.Wrapf(err, "Cannot dereference node, id: %s", node.Id)
36 | }
37 | obj := models.Node{Id: node.Id, Node: cmp}
38 | return obj, nil
39 | case models.CRefVersion:
40 | cmp, err := drefComponent(ctx, client, node.Node.(models.CRefVersion))
41 | if err != nil {
42 | return models.Node{}, errors.Wrapf(err, "Cannot dereference node, id: %s", node.Id)
43 | }
44 | obj := models.Node{Id: node.Id, Node: cmp}
45 | return obj, nil
46 | default:
47 | return models.Node{}, errors.Errorf("Cannot convert to component object. Incorect node type: %s", v)
48 | }
49 | }
50 |
51 | func traverseComponent(ctx context.Context, client ComponentClient, cmp models.Component) (models.Component, error) {
52 |
53 | switch impCmp := cmp.Implementation.(type) {
54 | case models.Graph:
55 | new_nodes := []models.Node{}
56 | for _, node := range impCmp.Nodes {
57 | sub, err := drefNode(ctx, client, &node)
58 | if err != nil {
59 | return models.Component{}, err
60 | }
61 | _, ok := sub.Node.(models.Component)
62 | if ok {
63 | nc, err := traverseComponent(ctx, client, sub.Node.(models.Component))
64 | if err != nil {
65 | return models.Component{}, err
66 | }
67 | sub.Node = nc
68 | }
69 | new_nodes = append(new_nodes, sub)
70 | }
71 | impCmp.Nodes = new_nodes
72 | cmp.Implementation = impCmp
73 | case models.Map:
74 | newNode, err := drefComponent(ctx, client, impCmp.Node)
75 | if err != nil {
76 | return models.Component{}, err
77 | }
78 | nc, err := traverseComponent(ctx, client, newNode)
79 | if err != nil {
80 | return models.Component{}, err
81 | }
82 | newNode = nc
83 | impCmp.Node = newNode
84 | cmp.Implementation = impCmp
85 | case models.Brick:
86 | // no subcomponents for dereference
87 | default:
88 | return models.Component{}, errors.Errorf("Dereference of type '%s' is not implemented.", reflect.TypeOf(impCmp))
89 | }
90 | return cmp, nil
91 | }
92 |
93 | func DereferenceComponent(ctx context.Context, client ComponentClient, cmp interface{}) (models.Component, error) {
94 | out, err := drefComponent(ctx, client, cmp)
95 | if err != nil {
96 | return models.Component{}, err
97 | }
98 | out, err = traverseComponent(ctx, client, out)
99 | if err != nil {
100 | return models.Component{}, err
101 | }
102 | return out, err
103 | }
104 |
--------------------------------------------------------------------------------
/storage/local.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/equinor/flowify-workflows-server/models"
8 |
9 | "github.com/pkg/errors"
10 | )
11 |
// LocalStorageClientImpl is an in-memory, non-persistent storage backend for
// components, workflows and jobs, keyed by component reference. Intended for
// tests and local development; not safe for concurrent use (plain maps).
type LocalStorageClientImpl struct {
	c_store   map[models.ComponentReference]models.Component
	wf_store  map[models.ComponentReference]models.Workflow
	job_store map[models.ComponentReference]models.Job
}
17 |
18 | func NewLocalNodeStorageClient() *LocalStorageClientImpl {
19 | return &LocalStorageClientImpl{c_store: make(map[models.ComponentReference]models.Component),
20 | wf_store: make(map[models.ComponentReference]models.Workflow)}
21 | }
22 |
23 | // Component storage impl
24 |
25 | func (c *LocalStorageClientImpl) CreateComponent(ctx context.Context, node models.Component, workspace string) error {
26 | key := node.Metadata.Uid
27 |
28 | c.c_store[key] = node
29 |
30 | return nil
31 | }
32 |
33 | func (c *LocalStorageClientImpl) GetComponent(ctx context.Context, id models.ComponentReference) (models.Component, error) {
34 | v, ok := c.c_store[id]
35 |
36 | if !ok {
37 | return models.Component{}, errors.New(fmt.Sprintf("component %s not found", id))
38 | }
39 |
40 | return v, nil
41 | }
42 |
// ListComponentsMetadata returns up to pagination.Limit component metadata
// entries after skipping the first pagination.Skip stored entries; the
// workspaceFilter argument is ignored by the in-memory backend.
// NOTE(review): Go map iteration order is randomized, so successive pages are
// not stable — confirm no caller relies on a deterministic ordering.
func (c *LocalStorageClientImpl) ListComponentsMetadata(ctx context.Context, pagination Pagination, workspaceFilter []string) ([]models.Metadata, error) {
	res := []models.Metadata{}

	pos := 0
	i := 0
	for _, v := range c.c_store {
		if pos >= pagination.Skip && i < pagination.Limit {
			res = append(res, v.Metadata)
			i++
		}
		pos++
	}

	return res, nil
}
58 |
59 | // Workflow storage impl
60 |
61 | func (c *LocalStorageClientImpl) CreateWorkflow(ctx context.Context, node models.Workflow) error {
62 | key := node.Metadata.Uid
63 |
64 | c.wf_store[key] = node
65 |
66 | return nil
67 | }
68 |
69 | func (c *LocalStorageClientImpl) GetWorkflow(ctx context.Context, id models.ComponentReference) (models.Workflow, error) {
70 | v, ok := c.wf_store[id]
71 |
72 | if !ok {
73 | return models.Workflow{}, errors.New(fmt.Sprintf("component %s not found", id))
74 | }
75 |
76 | return v, nil
77 | }
78 |
// ListWorkflowsMetadata returns up to pagination.Limit workflow metadata
// entries after skipping the first pagination.Skip stored entries; the
// workspaceFilter argument is ignored by the in-memory backend.
// NOTE(review): map iteration order is randomized, so pages are not stable.
func (c *LocalStorageClientImpl) ListWorkflowsMetadata(ctx context.Context, pagination Pagination, workspaceFilter []string) ([]models.Metadata, error) {
	res := []models.Metadata{}

	pos := 0
	i := 0
	for _, v := range c.wf_store {
		if pos >= pagination.Skip && i < pagination.Limit {
			res = append(res, v.Metadata)
			i++
		}
		pos++
	}

	return res, nil
}
94 |
95 | // jobs
96 | func (c *LocalStorageClientImpl) GetJob(ctx context.Context, id models.ComponentReference) (models.Job, error) {
97 | v, ok := c.job_store[id]
98 |
99 | if !ok {
100 | return models.Job{}, errors.New(fmt.Sprintf("job %s not found", id))
101 | }
102 |
103 | return v, nil
104 | }
105 |
106 | func (c *LocalStorageClientImpl) CreateJob(ctx context.Context, node models.Job) error {
107 | key := node.Metadata.Uid
108 |
109 | c.job_store[key] = node
110 |
111 | return nil
112 | }
113 |
// ListJobsMetadata returns up to pagination.Limit job metadata entries after
// skipping the first pagination.Skip stored entries; the workspaceFilter
// argument is ignored by the in-memory backend.
// NOTE(review): map iteration order is randomized, so pages are not stable.
func (c *LocalStorageClientImpl) ListJobsMetadata(ctx context.Context, pagination Pagination, workspaceFilter []string) ([]models.Metadata, error) {
	res := []models.Metadata{}

	pos := 0
	i := 0
	for _, v := range c.job_store {
		if pos >= pagination.Skip && i < pagination.Limit {
			res = append(res, v.Metadata)
			i++
		}
		pos++
	}

	return res, nil
}
129 |
--------------------------------------------------------------------------------
/storage/parsequery_test.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 | "time"
7 |
8 | "github.com/sirupsen/logrus"
9 | "github.com/stretchr/testify/assert"
10 | "go.mongodb.org/mongo-driver/bson"
11 | )
12 |
// init is currently empty — presumably a leftover placeholder for test-package
// setup (e.g. logger configuration); confirm whether it can be removed.
func init() {

}
16 |
17 | func Test_SortParse(t *testing.T) {
18 | makeErr := func(query string, no int, part string) error {
19 | // just copied from the internal function
20 | return fmt.Errorf("cannot parse sort (%d:%s) from query (%s)", no, part, query)
21 | }
22 | emptyErr := makeErr("", 0, "")
23 |
24 | var testCases = []struct {
25 | Name string // name
26 | Query string // input
27 | ExpectedError error // expected error
28 | ExpectedResult bson.D // expected return
29 | }{
30 | {"Empty query", "", emptyErr, bson.D{}},
31 | {"Simple ascending", "+timestamp", nil, bson.D{bson.E{Key: "timestamp", Value: int(ASC)}}},
32 | {"No ascending", "timestamp", makeErr("timestamp", 0, "timestamp"), bson.D{}},
33 | {"Bad direction", ".timestamp", makeErr(".timestamp", 0, ".timestamp"), bson.D{}},
34 | {"Simple descending", "-timestamp", nil, bson.D{bson.E{Key: "timestamp", Value: int(DESC)}}},
35 | {"Double ascending", "+modifiedBy,+timestamp", nil, bson.D{bson.E{Key: "modifiedBy", Value: int(ASC)}, bson.E{Key: "timestamp", Value: int(ASC)}}},
36 | {"Trailing comma", "+modifiedBy,", makeErr("+modifiedBy,", 1, ""), bson.D{}},
37 | {"Double error", "+modifiedBy+timestamp", makeErr("+modifiedBy+timestamp", 0, "+modifiedBy+timestamp"), bson.D{}},
38 | }
39 |
40 | for _, test := range testCases {
41 | t.Run(test.Name, func(t *testing.T) {
42 | logrus.Info("Test: ", test.Name, ": ", test.Query)
43 |
44 | res, err := parse_sort_query(test.Query)
45 |
46 | assert.Equal(t, test.ExpectedResult, res)
47 | assert.Equal(t, test.ExpectedError, err)
48 | })
49 | }
50 | }
51 |
52 | func Test_FilterParse(t *testing.T) {
53 | makeErr := func(query string, no int, part string) error {
54 | // just copied from the internal function
55 | return fmt.Errorf("cannot parse filter (%d:%s) from query (%s)", no, part, query)
56 | }
57 | emptyErr := makeErr("", 0, "")
58 | isoNow := time.Now().UTC() //.Truncate(time.Second) // RFC3339 has no subsecond precision
59 |
60 | var testCases = []struct {
61 | Name string // name
62 | Query string // input
63 | ExpectedError error // expected error
64 | ExpectedResult bson.D // expected return
65 | }{
66 | {"Empty filter", "", emptyErr, bson.D{}},
67 | {"Filter on exact modifiedBy", "modifiedBy[==]=flow@equinor.com", nil,
68 | bson.D{{Key: "modifiedBy", Value: bson.D{{Key: "$eq", Value: "flow@equinor.com"}}}}},
69 | {"Filter on regexp modifiedBy", `modifiedBy[search]=\w@equinor.com`, nil,
70 | bson.D{bson.E{Key: "modifiedBy", Value: bson.D{{Key: "$regex", Value: `\w@equinor.com`}, {Key: "$options", Value: "i"}}}}},
71 | {"Timestamp", fmt.Sprintf("timestamp[<=]=%s", isoNow.Format(time.RFC3339Nano)), nil, bson.D{bson.E{Key: "timestamp", Value: bson.D{{Key: "$lte", Value: isoNow}}}}},
72 | {"Combined", fmt.Sprintf("timestamp[<=]=%s,modifiedBy[==]=flow@equinor.com", isoNow.Format(time.RFC3339Nano)), nil,
73 | bson.D{
74 | {Key: "timestamp", Value: bson.D{{Key: "$lte", Value: isoNow}}},
75 | {Key: "modifiedBy", Value: bson.D{{Key: "$eq", Value: "flow@equinor.com"}}},
76 | }},
77 | }
78 |
79 | for _, test := range testCases {
80 | t.Run(test.Name, func(t *testing.T) {
81 | logrus.Info("Test: ", test.Name, ": ", test.Query)
82 |
83 | res, err := parse_filter_query(test.Query)
84 |
85 | assert.Equal(t, test.ExpectedResult, res)
86 | assert.Equal(t, test.ExpectedError, err)
87 | })
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/apiserver/config.go:
--------------------------------------------------------------------------------
1 | package apiserver
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "os"
7 | "reflect"
8 | "strconv"
9 | "strings"
10 |
11 | "github.com/equinor/flowify-workflows-server/auth"
12 | "github.com/equinor/flowify-workflows-server/storage"
13 | "github.com/mitchellh/mapstructure"
14 | "github.com/pkg/errors"
15 | log "github.com/sirupsen/logrus"
16 | "github.com/spf13/viper"
17 | "gopkg.in/yaml.v3"
18 | )
19 |
// KubernetesKonfig selects the kube config file and the namespace the
// server uses to talk to the cluster.
type KubernetesKonfig struct {
	KubeConfigPath string `mapstructure:"kubeconfigpath"`
	Namespace string `mapstructure:"namespace"`
}

// LogConfig holds the textual logrus level (trace, debug, info, warn,
// error, fatal, panic).
type LogConfig struct {
	LogLevel string `mapstructure:"loglevel"`
}

// ServerConfig holds the TCP port the API server listens on.
type ServerConfig struct {
	Port int `mapstructure:"port"`
}

// Config is the top-level flowify server configuration, assembled from a
// yaml file, FLOWIFY_-prefixed environment variables, and command-line
// flags (see main).
type Config struct {
	DbConfig storage.DbConfig `mapstructure:"db"`
	KubernetesKonfig KubernetesKonfig `mapstructure:"kubernetes"`
	AuthConfig auth.AuthConfig `mapstructure:"auth"`

	LogConfig LogConfig `mapstructure:"logging"`
	ServerConfig ServerConfig `mapstructure:"server"`
}
41 |
42 | func (cfg Config) String() string {
43 | bytes, err := yaml.Marshal(cfg)
44 | if err != nil {
45 | log.Error("Could not stringify config", err)
46 | return ""
47 | }
48 | return string(bytes)
49 | }
50 |
51 | func (cfg Config) Dump(path string) error {
52 | str := cfg.String()
53 | switch path {
54 | case "-":
55 | // stdout
56 | fmt.Println(str)
57 | default:
58 | err := os.WriteFile(path, []byte(str), 0666)
59 | if err != nil {
60 | log.Error("Could write config to file ", path)
61 | return err
62 | }
63 | }
64 | return nil
65 | }
66 |
// viperConfig applies the shared viper settings: yaml config format and
// FLOWIFY_-prefixed environment-variable overrides, where '_' stands in
// for '.' in nested keys (e.g. FLOWIFY_SERVER_PORT -> server.port).
func viperConfig() {
	viper.SetConfigType("yaml")
	viper.AutomaticEnv() // let env override config if available

	// to allow environment parse nested config
	viper.SetEnvKeyReplacer(strings.NewReplacer(`.`, `_`))

	// prefix all envs for uniqueness
	viper.SetEnvPrefix("FLOWIFY")
}
77 |
78 | func viperDecodeHook() viper.DecoderConfigOption {
79 | return viper.DecodeHook(
80 | mapstructure.ComposeDecodeHookFunc(
81 | // Try to silent convert string to int
82 | // Port env var can be set as the string, not as required int
83 | func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
84 | if f.Kind() != reflect.String {
85 | return data, nil
86 | }
87 | if t.Kind() != reflect.Interface {
88 | return data, nil
89 | }
90 | v, err := strconv.Atoi(data.(string))
91 | //fmt.Printf("Converting (%v, %v) %v => %d. (%v)\n", f, t, data, v, err)
92 | if err != nil {
93 | return data, nil
94 | }
95 | return v, nil
96 | },
97 | ),
98 | )
99 | }
100 |
101 | func LoadConfigFromReader(stream io.Reader) (Config, error) {
102 | viperConfig()
103 | config := Config{}
104 | if err := viper.ReadConfig(stream); err != nil {
105 | return Config{}, errors.Wrap(err, "Cannot load config from reader")
106 | }
107 |
108 | err := viper.Unmarshal(&config, viperDecodeHook())
109 | if err != nil {
110 | return Config{}, errors.Wrap(err, "Cannot load config from reader")
111 | }
112 |
113 | return config, nil
114 |
115 | }
116 |
117 | func LoadConfigFromPath(path string) (Config, error) {
118 | viper.AddConfigPath(path)
119 | viperConfig()
120 |
121 | err := viper.ReadInConfig()
122 | if err != nil {
123 | return Config{}, errors.Wrap(err, "Cannot not read config from path")
124 | }
125 |
126 | config := Config{}
127 | err = viper.Unmarshal(&config, viperDecodeHook())
128 | if err != nil {
129 | return Config{}, errors.Wrap(err, "Cannot not read config from path")
130 | }
131 | return config, nil
132 | }
133 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "flag"
6 | "os"
7 | "syscall"
8 | "time"
9 |
10 | log "github.com/sirupsen/logrus"
11 |
12 | "github.com/equinor/flowify-workflows-server/apiserver"
13 | )
14 |
const (
	// maxWait bounds the graceful-shutdown period for in-flight requests.
	maxWait = time.Second * 10
)

// status is the process exit code; logFatalHandler sets it to 1 so the
// shutdown path can report failure via os.Exit(status).
var status = 0
20 |
// logFatalHandler is registered as the logrus exit handler: instead of
// letting logrus exit the process directly, it records a failure status,
// asks the process to terminate gracefully, and blocks this goroutine so
// the signal-driven shutdown path can run.
func logFatalHandler() {
	status = 1
	// send SIGTERM to itself to exit gracefully
	syscall.Kill(os.Getpid(), syscall.SIGTERM)
	// block forever: never return control to logrus' own exit call
	select {}
}
27 |
// isFlagPassed reports whether the named flag was explicitly set on the
// command line (flag.Visit only visits flags that have been set).
func isFlagPassed(name string) bool {
	passed := false
	flag.Visit(func(f *flag.Flag) {
		passed = passed || f.Name == name
	})
	return passed
}
37 |
// first returns its first argument and discards the second; handy for
// selecting the value out of a (value, error)-style pair.
func first[T any, S any](t T, s S) T {
	return t
}
39 |
40 | func main() {
41 | log.Infof("Starting process with pid %d", os.Getpid())
42 | log.RegisterExitHandler(logFatalHandler)
43 |
44 | // read config, possible overloaded by ENV VARS
45 | cfg, err := apiserver.LoadConfigFromPath(".")
46 | if err != nil {
47 | log.Error("could not load config, ", err)
48 | return
49 | }
50 |
51 | // Set some common flags
52 | logLevel := flag.String("loglevel", "info", "Set the printout level for the logger (trace, debug, info, warn, error, fatal, panic)")
53 | portNumber := flag.Int("port", 8842, "Set the TCP port nubmer accepting connections")
54 | dbName := flag.String("db", "Flowify", "Set the name of the database to use")
55 | k8sConfigNamespace := flag.String("namespace", "test", "K8s configuration namespace to use")
56 | authHandlerSelector := flag.String("auth", "azure-oauth2-openid-token", "Set the security handler for the backend")
57 | kubeconfig := flag.String("kubeconfig", "~/kube/config", "path to kubeconfig file")
58 | dumpConfig := flag.String("dumpconfig", "", "Dump the config in yaml format to filename or stdout '-'")
59 | flag.Parse()
60 |
61 | // Connect flags to override config (flags > env > configfile )
62 | // viper nested keys dont work well with flags so do it explicitly: https://github.com/spf13/viper/issues/368
63 | if isFlagPassed("loglevel") {
64 | cfg.LogConfig.LogLevel = *logLevel
65 | }
66 | if isFlagPassed("port") {
67 | cfg.ServerConfig.Port = *portNumber
68 | }
69 | if isFlagPassed("db") {
70 | cfg.DbConfig.DbName = *dbName
71 | }
72 | if isFlagPassed("kubeconfig") {
73 | cfg.KubernetesKonfig.KubeConfigPath = *kubeconfig
74 | }
75 | if isFlagPassed("namespace") {
76 | cfg.KubernetesKonfig.Namespace = *k8sConfigNamespace
77 | }
78 | if isFlagPassed("auth") {
79 | cfg.AuthConfig.Handler = *authHandlerSelector
80 | }
81 |
82 | // handle config output
83 | if isFlagPassed("dumpconfig") {
84 | cfg.Dump(*dumpConfig)
85 | }
86 |
87 | // LogConfig is handled directly
88 | level, err := log.ParseLevel(cfg.LogConfig.LogLevel)
89 | if err != nil {
90 | log.Errorf("could not parse log level: %s", cfg.LogConfig)
91 | }
92 | log.SetLevel(level)
93 | log.WithFields(log.Fields{"Loglevel": log.StandardLogger().Level}).Infof("Setting global loglevel")
94 |
95 | ctx, cancel := context.WithCancel(context.Background())
96 | defer cancel()
97 |
98 | server, err := apiserver.NewFlowifyServerFromConfig(cfg)
99 | if err != nil {
100 | log.Error("Cannot create a Flowify server object", err)
101 | os.Exit(1)
102 | }
103 |
104 | // run is a blocking call, but may return early on error
105 | err = server.Run(ctx, nil)
106 | if err != nil {
107 | log.Error(err)
108 | os.Exit(1)
109 | }
110 |
111 | // Create a deadline to wait for.
112 | ctx, cancel = context.WithTimeout(context.Background(), maxWait)
113 | defer cancel()
114 |
115 | log.Info("Received SIGNAL: waiting for active requests to finish...")
116 | server.HttpServer.Shutdown(ctx)
117 |
118 | os.Exit(status)
119 | }
120 |
--------------------------------------------------------------------------------
/pkg/workspace/workspace_test.go:
--------------------------------------------------------------------------------
1 | package workspace_test
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "testing"
7 |
8 | "github.com/equinor/flowify-workflows-server/pkg/workspace"
9 | "github.com/stretchr/testify/require"
10 | core "k8s.io/api/core/v1"
11 | "k8s.io/client-go/kubernetes/fake"
12 | )
13 |
const (
	// namespace is the fake cluster namespace all test config maps live in.
	namespace = "dummy-namespace"
)

// ConfigMap1 is a workspace config map with nested role groups and
// hideForUnauthorized enabled.
const ConfigMap1 = `{
    "apiVersion": "v1",
    "kind": "ConfigMap",
    "metadata": {
        "labels": {
            "app.kubernetes.io/component": "workspace-config",
            "app.kubernetes.io/part-of": "flowify"
        },
        "name": "workspace-abc",
        "namespace": "dummy-namespace"
    },
    "data": {
        "roles": "[[\"token1\", \"token2\", \"token3\"], [\"token4\"]]",
        "projectName": "workspace-abc",
        "hideForUnauthorized": "true"
    }
}
`

// ConfigMap2 is a workspace config map with a flat role list and
// hideForUnauthorized disabled.
const ConfigMap2 = `{
    "apiVersion": "v1",
    "kind": "ConfigMap",
    "metadata": {
        "labels": {
            "app.kubernetes.io/component": "workspace-config",
            "app.kubernetes.io/part-of": "flowify"
        },
        "name": "workspace-xyz",
        "namespace": "dummy-namespace"
    },
    "data": {
        "roles": "[\"token1\", \"token4\"]",
        "projectName": "workspace-xyz",
        "hideForUnauthorized": "false"
    }
}
`

// ConfigMap3 lives in a different namespace ("flowify") and carries a
// serviceAccountName.
// NOTE(review): not referenced by the tests visible in this file —
// presumably used elsewhere or kept for future tests; confirm.
const ConfigMap3 = `{
    "apiVersion": "v1",
    "kind": "ConfigMap",
    "metadata": {
        "labels": {
            "app.kubernetes.io/component": "workspace-config",
            "app.kubernetes.io/part-of": "flowify"
        },
        "name": "test-workspace",
        "namespace": "flowify"
    },
    "data": {
        "roles": "[\"role1\", \"role3\"]",
        "projectName": "test-workspace",
        "hideForUnauthorized": "true",
        "serviceAccountName": "default"
    }
}
`

// WorkspaceDescriptions is the role-descriptions config map mapping role
// tokens to human-readable explanations.
const WorkspaceDescriptions = `{
    "apiVersion": "v1",
    "kind": "ConfigMap",
    "metadata": {
        "labels": {
            "app.kubernetes.io/part-of": "flowify"
        },
        "name": "role-descriptions",
        "namespace": "dummy-namespace"
    },
    "data": {
        "token1": "Need superpowers",
        "token2": "This is handed out freely",
        "token3": "Complain to your boss",
        "token4": "Only given to the bravest",
        "token5": "Nobody knows how to get this"
    }
}
`

var (
	// ctx is a shared placeholder context for the tests.
	ctx = context.TODO()
)

// init is intentionally empty; kept as a placeholder for test setup.
func init() {

}
103 |
104 | func getClient() workspace.WorkspaceClient {
105 | var cm1, cm2, descriptions core.ConfigMap
106 |
107 | json.Unmarshal([]byte(ConfigMap1), &cm1)
108 | json.Unmarshal([]byte(ConfigMap2), &cm2)
109 | json.Unmarshal([]byte(WorkspaceDescriptions), &descriptions)
110 |
111 | clientSet := fake.NewSimpleClientset(&cm1, &cm2, &descriptions)
112 |
113 | return workspace.NewWorkspaceClient(clientSet, namespace)
114 | }
115 |
116 | func Test_WorkspaceClientListWorkspaces(t *testing.T) {
117 | client := getClient()
118 | ws := client.ListWorkspaces()
119 |
120 | // Should return both workspaces
121 | require.Len(t, ws, 2)
122 | for _, w := range ws {
123 | require.Contains(t, []string{"workspace-xyz", "workspace-abc"}, w.Name)
124 | }
125 | }
126 |
127 | func Test_WorkspaceNoRoleConfigMap(t *testing.T) {
128 | var cm1, cm2 core.ConfigMap
129 |
130 | json.Unmarshal([]byte(ConfigMap1), &cm1)
131 | json.Unmarshal([]byte(ConfigMap2), &cm2)
132 |
133 | client := workspace.NewWorkspaceClient(fake.NewSimpleClientset(&cm1, &cm2), namespace)
134 |
135 | ws := client.ListWorkspaces()
136 |
137 | // Should return both workspaces
138 | require.Len(t, ws, 2)
139 | for _, w := range ws {
140 | require.Contains(t, []string{"workspace-xyz", "workspace-abc"}, w.Name)
141 | }
142 | }
143 |
--------------------------------------------------------------------------------
/cmd/dereference/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "context"
7 | "encoding/json"
8 | "flag"
9 | "fmt"
10 | "io"
11 | "os"
12 | "strings"
13 |
14 | "github.com/equinor/flowify-workflows-server/models"
15 | "github.com/equinor/flowify-workflows-server/storage"
16 | "github.com/google/uuid"
17 | log "github.com/sirupsen/logrus"
18 | "github.com/spf13/viper"
19 | )
20 |
// myUsage prints a short usage banner followed by the registered flag
// defaults; installed as flag.Usage in main.
func myUsage() {
	fmt.Printf("Usage: %s [OPTIONS] cmpRef\n", os.Args[0])
	flag.PrintDefaults()
}
25 |
26 | // read a reference or an inline component
27 | func parseInput(doc []byte) (interface{}, error) {
28 | {
29 | // try a plain reference
30 | cref, err := uuid.ParseBytes(bytes.TrimSpace(doc))
31 | if err == nil {
32 | return models.ComponentReference(cref), nil
33 | }
34 | log.Info("Not a plain uuid")
35 | }
36 |
37 | {
38 | // try component
39 | var cmp models.Component
40 | err := json.Unmarshal(doc, &cmp)
41 | if err == nil {
42 | return cmp, nil
43 | }
44 | log.Info("Not a component")
45 | }
46 |
47 | return models.ComponentReference{}, fmt.Errorf("could not parse '%s'", doc)
48 | }
49 |
50 | func LoadDbConfig(path string) (config storage.DbConfig, err error) {
51 | viper.AddConfigPath(path)
52 | viper.SetConfigName("config")
53 | viper.SetConfigType("yaml")
54 | viper.AutomaticEnv() // let env override config if available
55 |
56 | // to allow environment parse nested config
57 | viper.SetEnvKeyReplacer(strings.NewReplacer(`.`, `_`))
58 |
59 | // prefix all envs for uniqueness
60 | viper.SetEnvPrefix("FLOWIFY")
61 |
62 | err = viper.ReadInConfig()
63 | if err != nil {
64 | return
65 | }
66 |
67 | err = viper.Unmarshal(&config)
68 | return
69 | }
70 |
// isFlagPassed reports whether the named flag was explicitly set on the
// command line; flag.Visit enumerates only flags that have been set.
func isFlagPassed(name string) bool {
	passed := false
	flag.Visit(func(f *flag.Flag) {
		passed = passed || f.Name == name
	})
	return passed
}
80 |
81 | func main() {
82 | log.SetLevel(log.InfoLevel)
83 |
84 | // read config, possible overloaded by ENV VARS
85 | cfg, err := LoadDbConfig(".")
86 |
87 | fileName := flag.String("file", "", "Read from file instead of cmd line arg, '-' for stdin")
88 | dbName := flag.String("db", "Flowify", "Set the name of the database to use")
89 | if isFlagPassed("db") {
90 | cfg.DbName = *dbName
91 | }
92 |
93 | flag.Parse()
94 | flag.Usage = myUsage
95 |
96 | // 1. read from arg (typically uid)
97 | // 2. read from file (if selected), - means stdin
98 | if (flag.NArg() == 1) == (*fileName != "") {
99 | flag.Usage()
100 | return
101 | }
102 |
103 | var bytes []byte
104 |
105 | if flag.NArg() == 1 {
106 | // 1. read from arg
107 | bytes = []byte(flag.Arg(0))
108 | } else if *fileName != "" {
109 | // 2. read from file
110 |
111 | var err error // nil error
112 | if *fileName == "-" {
113 | bytes, err = io.ReadAll(bufio.NewReader(os.Stdin))
114 | } else {
115 | bytes, err = os.ReadFile(*fileName)
116 | }
117 | if err != nil {
118 | panic(err)
119 | }
120 | } else {
121 | panic("unexpected")
122 | }
123 |
124 | any, err := parseInput(bytes)
125 | if err != nil {
126 | panic(err)
127 | }
128 |
129 | var component models.Component
130 | cstorage, err := storage.NewMongoStorageClientFromConfig(cfg, nil)
131 | if err != nil {
132 | panic(err)
133 | }
134 |
135 | switch concrete := any.(type) {
136 | case models.ComponentReference:
137 | // retrieve
138 | c, err := cstorage.GetComponent(context.TODO(), concrete)
139 | if err != nil {
140 | fmt.Println("oops!")
141 | panic(err)
142 | }
143 | component = c
144 | case models.Component:
145 | component = concrete
146 | default:
147 | panic("unexpected")
148 | }
149 |
150 | cmpResolved, err := storage.DereferenceComponent(context.TODO(), cstorage, component)
151 | if err != nil {
152 | panic(err)
153 | }
154 |
155 | outBytes, _ := json.MarshalIndent(cmpResolved, "", " ")
156 | fmt.Print(string(outBytes), "\n")
157 | }
158 |
--------------------------------------------------------------------------------
/models/examples/job-map-example.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": "Job example",
3 | "type": "job",
4 | "inputValues": [
5 | {
6 | "value": "6",
7 | "target": "numParts"
8 | },
9 | {
10 | "value": "SECRET_PASS",
11 | "target": "secretL1"
12 | },
13 | {
14 | "value": ["A", "B"],
15 | "target": "branch"
16 | }
17 | ],
18 | "workflow":
19 | {
20 | "name": "wf-example",
21 | "description": "Test workflow with an map example",
22 | "type": "workflow",
23 | "workspace": "argo",
24 | "component":
25 | {
26 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c0",
27 | "description": "Map component",
28 | "inputs": [
29 | { "name": "numParts", "type": "parameter" },
30 | { "name": "secretL1", "type": "env_secret" },
31 | { "name": "branch", "type": "parameter_array" }
32 | ],
33 | "outputs": [
34 | { "name": "outputParamArray", "type": "parameter_array" }
35 | ],
36 | "type": "component",
37 | "implementation": {
38 | "type": "map",
39 | "inputMappings": [
40 | {
41 | "source": { "port": "numParts" },
42 | "target": { "port": "inputParam" }
43 | },
44 | {
45 | "source": { "port": "branch" },
46 | "target": { "port": "val" }
47 | },
48 | {
49 | "source": { "port": "secretL1" },
50 | "target": { "port": "inputScrt" }
51 | }
52 | ],
53 | "outputMappings": [
54 | {
55 | "source": { "port": "output" },
56 | "target": { "port": "outputParamArray" }
57 | }
58 | ],
59 | "node": {
60 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b2",
61 | "description": "MapNode1",
62 | "inputs": [
63 | { "name": "inputParam", "type": "parameter" },
64 | { "name": "val", "type": "parameter" },
65 | { "name": "inputScrt", "type": "env_secret" }
66 | ],
67 | "outputs": [
68 | { "name": "output", "type": "parameter_array" }
69 | ],
70 | "type": "component",
71 | "implementation": {
72 | "type": "brick",
73 | "container": {
74 | "name": "containername_n1_b1",
75 | "image": "alpine:latest",
76 | "command": ["sh", "-c", "echo $inputScrt; ARR=\"[\"; for i in $(seq $0); do ARR=$ARR\\\"$1$i\\\"\", \" ; done; ARR=${ARR%??}\"]\"; echo $ARR | tee /tmp/prm"],
77 | "args": []
78 | },
79 | "args": [
80 | {
81 | "source": { "port": "inputParam" },
82 | "target": { "type": "parameter" }
83 | },
84 | {
85 | "source": { "port": "val" },
86 | "target": { "type": "parameter" }
87 | }
88 | ],
89 | "results": [
90 | {
91 | "source": { "file": "/tmp/prm" },
92 | "target": { "port": "output" }
93 | }
94 | ]
95 | }
96 | }
97 | }
98 | }
99 | }
100 | }
101 |
--------------------------------------------------------------------------------
/auth/auth.go:
--------------------------------------------------------------------------------
1 | package auth
2 |
3 | import (
4 | "context"
5 | "net/http"
6 |
7 | "github.com/equinor/flowify-workflows-server/pkg/workspace"
8 | "github.com/equinor/flowify-workflows-server/user"
9 | "github.com/pkg/errors"
10 | )
11 |
// an authclient either gives an error or an authenticated user
type AuthenticationClient interface {
	Authenticate(r *http.Request) (user.User, error)
}

// the mock authenticator can be used for testing
type MockAuthenticator struct {
	User user.MockUser
}

// Authenticate implements AuthenticationClient by returning the stored
// mock user unconditionally, with no error.
func (m MockAuthenticator) Authenticate(r *http.Request) (user.User, error) {
	return m.User, nil
}
25 |
26 | type ContextKey = int
27 |
28 | const (
29 | AuthorizationKey ContextKey = iota
30 | )
31 |
32 | type Authorization struct {
33 | Action string
34 | Authorized bool
35 | }
36 |
37 | func GetAuthorization(ctx context.Context) *Authorization {
38 | val := ctx.Value(AuthorizationKey)
39 |
40 | if val == nil {
41 | return nil
42 | } else {
43 | return val.(*Authorization)
44 | }
45 | }
46 |
// AuthorizationClient answers whether a user may perform an action on a
// subject, given a subject-specific context object.
type AuthorizationClient interface {
	Authorize(subject Subject, action Action, user user.User, object any) (bool, error)
	// AuthorizePath(user user.User, )
}

// RoleAuthorizer authorizes requests from the roles a user holds in the
// workspace a request targets.
type RoleAuthorizer struct {
	// map subject -> action -> required permssion
	Workspaces workspace.WorkspaceClient
}
56 |
// Action enumerates the operations a user may be authorized for.
type Action string

const (
	Read Action = "read"
	Write Action = "write"
	Delete Action = "delete"
	List Action = "list"
)

// Subject enumerates the resource kinds authorization applies to.
type Subject string

const (
	Secrets Subject = "secrets"
	Volumes Subject = "volumes"
)

// AccessLevel describes a user's standing in a workspace: ordinary user
// access and/or admin access.
type AccessLevel struct {
	User bool
	Admin bool
}
77 |
78 | func (ra RoleAuthorizer) GetWorkspacePermissions(wsp string, usr user.User) (AccessLevel, error) {
79 | wss := ra.Workspaces.ListWorkspaces()
80 |
81 | for _, ws := range wss {
82 | var al AccessLevel
83 | if ws.Name == wsp {
84 | al.User = ws.UserHasAccess(usr)
85 | al.Admin = ws.UserHasAdminAccess(usr)
86 | return al, nil
87 | }
88 | }
89 |
90 | return AccessLevel{}, nil
91 | }
92 |
93 | func (ra RoleAuthorizer) GetSecretPermissions(usr user.User, data any) (map[Action]bool, error) {
94 | p := make(map[Action]bool)
95 |
96 | workspace, ok := data.(string)
97 | if !ok {
98 | return map[Action]bool{}, errors.Errorf("could not decode the workspace variable")
99 | }
100 |
101 | al, err := ra.GetWorkspacePermissions(workspace, usr)
102 | if err != nil {
103 | return map[Action]bool{}, errors.Wrap(err, "could not get secret permissions")
104 | }
105 |
106 | // this is where access levels map to actions.
107 | p[Read] = al.User || al.Admin
108 | p[List] = al.User || al.Admin
109 | p[Write] = al.Admin
110 | p[Delete] = al.Admin
111 |
112 | return p, nil
113 | }
114 |
115 | func (ra RoleAuthorizer) GetVolumePermissions(usr user.User, data any) (map[Action]bool, error) {
116 | p := make(map[Action]bool)
117 |
118 | workspace, ok := data.(string)
119 | if !ok {
120 | return map[Action]bool{}, errors.Errorf("could not decode the workspace variable")
121 | }
122 |
123 | al, err := ra.GetWorkspacePermissions(workspace, usr)
124 | if err != nil {
125 | return map[Action]bool{}, errors.Wrap(err, "could not get secret permissions")
126 | }
127 |
128 | // this is where access levels map to actions.
129 | p[Read] = al.User || al.Admin
130 | p[List] = al.Admin || al.User
131 | p[Write] = al.Admin
132 | p[Delete] = al.Admin
133 |
134 | return p, nil
135 | }
136 |
137 | func (ra RoleAuthorizer) GetPermissions(subject Subject, action Action, usr user.User, data any) (bool, error) {
138 | switch subject {
139 | case Secrets:
140 | perms, err := ra.GetSecretPermissions(usr, data)
141 | if err != nil {
142 | return false, err
143 | }
144 | if p, ok := perms[action]; ok {
145 | return p, nil
146 | }
147 | return false, errors.Errorf("Rule %s:%s not found", subject, action)
148 | case Volumes:
149 | perms, err := ra.GetVolumePermissions(usr, data)
150 | if err != nil {
151 | return false, err
152 | }
153 | if p, ok := perms[action]; ok {
154 | return p, nil
155 | }
156 | return false, errors.Errorf("Rule %s:%s not found", subject, action)
157 | default:
158 | return false, errors.Errorf("no such subject '%s'", subject)
159 | }
160 | }
161 |
162 | func (ra RoleAuthorizer) Authorize(subject Subject, action Action, user user.User, object any) (bool, error) {
163 | p, err := ra.GetPermissions(subject, action, user, object)
164 | if err != nil {
165 | return false, errors.Wrapf(err, "could not authorize request for %s:%s", subject, action)
166 | }
167 |
168 | return p, nil
169 | }
170 |
--------------------------------------------------------------------------------
/rest/secrets.go:
--------------------------------------------------------------------------------
1 | package rest
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "net/http"
8 |
9 | "github.com/equinor/flowify-workflows-server/auth"
10 | "github.com/equinor/flowify-workflows-server/pkg/secret"
11 | "github.com/gorilla/mux"
12 | k8serrors "k8s.io/apimachinery/pkg/api/errors"
13 | )
14 |
// SecretField is the JSON request body for creating or updating a secret:
// a key name and its write-only value.
type SecretField struct {
	Key string `json:"key"`
	Value string `json:"value"`
}

// AuthorizationDenied writes a 401 error response for a denied request.
// NOTE(review): 403 Forbidden is the conventional status for a failed
// authorization (401 means unauthenticated) — confirm the API contract
// before changing.
func AuthorizationDenied(w http.ResponseWriter, r *http.Request, err error) {
	WriteErrorResponse(w, APIError{http.StatusUnauthorized, "Authorization Denied", err.Error()}, "authz middleware")
}

// SecretsPathAuthorization wraps next with authorization of the given
// action on the secrets subject, keyed by the {workspace} path variable.
func SecretsPathAuthorization(action auth.Action, authz auth.AuthorizationClient, next http.HandlerFunc) http.HandlerFunc {
	return PathAuthorization(auth.Secrets, action, "workspace", authz, next)
}
27 |
// RegisterSecretRoutes mounts the secret endpoints on the given route.
// All endpoints speak JSON in both directions and are guarded by
// path-variable authorization on {workspace}. Secrets are write-only:
// there is deliberately no GET for an individual key.
func RegisterSecretRoutes(r *mux.Route, sclient secret.SecretClient, authz auth.AuthorizationClient) {

	s := r.Subrouter()

	const intype = "application/json"
	const outtype = "application/json"

	s.Use(CheckContentHeaderMiddleware(intype))
	s.Use(CheckAcceptRequestHeaderMiddleware(outtype))
	s.Use(SetContentTypeMiddleware(outtype))

	/*
		Authorization for secrets is done on path-variable level {workspace}
	*/
	s.HandleFunc("/secrets/{workspace}/", SecretsPathAuthorization(auth.List, authz, SecretListHandler(sclient))).Methods(http.MethodGet)
	s.HandleFunc("/secrets/{workspace}/{key}", SecretsPathAuthorization(auth.Write, authz, SecretPutHandler(sclient))).Methods(http.MethodPut)
	s.HandleFunc("/secrets/{workspace}/{key}", SecretsPathAuthorization(auth.Delete, authz, SecretDeleteHandler(sclient))).Methods(http.MethodDelete)
	// no get handler, secrets not readable
	// s.HandleFunc("/secrets/{workspace}/{key}", SecretGetHandler(secretClient)).Methods(http.MethodGet)
}
48 |
49 | func SecretListHandler(client secret.SecretClient) http.HandlerFunc {
50 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
51 | workspace := mux.Vars(r)["workspace"]
52 | keys, err := client.ListAvailableKeys(r.Context(), workspace)
53 | if err != nil {
54 | WriteErrorResponse(w, APIError{http.StatusInternalServerError, "error listing secrets", err.Error()}, "listSecrets")
55 | return
56 | }
57 |
58 | WriteResponse(w, http.StatusOK, nil, struct {
59 | Items []string `json:"items"`
60 | }{Items: keys}, "secrets")
61 | })
62 | }
63 |
64 | func SecretDeleteHandler(client secret.SecretClient) http.HandlerFunc {
65 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
66 | workspace := mux.Vars(r)["workspace"]
67 | keyName := mux.Vars(r)["key"]
68 | err := client.DeleteSecretKey(r.Context(), workspace, keyName)
69 |
70 | if err != nil {
71 | if k8serrors.IsNotFound(err) {
72 | WriteErrorResponse(w, APIError{http.StatusNotFound, "could not delete secret", err.Error()}, "deleteSecret")
73 | return
74 | } else {
75 | WriteErrorResponse(w, APIError{http.StatusInternalServerError, "could not delete secret", err.Error()}, "deleteSecret")
76 | return
77 | }
78 | }
79 |
80 | WriteResponse(w, http.StatusNoContent, nil, nil, "deleteSecret")
81 | })
82 | }
83 |
84 | func SecretPutHandler(client secret.SecretClient) http.HandlerFunc {
85 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
86 | workspace := mux.Vars(r)["workspace"]
87 | key := mux.Vars(r)["key"]
88 |
89 | // read secrets to add from request
90 | buf := new(bytes.Buffer)
91 | buf.ReadFrom(r.Body)
92 |
93 | var secret SecretField
94 | err := json.Unmarshal(buf.Bytes(), &secret)
95 |
96 | if err != nil {
97 | WriteErrorResponse(w, APIError{http.StatusBadRequest, "could not unmarshal secret", err.Error()}, "putSecret")
98 | return
99 | }
100 |
101 | if secret.Key != key {
102 | WriteErrorResponse(w, APIError{http.StatusBadRequest, "secret key URL mismatch", fmt.Sprintf("%s vs %s", key, secret.Key)}, "putSecret")
103 | return
104 | }
105 |
106 | // list available keys
107 | keys, err := client.ListAvailableKeys(r.Context(), workspace)
108 |
109 | if err != nil {
110 | WriteErrorResponse(w, APIError{http.StatusInternalServerError, "could not list secrets", err.Error()}, "putSecret")
111 | return
112 | }
113 |
114 | // compare to discern create/update
115 | create := true
116 | for _, k := range keys {
117 | if secret.Key == k {
118 | create = false
119 | break
120 | }
121 | }
122 |
123 | err = client.AddSecretKey(r.Context(), workspace, secret.Key, secret.Value)
124 |
125 | if err != nil {
126 | WriteErrorResponse(w, APIError{http.StatusInternalServerError, "could not put secret", err.Error()}, "putSecret")
127 | return
128 | }
129 |
130 | if create {
131 | // create a new secret
132 | w.Header().Add("Location", r.URL.RequestURI())
133 | WriteResponse(w, http.StatusCreated, map[string]string{"Location": r.URL.RequestURI()}, buf.Bytes(), "putSecret")
134 | } else {
135 | // update
136 | WriteResponse(w, http.StatusNoContent, nil, nil, "putSecret")
137 |
138 | }
139 | })
140 | }
141 |
--------------------------------------------------------------------------------
/storage/parsequery.go:
--------------------------------------------------------------------------------
1 | // utils for creating filters and sorting parameters from query strings
2 | package storage
3 |
4 | import (
5 | "fmt"
6 | "regexp"
7 | "strings"
8 | "time"
9 |
10 | "github.com/pkg/errors"
11 | "go.mongodb.org/mongo-driver/bson"
12 | )
13 |
// Order encodes a mongo sort direction (+1 ascending, -1 descending).
type Order int

const (
	ASC Order = +1
	DESC Order = -1
	// notSet flags a sort term whose direction could not be parsed
	notSet Order = 0
	// fieldDotField is a regexp fragment matching a dotted field path,
	// e.g. "metadata.timestamp"
	fieldDotField = `(\w+(?:\.\w+)*)`
)
22 |
23 | func parse_sort_query(sortstr string) (bson.D, error) {
24 | // ?sort=+modifiedBy,-timestamp
25 | // ?sort=+modifiedBy&sort=-timestamp
26 |
27 | var validQuery = regexp.MustCompile(`^(\+|-)` + fieldDotField + `$`)
28 |
29 | // bson.D is required to keep the order, its a list of bson.E
30 | var result bson.D
31 | parts := strings.Split(sortstr, ",")
32 | for i, p := range parts {
33 | matches := validQuery.FindStringSubmatch(p)
34 | // the full match and the two subgroups should be returned in a valid query
35 | if len(matches) != 3 {
36 | return bson.D{}, fmt.Errorf("cannot parse sort (%d:%s) from query (%s)", i, p, sortstr)
37 | }
38 | order := notSet
39 | switch matches[1] {
40 | case "+":
41 | order = ASC
42 | case "-":
43 | order = DESC
44 | }
45 |
46 | if order == notSet {
47 | return bson.D{}, fmt.Errorf("can never parse sort (%d,%s) from query (%s)", i, p, sortstr)
48 | }
49 |
50 | result = append(result, bson.E{Key: matches[2], Value: int(order)})
51 | }
52 |
53 | return result, nil
54 | }
55 |
56 | func sort_queries(sortstrings []string) (bson.D, error) {
57 | sorts := make([]bson.E, 0, len(sortstrings))
58 |
59 | for _, f := range sortstrings {
60 | mf, err := parse_sort_query(f)
61 | if err != nil {
62 | return nil, errors.Wrap(err, "could not parse filter query")
63 | }
64 | sorts = append(sorts, mf...)
65 | }
66 | return sorts, nil
67 | }
68 |
69 | func mongo_operator(op string, value interface{}) (bson.D, error) {
70 | switch op {
71 | case "==":
72 | // exact match
73 | return bson.D{bson.E{Key: "$eq", Value: value}}, nil
74 | case "!=":
75 | return bson.D{bson.E{Key: "$neq", Value: value}}, nil
76 | case ">=":
77 | return bson.D{bson.E{Key: "$gte", Value: value}}, nil
78 | case "<=":
79 | return bson.D{bson.E{Key: "$lte", Value: value}}, nil
80 | case ">":
81 | return bson.D{bson.E{Key: "$gt", Value: value}}, nil
82 | case "<":
83 | return bson.D{bson.E{Key: "$lt", Value: value}}, nil
84 | case "search":
85 | // make regexp case insensitive
86 | return bson.D{bson.E{Key: "$regex", Value: value}, bson.E{Key: "$options", Value: "i"}}, nil
87 | default:
88 | return bson.D{}, fmt.Errorf("no such filter operator (%s)", op)
89 | }
90 | }
91 |
92 | func mongo_filter(attr string, ops string, value interface{}) (bson.E, error) {
93 | op, err := mongo_operator(ops, value)
94 | if err != nil {
95 | return bson.E{}, errors.Wrap(err, "could not construct filter")
96 | }
97 | return bson.E{Key: attr, Value: op}, nil
98 | }
99 |
100 | func filter_queries(filterstrings []string) ([]bson.D, error) {
101 | filters := make([]bson.D, 0, len(filterstrings))
102 |
103 | for _, f := range filterstrings {
104 | mf, err := parse_filter_query(f)
105 | if err != nil {
106 | return nil, errors.Wrap(err, "could not parse filter query")
107 | }
108 | filters = append(filters, mf)
109 | }
110 | return filters, nil
111 | }
112 |
113 | func parse_filter_query(filter string) (bson.D, error) {
114 | // LHS brackets from https://www.moesif.com/blog/technical/api-design/REST-API-Design-Filtering-Sorting-and-Pagination/
115 | // ?filter=modifiedBy[==]=flow@equinor.com
116 |
117 | var validFilter = regexp.MustCompile(`^` + fieldDotField + `\[(==|>=|<=|search|>|<|!=)\]=(.*)$`)
118 |
119 | parts := strings.Split(filter, ",")
120 |
121 | // bson.D is required to keep the order (its a list of bson.E)
122 | result := make([]bson.E, 0, len(parts))
123 | for i, p := range parts {
124 | matches := validFilter.FindStringSubmatch(p)
125 | // the full match and the three subgroups should be returned for a valid query
126 | if len(matches) != 4 {
127 | return bson.D{}, fmt.Errorf("cannot parse filter (%d:%s) from query (%s)", i, p, filter)
128 | }
129 |
130 | opName := matches[2]
131 | attributeName := matches[1]
132 | var value interface{}
133 | var err error
134 | switch attributeName {
135 | case "timestamp":
136 | value, err = time.Parse(time.RFC3339, matches[3])
137 | if err != nil {
138 | return bson.D{}, errors.Wrapf(err, "cannot parse timestamp (%s) in (%d:%s) from query (%s)", matches[3], i, p, filter)
139 | }
140 |
141 | default:
142 | value = matches[3]
143 | }
144 |
145 | filter, err := mongo_filter(attributeName, opName, value)
146 | if err != nil {
147 | return bson.D{}, errors.Wrapf(err, "cannot parse filter (%d:%s) from query (%s)", i, p, filter)
148 | }
149 |
150 | result = append(result, filter)
151 | }
152 |
153 | return result, nil
154 | }
155 |
// JoinOp is a mongo logical operator used to combine filter documents.
type JoinOp string

const (
	// AND requires every joined sub-query to match ("$and").
	AND JoinOp = "$and"
)
161 |
162 | // joins a list of (filter) queries with the specified mongo operator. handles degenerate cases (singular or empty) gracefully
163 | func join_queries(queries []bson.D, op JoinOp) bson.D {
164 | switch len(queries) {
165 | case 0:
166 | return bson.D{}
167 | case 1:
168 | return queries[0]
169 | }
170 | arr := make(bson.A, 0, len(queries))
171 | for _, q := range queries {
172 | arr = append(arr, q)
173 | }
174 | return bson.D{{Key: string(op), Value: arr}}
175 | }
176 |
--------------------------------------------------------------------------------
/models/examples/if-statement.json:
--------------------------------------------------------------------------------
1 | {
2 | "description": "Conditional example",
3 | "type": "job",
4 | "inputValues": [
5 | {
6 | "value": "10",
7 | "target": "max"
8 | }
9 | ],
10 | "workflow": {
11 | "name": "wf-example",
12 | "description": "Test workflow with an if statement example",
13 | "type": "workflow",
14 | "workspace": "argo",
15 | "component": {
16 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c0",
17 | "description": "Graph component",
18 | "inputs": [
19 | { "name": "max", "mediatype": ["integer"], "type": "parameter" }
20 | ],
21 | "outputs": [{ "name": "description", "type": "parameter" }],
22 | "type": "component",
23 | "implementation": {
24 | "type": "graph",
25 | "inputMappings": [
26 | {
27 | "source": { "port": "max" },
28 | "target": { "node": "N1", "port": "value" }
29 | }
30 | ],
31 | "outputMappings": [
32 | {
33 | "source": { "node": "If", "port": "ifOut" },
34 | "target": { "port": "description" }
35 | }
36 | ],
37 | "nodes": [
38 | {
39 | "id": "N1",
40 | "node": {
41 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b0",
42 | "description": "Generate",
43 | "inputs": [{ "name": "value", "type": "parameter" }],
44 | "outputs": [{ "name": "rand", "type": "parameter" }],
45 | "type": "component",
46 | "implementation": {
47 | "type": "brick",
48 | "container": {
49 | "name": "containername_n1_b1",
50 | "image": "bash:latest",
51 | "command": ["bash", "-c", "shuf -i 0-$0 -n1 > /tmp/out"]
52 | },
53 | "args": [
54 | {
55 | "source": { "port": "value" },
56 | "target": { "type": "parameter" }
57 | }
58 | ],
59 | "results": [
60 | {
61 | "source": { "file": "/tmp/out" },
62 | "target": { "port": "rand" }
63 | }
64 | ]
65 | }
66 | }
67 | },
68 | {
69 | "id": "If",
70 | "node": {
71 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c2",
72 | "description": "If/else component",
73 | "inputs": [
74 | {
75 | "name": "valFromParam",
76 | "mediatype": ["number"],
77 | "type": "parameter"
78 | }
79 | ],
80 | "outputs": [{ "name": "ifOut", "type": "parameter" }],
81 | "type": "component",
82 | "implementation": {
83 | "type": "conditional",
84 | "inputMappings": [
85 | {
86 | "source": { "port": "valFromParam" },
87 | "target": { "port": "valParam" }
88 | }
89 | ],
90 | "outputMappings": [
91 | {
92 | "source": { "node": "nodeTrue", "port": "out" },
93 | "target": { "port": "ifOut" }
94 | }
95 | ],
96 | "nodeTrue": {
97 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b4",
98 | "description": "",
99 | "inputs": [
100 | {
101 | "name": "valParam",
102 | "mediatype": ["number"],
103 | "type": "parameter"
104 | }
105 | ],
106 | "outputs": [{ "name": "out", "type": "parameter" }],
107 | "type": "component",
108 | "implementation": {
109 | "type": "brick",
110 | "container": {
111 | "name": "containername",
112 | "image": "alpine:latest",
113 | "command": [
114 | "sh",
115 | "-c",
116 | "echo value $0 is huge > /tmp/out"
117 | ]
118 | },
119 | "args": [
120 | {
121 | "source": { "port": "valParam" },
122 | "target": { "type": "parameter" }
123 | }
124 | ],
125 | "results": [
126 | {
127 | "source": { "file": "/tmp/out" },
128 | "target": { "port": "out" }
129 | }
130 | ]
131 | }
132 | },
133 | "expression": {
134 | "left": {
135 | "name": "valFromParam",
136 | "mediatype": ["number"],
137 | "type": "parameter"
138 | },
139 | "operator": ">=",
140 | "right": "5"
141 | }
142 | }
143 | }
144 | }
145 | ],
146 | "edges": [
147 | {
148 | "source": { "node": "N1", "port": "rand" },
149 | "target": { "node": "If", "port": "valFromParam" }
150 | }
151 | ]
152 | }
153 | }
154 | }
155 | }
156 |
--------------------------------------------------------------------------------
/rest/workspaces.go:
--------------------------------------------------------------------------------
1 | package rest
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "github.com/gorilla/mux"
7 | "k8s.io/client-go/kubernetes"
8 | "net/http"
9 |
10 | "github.com/equinor/flowify-workflows-server/models"
11 | "github.com/equinor/flowify-workflows-server/pkg/workspace"
12 | "github.com/equinor/flowify-workflows-server/user"
13 | )
14 |
15 | func RegisterWorkspaceRoutes(r *mux.Route, k8sclient kubernetes.Interface, namespace string, wsClient workspace.WorkspaceClient) {
16 | s := r.Subrouter()
17 |
18 | const intype = "application/json"
19 | const outtype = "application/json"
20 |
21 | s.Use(CheckContentHeaderMiddleware(intype))
22 | s.Use(CheckAcceptRequestHeaderMiddleware(outtype))
23 | s.Use(SetContentTypeMiddleware(outtype))
24 |
25 | s.HandleFunc("/workspaces/", WorkspacesListHandler()).Methods(http.MethodGet)
26 | s.HandleFunc("/workspaces/", CreationPathAuthorization(WorkspacesCreateHandler(k8sclient, namespace, wsClient))).Methods(http.MethodPost)
27 | s.HandleFunc("/workspaces/", WorkspacesUpdateHandler(k8sclient, namespace, wsClient)).Methods(http.MethodPut)
28 | s.HandleFunc("/workspaces/", WorkspacesDeleteHandler(k8sclient, namespace, wsClient)).Methods(http.MethodDelete)
29 | }
30 |
31 | func CreationPathAuthorization(next http.HandlerFunc) http.HandlerFunc {
32 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
33 | u := user.GetUser(r.Context())
34 | if !user.CanCreateWorkspaces(u) {
35 | err := fmt.Errorf("not authorized")
36 | AuthorizationDenied(w, r, err)
37 | return
38 | }
39 | next(w, r)
40 | })
41 | }
42 |
43 | func WorkspacesListHandler() http.HandlerFunc {
44 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
45 | wss := GetWorkspaceAccess(r.Context())
46 | lst := []workspace.WorkspaceGetRequest{}
47 | usr := user.GetUser(r.Context())
48 | for _, ws := range wss {
49 | wsgr := workspace.WorkspaceGetRequest{Name: ws.Name, Description: ws.Description}
50 | roles := []string{}
51 | if ws.UserHasAccess(usr) {
52 | roles = append(roles, "ws-collaborator")
53 | }
54 | if ws.UserHasAdminAccess(usr) {
55 | roles = append(roles, "ws-owner")
56 | }
57 | customRoles, ok := ws.UserCanSeeCustomRoles(usr)
58 | if ok {
59 | for _, customRole := range customRoles {
60 | roles = append(roles, string(customRole))
61 | }
62 | }
63 | wsgr.Roles = roles
64 | lst = append(lst, wsgr)
65 | }
66 |
67 | WriteResponse(w, http.StatusOK, nil, struct {
68 | Items []workspace.WorkspaceGetRequest `json:"items"`
69 | }{Items: lst}, "workspace")
70 | })
71 | }
72 |
73 | func WorkspacesCreateHandler(k8sclient kubernetes.Interface, namespace string, wsClient workspace.WorkspaceClient) http.HandlerFunc {
74 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
75 | var creationData workspace.InputData
76 | err := json.NewDecoder(r.Body).Decode(&creationData)
77 | if err != nil {
78 | WriteResponse(w, http.StatusInternalServerError, nil, struct {
79 | Error string
80 | }{Error: fmt.Sprintf("error decoding the input data: %v\n", err)}, "workspace")
81 | }
82 |
83 | wsCreation := models.WorkspacesInputToCreateData(creationData, namespace)
84 | msg, err := wsClient.Create(k8sclient, wsCreation)
85 | if err != nil {
86 | WriteResponse(w, http.StatusInternalServerError, nil, struct {
87 | Error string
88 | }{Error: fmt.Sprintf("error creating workspace: %v\n", err)}, "workspace")
89 | }
90 |
91 | WriteResponse(w, http.StatusCreated, nil, struct {
92 | Workspace string
93 | }{
94 | Workspace: fmt.Sprintf("Success: %s", msg),
95 | }, "workspace")
96 | })
97 | }
98 |
99 | func WorkspacesUpdateHandler(k8sclient kubernetes.Interface, namespace string, wsClient workspace.WorkspaceClient) http.HandlerFunc {
100 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
101 | var updateData workspace.InputData
102 | err := json.NewDecoder(r.Body).Decode(&updateData)
103 | if err != nil {
104 | WriteResponse(w, http.StatusInternalServerError, nil, struct {
105 | Error string
106 | }{Error: fmt.Sprintf("error decoding the input data: %v\n", err)}, "workspace")
107 | }
108 |
109 | wsUpdate := models.WorkspacesInputToUpdateData(updateData, namespace)
110 | msg, err := wsClient.Update(k8sclient, wsUpdate)
111 | if err != nil {
112 | WriteResponse(w, http.StatusInternalServerError, nil, struct {
113 | Error string
114 | }{Error: fmt.Sprintf("error updating workspace: %v\n", err)}, "workspace")
115 | }
116 |
117 | WriteResponse(w, http.StatusOK, nil, struct {
118 | Workspace string
119 | }{
120 | Workspace: fmt.Sprintf("Success: %s", msg),
121 | }, "workspace")
122 | })
123 | }
124 |
125 | func WorkspacesDeleteHandler(k8sclient kubernetes.Interface, namespace string, wsClient workspace.WorkspaceClient) http.HandlerFunc {
126 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
127 | var deleteData workspace.InputData
128 | err := json.NewDecoder(r.Body).Decode(&deleteData)
129 | if err != nil {
130 | WriteResponse(w, http.StatusInternalServerError, nil, struct {
131 | Error string
132 | }{Error: fmt.Sprintf("error decoding the input data: %v\n", err)}, "workspace")
133 |
134 | }
135 | msg, err := wsClient.Delete(k8sclient, namespace, deleteData.Name)
136 | if err != nil {
137 | WriteResponse(w, http.StatusInternalServerError, nil, struct {
138 | Error string
139 | }{Error: fmt.Sprintf("error deleteing: %v\n", err)}, "workspace")
140 | }
141 | WriteResponse(w, http.StatusOK, nil, struct {
142 | Workspace string
143 | }{
144 | Workspace: fmt.Sprintf("Success: %s", msg),
145 | }, "workspace")
146 | })
147 | }
148 |
--------------------------------------------------------------------------------
/models/examples/multi-level-secrets.json:
--------------------------------------------------------------------------------
1 | {
2 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c0",
3 | "description": "My cool graph",
4 | "inputs": [
5 | { "name": "seedT", "mediatype": ["integer"], "type": "parameter" },
6 | { "name": "secretL1", "mediatype": ["env_secret"], "type": "env_secret" },
7 | { "name": "secretL2", "mediatype": ["env_secret"], "type": "env_secret" },
8 | { "name": "secretL3", "mediatype": ["env_secret"], "type": "env_secret" }
9 | ],
10 | "outputs": [],
11 | "type": "component",
12 | "implementation": {
13 | "type": "graph",
14 | "inputMappings": [
15 | {
16 | "source": { "port": "seedT" },
17 | "target": { "node": "N1", "port": "seedN1" }
18 | },
19 | {
20 | "source": { "port": "secretL1" },
21 | "target": { "node": "N1", "port": "secretB1" }
22 | },
23 | {
24 | "source": { "port": "secretL2" },
25 | "target": { "node": "N1", "port": "secretB2" }
26 | },
27 | {
28 | "source": { "port": "secretL1" },
29 | "target": { "node": "N2", "port": "secretG1" }
30 | },
31 | {
32 | "source": { "port": "secretL2" },
33 | "target": { "node": "N2", "port": "secretG2" }
34 | },
35 | {
36 | "source": { "port": "secretL3" },
37 | "target": { "node": "N2", "port": "secretG3" }
38 | }
39 | ],
40 | "nodes": [
41 | {
42 | "id": "N1",
43 | "node": {
44 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b1",
45 | "description": "B1",
46 | "inputs": [
47 | { "name": "seedN1", "mediatype": ["integer"], "type": "parameter" },
48 | { "name": "secretB1", "mediatype": ["env_secret"], "type": "env_secret" },
49 | { "name": "secretB2", "mediatype": ["env_secret"], "type": "env_secret" }
50 | ],
51 | "outputs": [],
52 | "type": "component",
53 | "implementation": {
54 | "type": "brick",
55 | "container": {
56 | "name": "containername_n1_b1",
57 | "image": "alpine:latest",
58 | "command": ["sh", "-c"],
59 | "args": []
60 | },
61 | "args": [
62 | { "source": "echo " },
63 | {
64 | "source": { "port": "seedN1" },
65 | "target": { "type": "parameter", "name": "seed" }
66 | },
67 | { "source": "; echo $secretB1 ; echo $secretB2"}
68 | ]
69 | }
70 | }
71 | },
72 | {
73 | "id": "N2",
74 | "node": {
75 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144c2",
76 | "description": "G2",
77 | "inputs": [
78 | { "name": "secretG1", "mediatype": ["env_secret"], "type": "env_secret" },
79 | { "name": "secretG2", "mediatype": ["env_secret"], "type": "env_secret" },
80 | { "name": "secretG3", "mediatype": ["env_secret"], "type": "env_secret" },
81 | { "name": "secretG4", "mediatype": ["env_secret"], "type": "env_secret" }
82 | ],
83 | "outputs": [],
84 | "type": "component",
85 | "implementation": {
86 | "type": "graph",
87 | "inputMappings": [
88 | {
89 | "source": { "port": "secretG1" },
90 | "target": { "node": "N2G2B2", "port": "secretW1" }
91 | },
92 | {
93 | "source": { "port": "secretG2" },
94 | "target": { "node": "N2G2B2", "port": "secretW2" }
95 | },
96 | {
97 | "source": { "port": "secretG3" },
98 | "target": { "node": "N2G2B2", "port": "secretW3" }
99 | },
100 | {
101 | "source": { "port": "secretG4" },
102 | "target": { "node": "N2G2B2", "port": "secretW4" }
103 | }
104 | ],
105 | "nodes": [
106 | {
107 | "id": "N2G2B2",
108 | "node": {
109 | "uid": "192161d7-e3f2-4991-adc0-a99c88c144b2",
110 | "description": "B2",
111 | "inputs": [
112 | { "name": "secretW1", "mediatype": ["env_secret"], "type": "env_secret" },
113 | { "name": "secretW2", "mediatype": ["env_secret"], "type": "env_secret" },
114 | { "name": "secretW3", "mediatype": ["env_secret"], "type": "env_secret" },
115 | { "name": "secretW4", "mediatype": ["env_secret"], "type": "env_secret" }
116 | ],
117 | "outputs": [],
118 | "type": "component",
119 | "implementation": {
120 | "type": "brick",
121 | "container": {
122 | "name": "containername",
123 | "image": "alpine:latest",
124 | "command": ["sh", "-c"],
125 | "args": []
126 | },
127 | "args": [
128 | {"source": "echo $secretW1; echo $secretW2; echo $secretW3; echo $secretW4"}
129 | ]
130 | }
131 | }
132 | }
133 | ],
134 | "edges": []
135 | }
136 | }
137 | }
138 | ],
139 | "edges": [
140 | ]
141 | }
142 | }
143 |
144 |
145 |
--------------------------------------------------------------------------------
/e2etest/component_test.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "bytes"
5 | "io"
6 | // flowifypkg "github.com/equinor/flowify-workflows-server/pkg/apiclient/interceptor"
7 | // "github.com/equinor/flowify-workflows-server/workflowserver"
8 | )
9 |
10 | func body2string(body io.ReadCloser) []byte {
11 | buf := new(bytes.Buffer)
12 | buf.ReadFrom(body)
13 |
14 | return buf.Bytes()
15 | }
16 |
// wrap embeds a raw workflow JSON document under a top-level "template" key.
func wrap(workflowstring string) string {
	return `{ "template":` + workflowstring + `}`
}
20 |
// Test_components is currently disabled: the entire body is commented out
// pending a port to the current flowify server/storage APIs (the referenced
// workflowserver and interceptor packages are also commented out in the
// import block above). It exercised the flowify-workflows CRUD, versioning
// and submit endpoints end-to-end.
func (s *e2eTestSuite) Test_components() {
	/*

		requestor := make_requestor(s.client)

		ccstore := workflowserver.NewFlowifyWorkflowStorageClient(storageclient.NewMongoClient())

		// Clear DB collection before returning the client handler
		ccstore.Clear()

		// Change names to match e2e tests jwt token
		wf1 := mockdata.WorkflowTemplate1
		wf2 := mockdata.WorkflowTemplate2

		resp, err := requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodPost, wrap(wf1))
		s.NoError(err)
		s.Equal(http.StatusOK, http.StatusOK)

		resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodPost, wrap(wf1))
		s.NoError(err)
		s.Equal(http.StatusOK, http.StatusOK)

		resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodPost, wrap(wf1))
		s.NoError(err)
		s.Equal(http.StatusOK, http.StatusOK)

		resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodPost, wrap(wf2))
		s.NoError(err)
		s.Equal(http.StatusOK, http.StatusOK)

		resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/?workspace=test", http.MethodGet, "")
		s.Equal(http.StatusOK, resp.StatusCode)

		var l1 workflowserver.WorkflowList
		json.Unmarshal(body2string(resp.Body), &l1)
		s.Len(l1.Items, 2)

		var wft v1alpha1.WorkflowTemplate
		json.Unmarshal(l1.Items[0].Content, &wft)

		resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/"+wft.ObjectMeta.Name+"/versions?workspace=test", http.MethodGet, "")
		s.Equal(http.StatusOK, resp.StatusCode)

		var l2 workflowserver.VersionList
		json.Unmarshal(body2string(resp.Body), &l2)

		s.Len(l2.Versions, 3)

		Names := make([]string, 3)
		Versions := make([]string, 3)

		for i, item := range l2.Versions {
			Names[i] = item.WrittenBy
			Versions[i] = item.Version
		}

		s.Len(l2.Versions, 3)

		s.ElementsMatch([]string{"0", "1", "2"}, Versions)
		s.ElementsMatch([]string{"test@test.com", "test@test.com", "test@test.com"}, Names) // injected from the used test auth token

		resp, err = requestor("http://localhost:8842/api/v1/flowify-workflows/workflowtemplate1?workspace=test", http.MethodGet, "")
		s.NoError(err)
		s.Equal(http.StatusOK, resp.StatusCode)
		s.True(json.Valid(body2string(resp.Body)))

		_, err = requestor("http://localhost:8842/api/v1/flowify-workflows/workflowtemplate1?version=1&workspace=test", http.MethodGet, "")
		s.NoError(err)
		s.Equal(http.StatusOK, resp.StatusCode)

		// Submit non-existing version
		req := flowifypkg.WorkflowSubmitRequest{Namespace: "test", ResourceKind: "WorkflowTemplate", ResourceName: "workflowtemplate2", Version: "1"}
		payload, err := json.Marshal(req)
		s.NoError(err)

		resp, err = requestor("http://localhost:8842/api/v1/workflows/test/submit", http.MethodPost, string(payload))
		s.NoError(err)
		s.Equal(http.StatusNotFound, resp.StatusCode)

		// Submit existing version, with explicit version
		req = flowifypkg.WorkflowSubmitRequest{Namespace: "test", ResourceKind: "WorkflowTemplate", ResourceName: "workflowtemplate2", Version: "0"}
		payload, err = json.Marshal(req)
		s.NoError(err)

		resp, err = requestor("http://localhost:8842/api/v1/workflows/test/submit", http.MethodPost, string(payload))
		s.NoError(err)
		s.Equal(http.StatusOK, resp.StatusCode)

		// submit with implicit 'last' version
		req = flowifypkg.WorkflowSubmitRequest{Namespace: "test", ResourceKind: "WorkflowTemplate", ResourceName: "workflowtemplate2"}
		payload, err = json.Marshal(req)
		s.NoError(err)

		resp, err = requestor("http://localhost:8842/api/v1/workflows/test/submit", http.MethodPost, string(payload))
		s.NoError(err)
		s.Equal(http.StatusOK, resp.StatusCode)

		var wf v1alpha1.Workflow
		err = json.Unmarshal(body2string(resp.Body), &wf)
		s.NoError(err)

		name := wf.ObjectMeta.Name

		// Fetch workflow, and verify name is the same as the submitted workflow object
		resp, err = requestor("http://localhost:8842/api/v1/workflows/test/"+name, http.MethodGet, "")
		s.Equal(http.StatusOK, resp.StatusCode)
		s.NoError(err)

		err = json.Unmarshal(body2string(resp.Body), &wf)
		s.NoError(err)

		s.Equal(name, wf.ObjectMeta.Name)

		// Check that the workflowtemplate was reaped
		resp, err = requestor("http://localhost:8842/api/v1/workflow-templates/test", http.MethodGet, "")
		s.NoError(err)
		s.Equal(http.StatusOK, resp.StatusCode)

		var wftList v1alpha1.WorkflowTemplateList
		err = json.Unmarshal(body2string(resp.Body), &wftList)

		s.Len(wftList.Items, 0)
		s.NoError(err)

		// Remove the workflow
		resp, err = requestor("http://localhost:8842/api/v1/workflows/test/"+name, http.MethodDelete, "")
		s.NoError(err)
		s.Equal(http.StatusOK, resp.StatusCode)
	*/
}
151 |
--------------------------------------------------------------------------------
/auth/azure_token.go:
--------------------------------------------------------------------------------
1 | package auth
2 |
3 | import (
4 | "crypto/subtle"
5 | "fmt"
6 | "net/http"
7 | "strings"
8 | "time"
9 |
10 | "github.com/equinor/flowify-workflows-server/user"
11 | "github.com/pkg/errors"
12 | "github.com/sirupsen/logrus"
13 |
14 | "github.com/golang-jwt/jwt/v4"
15 | )
16 |
// implements user.User
//
// AzureTokenUser carries the identity claims decoded from an Azure AD JWT,
// plus the expected audience/issuer values used when validating the token.
type AzureTokenUser struct {
	Name string `json:"name"`
	Email string `json:"email"`
	Oid string `json:"oid"`
	Roles []user.Role `json:"roles"`
	jwt.RegisteredClaims

	// unexported: set by NewAzureTokenUser, consumed by Valid
	expectedAudience string
	expectedIssuer string
}
28 |
29 | func NewAzureTokenUser(audience string, issuer string) AzureTokenUser {
30 | // empty user
31 | user := AzureTokenUser{}
32 |
33 | // for validation
34 | user.expectedAudience = audience
35 | user.expectedIssuer = issuer
36 |
37 | return user
38 | }
39 |
// The time to use when validating token life-time,
// defaults to time.Now which is UTC, https://tools.ietf.org/html/rfc7519#section-4.1.4
// can be temporarily overridden when testing
var TimeFunc = time.Now

// the same as the jwt KeyFunc
// AzureKeyFunc resolves the verification key for a parsed (but not yet
// verified) token, e.g. by looking up its key id.
type AzureKeyFunc = func(claim *jwt.Token) (interface{}, error)
47 |
// AzureTokenAuthenticatorOptions tweaks authenticator behavior.
type AzureTokenAuthenticatorOptions struct {
	// Disable verification of the signature of the tokens, (claims are still validated)
	DisableVerification bool
}

// AzureTokenAuthenticator authenticates HTTP requests by parsing and
// validating an Azure AD bearer token from the Authorization header.
type AzureTokenAuthenticator struct {
	// KeyFunc resolves the signing key used to verify token signatures.
	KeyFunc AzureKeyFunc
	// the intended audience to be verified with the token `aud` claim
	Audience string
	// the issuer id to be verified with the token `iss` claim
	Issuer string

	// Use only in safe environments
	Options AzureTokenAuthenticatorOptions
}
63 |
64 | func NewAzureTokenAuthenticator(KeyFunc AzureKeyFunc,
65 | Audience string,
66 | Issuer string,
67 | Options AzureTokenAuthenticatorOptions) AuthenticationClient {
68 |
69 | return AzureTokenAuthenticator{KeyFunc: KeyFunc,
70 | Audience: Audience, Issuer: Issuer,
71 | Options: Options}
72 | }
73 |
74 | func (a AzureTokenAuthenticator) Authenticate(r *http.Request) (user.User, error) {
75 | authStr := r.Header.Get("Authorization")
76 |
77 | // Permission injection is required
78 | if authStr == "" {
79 | return AzureTokenUser{}, fmt.Errorf("no Authorization header given")
80 | }
81 |
82 | parts := strings.SplitN(authStr, " ", 2)
83 |
84 | if len(parts) < 2 || !strings.EqualFold(parts[0], "bearer") {
85 | return AzureTokenUser{}, fmt.Errorf("bad Authorization header")
86 | }
87 |
88 | user := NewAzureTokenUser(a.Audience, a.Issuer)
89 | err := user.Parse(parts[1], a.KeyFunc, a.Options.DisableVerification)
90 | if err != nil {
91 | return AzureTokenUser{}, errors.Wrap(err, "authentication error")
92 | }
93 | return user, nil
94 | }
95 |
// Accessors implementing the user.User interface.
func (t AzureTokenUser) GetUid() string       { return t.Oid }
func (t AzureTokenUser) GetName() string      { return t.Name }
func (t AzureTokenUser) GetEmail() string     { return t.Email }
func (t AzureTokenUser) GetRoles() []user.Role { return t.Roles }
100 | func (t *AzureTokenUser) Parse(tokenString string, keyFunc AzureKeyFunc, disableVerification bool) error {
101 | if disableVerification {
102 | logrus.Warn("jwt token verification is DISABLED")
103 | if _, _, err := jwt.NewParser().ParseUnverified(tokenString, t); err != nil {
104 | return err
105 | }
106 |
107 | // parse unverified doesn't call validation, do it explicitly
108 | return t.Valid()
109 | }
110 |
111 | _, err := jwt.ParseWithClaims(tokenString, t, keyFunc)
112 | return err
113 | }
114 |
115 | // called from the jwt-parser code to ensure the token is valid wrt
116 | // also called explicitly from the no-verification path of Parse
// called from the jwt-parser code to ensure the token is valid wrt
// also called explicitly from the no-verification path of Parse
//
// Valid checks the registered life-time claims (exp, iat, nbf) against
// TimeFunc and verifies the aud/iss claims against the expected values
// stored on the receiver. All of these claims are required to be present;
// a missing claim fails validation.
func (t AzureTokenUser) Valid() error {
	now := TimeFunc()

	requireSet := true
	// The claims below are optional, by default, but we force them tested

	// 'exp': token must not be expired
	if !t.VerifyExpiresAt(now, requireSet) {
		if t.ExpiresAt != nil {
			logrus.Warnf("token expired: 'now' > 'exp', %s < %s", now.UTC().Format(time.RFC3339), t.ExpiresAt.UTC().Format(time.RFC3339))
		} else {
			logrus.Warn("token missing 'exp' claim")
		}
		return fmt.Errorf("token expired")
	}

	// 'iat': token must not be used before it was issued
	if !t.VerifyIssuedAt(now, requireSet) {
		if t.IssuedAt != nil {
			logrus.Warnf("token used before issued: 'now' < 'iat', %s < %s", now.UTC().Format(time.RFC3339), t.IssuedAt.UTC().Format(time.RFC3339))
		} else {
			logrus.Warn("token missing 'iat' claim")
		}

		return fmt.Errorf("token not valid")
	}

	// 'nbf': token must not be used before its not-before time
	if !t.VerifyNotBefore(now, requireSet) {
		if t.NotBefore != nil {
			logrus.Warnf("token used before valid: 'now' < 'nbf' %s < %s", now.UTC().Format(time.RFC3339), t.NotBefore.UTC().Format(time.RFC3339))
		} else {
			logrus.Warn("token missing 'nbf' claim")
		}
		return fmt.Errorf("token not yet valid")
	}

	// 'aud': must contain the audience this service expects
	if !t.VerifyAudience(t.expectedAudience, requireSet) {
		if t.Audience != nil {
			logrus.Warnf("token bad aud claim (%s), expected %s", t.Audience, t.expectedAudience)
		} else {
			logrus.Warn("token missing 'aud' claim")
		}

		return fmt.Errorf("invalid token `aud`")
	}

	// 'iss': compared in constant time to avoid timing side channels
	// dont mistake comparison semantics, 1 is *match*
	if subtle.ConstantTimeCompare([]byte(t.Issuer), []byte(t.expectedIssuer)) != 1 {
		logrus.Warnf("token bad iss claim (%s), expected: %s", t.Issuer, t.expectedIssuer)
		return fmt.Errorf("invalid token `iss`")
	}

	return nil
}
169 |
170 | /*
171 | func readAll(url string) ([]byte, error) {
172 | r, err := http.Get(url)
173 | if err != nil {
174 | return []byte{}, err
175 | }
176 | if r.StatusCode != http.StatusOK {
177 | return []byte{}, fmt.Errorf("could not get azure validation info")
178 | }
179 |
180 | buf := new(bytes.Buffer)
181 | if err := func() error { // scope for defer and err
182 | _, err := buf.ReadFrom(r.Body)
183 | defer r.Body.Close()
184 | return err
185 | }(); err != nil {
186 | return []byte{}, err
187 | }
188 | return buf.Bytes(), nil
189 | }
190 | */
191 |
--------------------------------------------------------------------------------